From 20b0d5db81f539dcaee485c671a07c0c5c8f2bde Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 22 Dec 2024 17:54:30 +0000 Subject: [PATCH 001/105] Bump versions to LDK 0.1.0, invoice 0.33.0, types 0.2.0, beta1 Sadly, both `lightning-invoice` and `lightning-types` need a minor version bump as they both upgraded `bech32` which changed the public API. --- lightning-background-processor/Cargo.toml | 12 ++++++------ lightning-block-sync/Cargo.toml | 6 +++--- lightning-custom-message/Cargo.toml | 4 ++-- lightning-dns-resolver/Cargo.toml | 6 +++--- lightning-invoice/Cargo.toml | 4 ++-- lightning-liquidity/Cargo.toml | 14 +++++++------- lightning-net-tokio/Cargo.toml | 6 +++--- lightning-persister/Cargo.toml | 6 +++--- lightning-rapid-gossip-sync/Cargo.toml | 6 +++--- lightning-transaction-sync/Cargo.toml | 6 +++--- lightning-types/Cargo.toml | 2 +- lightning/Cargo.toml | 8 ++++---- 12 files changed, 40 insertions(+), 40 deletions(-) diff --git a/lightning-background-processor/Cargo.toml b/lightning-background-processor/Cargo.toml index 0afc18fdfbb..d34ec5304e6 100644 --- a/lightning-background-processor/Cargo.toml +++ b/lightning-background-processor/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-background-processor" -version = "0.0.124" +version = "0.1.0-beta1" authors = ["Valentine Wallace "] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" @@ -23,14 +23,14 @@ default = ["std"] bitcoin = { version = "0.32.2", default-features = false } bitcoin_hashes = { version = "0.14.0", default-features = false } bitcoin-io = { version = "0.1.2", default-features = false } -lightning = { version = "0.0.124", path = "../lightning", default-features = false } -lightning-rapid-gossip-sync = { version = "0.0.124", path = "../lightning-rapid-gossip-sync", default-features = false } +lightning = { version = "0.1.0-beta1", path = "../lightning", default-features = false } +lightning-rapid-gossip-sync = { version = "0.1.0-beta1", path = "../lightning-rapid-gossip-sync", default-features = false } [dev-dependencies] tokio = { version = "1.35", features = [ "macros", "rt", "rt-multi-thread", "sync", "time" ] } -lightning = { version = "0.0.124", path = "../lightning", features = ["_test_utils"] } -lightning-invoice = { version = "0.32.0", path = "../lightning-invoice" } -lightning-persister = { version = "0.0.124", path = "../lightning-persister" } +lightning = { version = "0.1.0-beta1", path = "../lightning", features = ["_test_utils"] } +lightning-invoice = { version = "0.33.0-beta1", path = "../lightning-invoice" } +lightning-persister = { version = "0.1.0-beta1", path = "../lightning-persister" } [lints] workspace = true diff --git a/lightning-block-sync/Cargo.toml b/lightning-block-sync/Cargo.toml index 38ec56a51b7..80506cc92fd 100644 --- a/lightning-block-sync/Cargo.toml +++ b/lightning-block-sync/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-block-sync" -version = "0.0.124" +version = "0.1.0-beta1" authors = ["Jeffrey Czyz", "Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" @@ -19,13 +19,13 @@ rpc-client = [ "serde_json", "chunked_transfer" ] [dependencies] bitcoin = "0.32.2" -lightning = { version = "0.0.124", path = "../lightning" } +lightning = { version = "0.1.0-beta1", path = "../lightning" } tokio = { version = "1.35", features = [ "io-util", "net", "time", "rt" ], optional = true } serde_json = { version = "1.0", optional = true } chunked_transfer = { version = "1.4", 
optional = true } [dev-dependencies] -lightning = { version = "0.0.124", path = "../lightning", features = ["_test_utils"] } +lightning = { version = "0.1.0-beta1", path = "../lightning", features = ["_test_utils"] } tokio = { version = "1.35", features = [ "macros", "rt" ] } [lints] diff --git a/lightning-custom-message/Cargo.toml b/lightning-custom-message/Cargo.toml index d3eaf125520..daf8638399c 100644 --- a/lightning-custom-message/Cargo.toml +++ b/lightning-custom-message/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-custom-message" -version = "0.0.124" +version = "0.1.0-beta1" authors = ["Jeffrey Czyz"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" @@ -15,7 +15,7 @@ rustdoc-args = ["--cfg", "docsrs"] [dependencies] bitcoin = "0.32.2" -lightning = { version = "0.0.124", path = "../lightning" } +lightning = { version = "0.1.0-beta1", path = "../lightning" } [lints] workspace = true diff --git a/lightning-dns-resolver/Cargo.toml b/lightning-dns-resolver/Cargo.toml index 1c2ebe615b2..194fbdf02b9 100644 --- a/lightning-dns-resolver/Cargo.toml +++ b/lightning-dns-resolver/Cargo.toml @@ -8,12 +8,12 @@ description = "A crate which implements DNSSEC resolution for lightning clients edition = "2021" [dependencies] -lightning = { version = "0.0.124", path = "../lightning", default-features = false } -lightning-types = { version = "0.1", path = "../lightning-types", default-features = false } +lightning = { version = "0.1.0-beta1", path = "../lightning", default-features = false } +lightning-types = { version = "0.2.0-beta1", path = "../lightning-types", default-features = false } dnssec-prover = { version = "0.6", default-features = false, features = [ "std", "tokio" ] } tokio = { version = "1.0", default-features = false, features = ["rt"] } [dev-dependencies] bitcoin = { version = "0.32" } tokio = { version = "1.0", default-features = false, features = ["macros", "time"] } -lightning = { version = "0.0.124", path = "../lightning", features = ["dnssec", "_test_utils"] } +lightning = { version = "0.1.0-beta1", path = "../lightning", features = ["dnssec", "_test_utils"] } diff --git a/lightning-invoice/Cargo.toml b/lightning-invoice/Cargo.toml index ee1d4aa53dc..f46e1c4928c 100644 --- a/lightning-invoice/Cargo.toml +++ b/lightning-invoice/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lightning-invoice" description = "Data structures to parse and serialize BOLT11 lightning invoices" -version = "0.32.0" +version = "0.33.0-beta1" authors = ["Sebastian Geisler "] documentation = "https://docs.rs/lightning-invoice/" license = "MIT OR Apache-2.0" @@ -19,7 +19,7 @@ std = [] [dependencies] bech32 = { version = "0.11.0", default-features = false } -lightning-types = { version = "0.1.0", path = "../lightning-types", default-features = false } +lightning-types = { version = "0.2.0-beta1", path = "../lightning-types", default-features = false } serde = { version = "1.0.118", optional = true } bitcoin = { version = "0.32.2", default-features = false, features = ["secp-recovery"] } diff --git a/lightning-liquidity/Cargo.toml b/lightning-liquidity/Cargo.toml index 9e76b0c7c68..9d65211da66 100644 --- a/lightning-liquidity/Cargo.toml +++ b/lightning-liquidity/Cargo.toml @@ -19,9 +19,9 @@ std = ["lightning/std"] backtrace = ["dep:backtrace"] [dependencies] -lightning = { version = "0.0.124", path = "../lightning", default-features = false } -lightning-types = { version = "0.1", path = "../lightning-types", default-features = false } -lightning-invoice = { 
version = "0.32.0", path = "../lightning-invoice", default-features = false, features = ["serde"] } +lightning = { version = "0.1.0-beta1", path = "../lightning", default-features = false } +lightning-types = { version = "0.2.0-beta1", path = "../lightning-types", default-features = false } +lightning-invoice = { version = "0.33.0-beta1", path = "../lightning-invoice", default-features = false, features = ["serde"] } bitcoin = { version = "0.32.2", default-features = false, features = ["serde"] } @@ -31,10 +31,10 @@ serde_json = "1.0" backtrace = { version = "0.3", optional = true } [dev-dependencies] -lightning = { version = "0.0.124", path = "../lightning", default-features = false, features = ["_test_utils"] } -lightning-invoice = { version = "0.32.0", path = "../lightning-invoice", default-features = false, features = ["serde", "std"] } -lightning-persister = { version = "0.0.124", path = "../lightning-persister", default-features = false } -lightning-background-processor = { version = "0.0.124", path = "../lightning-background-processor", default-features = false, features = ["std"] } +lightning = { version = "0.1.0-beta1", path = "../lightning", default-features = false, features = ["_test_utils"] } +lightning-invoice = { version = "0.33.0-beta1", path = "../lightning-invoice", default-features = false, features = ["serde", "std"] } +lightning-persister = { version = "0.1.0-beta1", path = "../lightning-persister", default-features = false } +lightning-background-processor = { version = "0.1.0-beta1", path = "../lightning-background-processor", default-features = false, features = ["std"] } proptest = "1.0.0" tokio = { version = "1.35", default-features = false, features = [ "rt-multi-thread", "time", "sync", "macros" ] } diff --git a/lightning-net-tokio/Cargo.toml b/lightning-net-tokio/Cargo.toml index 9df6594b063..b897d42dac5 100644 --- a/lightning-net-tokio/Cargo.toml +++ b/lightning-net-tokio/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-net-tokio" -version = "0.0.124" +version = "0.1.0-beta1" authors = ["Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning/" @@ -16,12 +16,12 @@ rustdoc-args = ["--cfg", "docsrs"] [dependencies] bitcoin = "0.32.2" -lightning = { version = "0.0.124", path = "../lightning" } +lightning = { version = "0.1.0-beta1", path = "../lightning" } tokio = { version = "1.35", features = [ "rt", "sync", "net", "time" ] } [dev-dependencies] tokio = { version = "1.35", features = [ "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] } -lightning = { version = "0.0.124", path = "../lightning", features = ["_test_utils"] } +lightning = { version = "0.1.0-beta1", path = "../lightning", features = ["_test_utils"] } [lints] workspace = true diff --git a/lightning-persister/Cargo.toml b/lightning-persister/Cargo.toml index 4007f14cfc1..1f8b497ae09 100644 --- a/lightning-persister/Cargo.toml +++ b/lightning-persister/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-persister" -version = "0.0.124" +version = "0.1.0-beta1" authors = ["Valentine Wallace", "Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" @@ -15,7 +15,7 @@ rustdoc-args = ["--cfg", "docsrs"] [dependencies] bitcoin = "0.32.2" -lightning = { version = "0.0.124", path = "../lightning" } +lightning = { version = "0.1.0-beta1", path = "../lightning" } [target.'cfg(windows)'.dependencies] windows-sys = { version = "0.48.0", default-features = false, features = 
["Win32_Storage_FileSystem", "Win32_Foundation"] } @@ -24,7 +24,7 @@ windows-sys = { version = "0.48.0", default-features = false, features = ["Win32 criterion = { version = "0.4", optional = true, default-features = false } [dev-dependencies] -lightning = { version = "0.0.124", path = "../lightning", features = ["_test_utils"] } +lightning = { version = "0.1.0-beta1", path = "../lightning", features = ["_test_utils"] } bitcoin = { version = "0.32.2", default-features = false } [lints] diff --git a/lightning-rapid-gossip-sync/Cargo.toml b/lightning-rapid-gossip-sync/Cargo.toml index 03efbde3daa..4fdd33f88d7 100644 --- a/lightning-rapid-gossip-sync/Cargo.toml +++ b/lightning-rapid-gossip-sync/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-rapid-gossip-sync" -version = "0.0.124" +version = "0.1.0-beta1" authors = ["Arik Sosman "] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" @@ -14,7 +14,7 @@ default = ["std"] std = ["bitcoin-io/std", "bitcoin_hashes/std"] [dependencies] -lightning = { version = "0.0.124", path = "../lightning", default-features = false } +lightning = { version = "0.1.0-beta1", path = "../lightning", default-features = false } bitcoin = { version = "0.32.2", default-features = false } bitcoin_hashes = { version = "0.14.0", default-features = false } bitcoin-io = { version = "0.1.2", default-features = false } @@ -23,7 +23,7 @@ bitcoin-io = { version = "0.1.2", default-features = false } criterion = { version = "0.4", optional = true, default-features = false } [dev-dependencies] -lightning = { version = "0.0.124", path = "../lightning", features = ["_test_utils"] } +lightning = { version = "0.1.0-beta1", path = "../lightning", features = ["_test_utils"] } [lints] workspace = true diff --git a/lightning-transaction-sync/Cargo.toml b/lightning-transaction-sync/Cargo.toml index 4410b2f5edb..2e604894108 100644 --- a/lightning-transaction-sync/Cargo.toml +++ b/lightning-transaction-sync/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-transaction-sync" -version = "0.0.124" +version = "0.1.0-beta1" authors = ["Elias Rohrer"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" @@ -23,7 +23,7 @@ electrum = ["electrum-client"] async-interface = [] [dependencies] -lightning = { version = "0.0.124", path = "../lightning", default-features = false, features = ["std"] } +lightning = { version = "0.1.0-beta1", path = "../lightning", default-features = false, features = ["std"] } lightning-macros = { version = "0.1", path = "../lightning-macros", default-features = false } bitcoin = { version = "0.32.2", default-features = false } futures = { version = "0.3", optional = true } @@ -31,7 +31,7 @@ esplora-client = { version = "0.11", default-features = false, optional = true } electrum-client = { version = "0.21.0", optional = true } [dev-dependencies] -lightning = { version = "0.0.124", path = "../lightning", default-features = false, features = ["std", "_test_utils"] } +lightning = { version = "0.1.0-beta1", path = "../lightning", default-features = false, features = ["std", "_test_utils"] } tokio = { version = "1.35.0", features = ["macros"] } [target.'cfg(not(target_os = "windows"))'.dev-dependencies] diff --git a/lightning-types/Cargo.toml b/lightning-types/Cargo.toml index 768b8fb3d90..6adeb561ca0 100644 --- a/lightning-types/Cargo.toml +++ b/lightning-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-types" -version = "0.1.0" +version = "0.2.0-beta1" authors = ["Matt 
Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning/" diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml index ff746255148..bbb60c523b9 100644 --- a/lightning/Cargo.toml +++ b/lightning/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning" -version = "0.0.124" +version = "0.1.0-beta1" authors = ["Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning/" @@ -33,8 +33,8 @@ grind_signatures = [] default = ["std", "grind_signatures"] [dependencies] -lightning-types = { version = "0.1.0", path = "../lightning-types", default-features = false } -lightning-invoice = { version = "0.32.0", path = "../lightning-invoice", default-features = false } +lightning-types = { version = "0.2.0-beta1", path = "../lightning-types", default-features = false } +lightning-invoice = { version = "0.33.0-beta1", path = "../lightning-invoice", default-features = false } bech32 = { version = "0.11.0", default-features = false } bitcoin = { version = "0.32.2", default-features = false, features = ["secp-recovery"] } @@ -50,7 +50,7 @@ libm = { version = "0.2", default-features = false } [dev-dependencies] regex = "1.5.6" -lightning-types = { version = "0.1.0", path = "../lightning-types", features = ["_test_utils"] } +lightning-types = { version = "0.2.0-beta1", path = "../lightning-types", features = ["_test_utils"] } [dev-dependencies.bitcoin] version = "0.32.2" From 2fedb634d92145f043074ea1048817d0a965700c Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 3 Jan 2025 10:57:26 +0100 Subject: [PATCH 002/105] Bump `lightning-liquidity` to `0.1.0-beta1` .. to align with LDK version numbering and allow a new release depending on the 0.1.0-beta1 crate dependencies. --- lightning-liquidity/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightning-liquidity/Cargo.toml b/lightning-liquidity/Cargo.toml index 9d65211da66..343b9cd6c6c 100644 --- a/lightning-liquidity/Cargo.toml +++ b/lightning-liquidity/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-liquidity" -version = "0.1.0-alpha.6" +version = "0.1.0-beta1" authors = ["John Cantrell ", "Elias Rohrer "] homepage = "https://lightningdevkit.org/" license = "MIT OR Apache-2.0" From 2394deb4d733e72f2657dac4e283b7ed131676aa Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 6 Jan 2025 09:37:07 +0100 Subject: [PATCH 003/105] Change repository in `Cargo.toml` to `rust-lightning` --- lightning-liquidity/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightning-liquidity/Cargo.toml b/lightning-liquidity/Cargo.toml index 343b9cd6c6c..a90f89a05b4 100644 --- a/lightning-liquidity/Cargo.toml +++ b/lightning-liquidity/Cargo.toml @@ -6,7 +6,7 @@ homepage = "https://lightningdevkit.org/" license = "MIT OR Apache-2.0" edition = "2021" description = "Types and primitives to integrate a spec-compliant LSP with an LDK-based node." -repository = "https://github.com/lightningdevkit/lightning-liquidity/" +repository = "https://github.com/lightningdevkit/rust-lightning" readme = "README.md" keywords = ["bitcoin", "lightning", "ldk", "bdk"] categories = ["cryptography::cryptocurrencies"] From e28312777741e86ca04ff70e8acc66564fa12e5c Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 6 Jan 2025 10:42:58 +0100 Subject: [PATCH 004/105] Allow uppercase bech32 HRP Previously, we would fail parsing `Offer`s if the HRP didn't match our expected (lowercase) HRP. 
Here, we relax this check in accordance with the spec to also allow all-uppercase HRPs. --- lightning/src/offers/parse.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/lightning/src/offers/parse.rs b/lightning/src/offers/parse.rs index 3828ecbdffc..bd4c7283a82 100644 --- a/lightning/src/offers/parse.rs +++ b/lightning/src/offers/parse.rs @@ -58,7 +58,8 @@ mod sealed { let parsed = CheckedHrpstring::new::(encoded.as_ref())?; let hrp = parsed.hrp(); - if hrp.as_str() != Self::BECH32_HRP { + // Compare the lowercase'd iter to allow for all-uppercase HRPs + if hrp.lowercase_char_iter().ne(Self::BECH32_HRP.chars()) { return Err(Bolt12ParseError::InvalidBech32Hrp); }

From 7272969c19557ad1960b7feb97607c379560aa0d Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 6 Jan 2025 11:37:55 +0100 Subject: [PATCH 005/105] Add test coverage for upper-/mixed-case `Offer` encodings

.. to ensure we're able to decode all-uppercase HRPs and reject mixed-case encodings. --- lightning/src/offers/parse.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+)

diff --git a/lightning/src/offers/parse.rs b/lightning/src/offers/parse.rs index bd4c7283a82..f3c481a9f95 100644 --- a/lightning/src/offers/parse.rs +++ b/lightning/src/offers/parse.rs @@ -234,6 +234,7 @@ impl From for Bolt12ParseError { mod bolt12_tests { use super::Bolt12ParseError; use crate::offers::offer::Offer; + use bech32::primitives::decode::{CheckedHrpstringError, UncheckedHrpstringError, CharError}; #[test] fn encodes_offer_as_bech32_without_checksum() { @@ -250,6 +251,9 @@ mod bolt12_tests { // A complete string is valid "lno1pqps7sjqpgtyzm3qv4uxzmtsd3jjqer9wd3hy6tsw35k7msjzfpy7nz5yqcnygrfdej82um5wf5k2uckyypwa3eyt44h6txtxquqh7lz5djge4afgfjn7k4rgrkuag0jsd5xvxg", + // Uppercase is valid + "LNO1PQPS7SJQPGTYZM3QV4UXZMTSD3JJQER9WD3HY6TSW35K7MSJZFPY7NZ5YQCNYGRFDEJ82UM5WF5K2UCKYYPWA3EYT44H6TXTXQUQH7LZ5DJGE4AFGFJN7K4RGRKUAG0JSD5XVXG", + // + can join anywhere "l+no1pqps7sjqpgtyzm3qv4uxzmtsd3jjqer9wd3hy6tsw35k7msjzfpy7nz5yqcnygrfdej82um5wf5k2uckyypwa3eyt44h6txtxquqh7lz5djge4afgfjn7k4rgrkuag0jsd5xvxg", @@ -283,6 +287,16 @@ mod bolt12_tests { } } } + + #[test] + fn fails_parsing_bech32_encoded_offers_with_mixed_casing() { + // We assert that mixed-case encoding fails to parse. + let mixed_case_offer = "LnO1PqPs7sJqPgTyZm3qV4UxZmTsD3JjQeR9Wd3hY6TsW35k7mSjZfPy7nZ5YqCnYgRfDeJ82uM5Wf5k2uCkYyPwA3EyT44h6tXtXqUqH7Lz5dJgE4AfGfJn7k4rGrKuAg0jSd5xVxG"; + match mixed_case_offer.parse::() { + Ok(_) => panic!("Valid offer: {}", mixed_case_offer), + Err(e) => assert_eq!(e, Bolt12ParseError::Bech32(CheckedHrpstringError::Parse(UncheckedHrpstringError::Char(CharError::MixedCase)))), + } + } } #[cfg(test)]

From aa87886f6f60e45cfd3533c65a09beee6ea5841d Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 7 Jan 2025 15:50:43 +0100 Subject: [PATCH 006/105] Fix overly spammy `TRACE` logging in async onion message event handling

We recently introduced `TRACE`-level logging for event handling. However, in onion messenger we'd now log (twice, actually) every time `process_events_async` is called, which is very spammy. Here we fix this by short-cutting to only proceed when we actually have any event futures to poll.
--- lightning/src/onion_message/messenger.rs | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs index cb4fd105811..f076e6a9da4 100644 --- a/lightning/src/onion_message/messenger.rs +++ b/lightning/src/onion_message/messenger.rs @@ -1445,11 +1445,14 @@ where let future = ResultFuture::Pending(handler(ev)); futures.push(future); } - // Let the `OnionMessageIntercepted` events finish before moving on to peer_connecteds - let res = MultiResultFuturePoller::new(futures).await; - log_trace!(self.logger, "Done handling events async, results: {:?}", res); - let mut res_iter = res.iter().skip(intercepted_msgs_offset); - drop_handled_events_and_abort!(self, res_iter, self.pending_intercepted_msgs_events); + + if !futures.is_empty() { + // Let the `OnionMessageIntercepted` events finish before moving on to peer_connecteds + let res = MultiResultFuturePoller::new(futures).await; + log_trace!(self.logger, "Done handling events async, results: {:?}", res); + let mut res_iter = res.iter().skip(intercepted_msgs_offset); + drop_handled_events_and_abort!(self, res_iter, self.pending_intercepted_msgs_events); + } } { @@ -1472,10 +1475,13 @@ where let future = ResultFuture::Pending(handler(event)); futures.push(future); } - let res = MultiResultFuturePoller::new(futures).await; - log_trace!(self.logger, "Done handling events async, results: {:?}", res); - let mut res_iter = res.iter(); - drop_handled_events_and_abort!(self, res_iter, self.pending_peer_connected_events); + + if !futures.is_empty() { + let res = MultiResultFuturePoller::new(futures).await; + log_trace!(self.logger, "Done handling events async, results: {:?}", res); + let mut res_iter = res.iter(); + drop_handled_events_and_abort!(self, res_iter, self.pending_peer_connected_events); + } } } self.pending_events_processor.store(false, Ordering::Release); From 9b46225551dc3c0f2af8c1128e875e020ebf3717 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Fri, 10 Jan 2025 09:51:31 -0600 Subject: [PATCH 007/105] Support Bolt12Invoice::payment_paths in bindings Lack of bindings support was because the method used to return a slice of tuples, it seems. Now that it returns &[BlindedPaymentPath], bindings should be possible given that they can be generated for Bolt12Invoice::message_paths. --- lightning/src/offers/invoice_macros.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/lightning/src/offers/invoice_macros.rs b/lightning/src/offers/invoice_macros.rs index 4a540c16046..93e62d7adaf 100644 --- a/lightning/src/offers/invoice_macros.rs +++ b/lightning/src/offers/invoice_macros.rs @@ -109,9 +109,6 @@ macro_rules! invoice_accessors_common { ($self: ident, $contents: expr, $invoice /// Blinded paths provide recipient privacy by obfuscating its node id. Note, however, that this /// privacy is lost if a public node id is used for #[doc = concat!("[`", stringify!($invoice_type), "::signing_pubkey`].")] - /// - /// This is not exported to bindings users as slices with non-reference types cannot be ABI - /// matched in another language. 
pub fn payment_paths(&$self) -> &[BlindedPaymentPath] { $contents.payment_paths() }

From c5a9c3c3c80e8e1ca8252f877db3d5b5e73c5b81 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 10 Jan 2025 17:17:15 +0000 Subject: [PATCH 008/105] Clean up fuzz test build to fix disk space usage fuzz CI failures

--- .github/workflows/build.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 04cdc00a9f9..78fe093a1a2 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -229,7 +229,10 @@ jobs: run: | cd fuzz && cargo update -p regex --precise "1.9.6" --verbose && cd .. - name: Sanity check fuzz targets on Rust ${{ env.TOOLCHAIN }} - run: cd fuzz && RUSTFLAGS="--cfg=fuzzing --cfg=secp256k1_fuzz --cfg=hashes_fuzz" cargo test --verbose --color always + run: | + cd fuzz + RUSTFLAGS="--cfg=fuzzing --cfg=secp256k1_fuzz --cfg=hashes_fuzz" cargo test --verbose --color always + cargo clean - name: Run fuzzers run: cd fuzz && ./ci-fuzz.sh && cd ..

From 0a2575f535bd4b06935d36ea5a3a23f8e62a38c9 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 28 Dec 2024 00:24:31 +0000 Subject: [PATCH 009/105] Fix max-value overflows in `set_max_path_length`

When either the amount or the `max_total_cltv_expiry_delta` is set to max-value, `set_max_path_length` can trigger overflows in `build_onion_payloads_callback`, leading to debug-panics. --- lightning/src/ln/onion_utils.rs | 35 ++++++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 9 deletions(-)

diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index 960209c0e0a..a3ffffa3bc2 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -239,7 +239,7 @@ where // the intended recipient). 
let value_msat = if cur_value_msat == 0 { hop.fee_msat } else { cur_value_msat }; let cltv = if cur_cltv == starting_htlc_offset { - hop.cltv_expiry_delta + starting_htlc_offset + hop.cltv_expiry_delta.saturating_add(starting_htlc_offset) } else { cur_cltv }; @@ -307,7 +307,7 @@ where if cur_value_msat >= 21000000 * 100000000 * 1000 { return Err(APIError::InvalidRoute { err: "Channel fees overflowed?".to_owned() }); } - cur_cltv += hop.cltv_expiry_delta as u32; + cur_cltv = cur_cltv.saturating_add(hop.cltv_expiry_delta as u32); if cur_cltv >= 500000000 { return Err(APIError::InvalidRoute { err: "Channel CLTV overflowed?".to_owned() }); } @@ -333,10 +333,10 @@ pub(crate) fn set_max_path_length( .saturating_add(PAYLOAD_HMAC_LEN); const OVERPAY_ESTIMATE_MULTIPLER: u64 = 3; - let final_value_msat_with_overpay_buffer = core::cmp::max( - route_params.final_value_msat.saturating_mul(OVERPAY_ESTIMATE_MULTIPLER), - MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY, - ); + let final_value_msat_with_overpay_buffer = route_params + .final_value_msat + .saturating_mul(OVERPAY_ESTIMATE_MULTIPLER) + .clamp(MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY, 0x1000_0000); let blinded_tail_opt = route_params .payment_params @@ -351,13 +351,15 @@ pub(crate) fn set_max_path_length( excess_final_cltv_expiry_delta: 0, }); + let cltv_expiry_delta = + core::cmp::min(route_params.payment_params.max_total_cltv_expiry_delta, 0x1000_0000); let unblinded_route_hop = RouteHop { pubkey: PublicKey::from_slice(&[2; 33]).unwrap(), node_features: NodeFeatures::empty(), short_channel_id: 42, channel_features: ChannelFeatures::empty(), fee_msat: final_value_msat_with_overpay_buffer, - cltv_expiry_delta: route_params.payment_params.max_total_cltv_expiry_delta, + cltv_expiry_delta, maybe_announced_channel: false, }; let mut num_reserved_bytes: usize = 0; @@ -1280,7 +1282,7 @@ fn decode_next_hop, N: NextPacketBytes>( mod tests { use crate::io; use crate::ln::msgs; - use crate::routing::router::{Path, Route, RouteHop}; + use crate::routing::router::{Path, PaymentParameters, Route, RouteHop}; use crate::types::features::{ChannelFeatures, NodeFeatures}; use crate::types::payment::PaymentHash; use crate::util::ser::{VecWriter, Writeable, Writer}; @@ -1292,7 +1294,7 @@ mod tests { use bitcoin::secp256k1::Secp256k1; use bitcoin::secp256k1::{PublicKey, SecretKey}; - use super::OnionKeys; + use super::*; fn get_test_session_key() -> SecretKey { let hex = "4141414141414141414141414141414141414141414141414141414141414141"; @@ -1607,4 +1609,19 @@ mod tests { writer.write_all(&self.data[..]) } } + + #[test] + fn max_length_with_no_cltv_limit() { + // While users generally shouldn't do this, we shouldn't overflow when + // `max_total_cltv_expiry_delta` is `u32::MAX`. 
+ let recipient = PublicKey::from_slice(&[2; 33]).unwrap(); + let mut route_params = RouteParameters { + payment_params: PaymentParameters::for_keysend(recipient, u32::MAX, true), + final_value_msat: u64::MAX, + max_total_routing_fee_msat: Some(u64::MAX), + }; + route_params.payment_params.max_total_cltv_expiry_delta = u32::MAX; + let recipient_onion = RecipientOnionFields::spontaneous_empty(); + set_max_path_length(&mut route_params, &recipient_onion, None, None, 42).unwrap(); + } } From 886177aa6bac88fd92d6e29f360cd77b9810bc99 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 11 Jan 2025 17:33:15 +0000 Subject: [PATCH 010/105] Drop spurious debug assertion in sweeping logic With the `Confirm` interface, transaction confirmations can come in at any time, so asserting that a confirmation is more recent than the last time we broadcasted a transaction can lead to spurious assertion failures. --- lightning/src/util/sweep.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index 49c293054c0..b61306194df 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -173,7 +173,6 @@ impl OutputSpendStatus { latest_broadcast_height, .. } => { - debug_assert!(confirmation_height >= *latest_broadcast_height); *self = Self::PendingThresholdConfirmations { first_broadcast_hash: *first_broadcast_hash, latest_broadcast_height: *latest_broadcast_height, From 276d08245a8c590142c57bf03ff9e9fa4723b2a0 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Mon, 13 Jan 2025 10:44:16 +0100 Subject: [PATCH 011/105] Move `lightning-transaction-sync` tests to dedicated script .. and bump its MSRV to 1.75. Recently, `rustls` bumped their MSRV to 1.71. As we depend on them and don't want to continuously pin this security-critical dependency back, we have no choice left but to bump the MSRV for `lightning-transaction-sync` to a version >= 1.71, too. Here, we hence move the `lightning-transaction-sync` tests to a dedicated script and propose to introduce a secondary MSRV of 1.75. We chose this particular version, because: a) it's > 1 year old b) it provides a buffer to 1.71, i.e., if some crate bumped to a version > 1.71, there is a chance we don't immediately have to react again c) it stabilized `async fn`s in traits (see https://blog.rust-lang.org/2023/12/21/async-fn-rpit-in-traits.html), which might become handy for related (BDK) crates, which hopefully will adopt the same target. --- .github/workflows/build.yml | 27 ++++++++++++++++-- Cargo.toml | 2 +- ci/ci-tests.sh | 41 ++------------------------- ci/ci-tx-sync-tests.sh | 39 +++++++++++++++++++++++++ lightning-transaction-sync/Cargo.toml | 10 +++++-- msrv-no-dev-deps-check/Cargo.toml | 1 - no-std-check/Cargo.toml | 4 --- 7 files changed, 74 insertions(+), 50 deletions(-) create mode 100755 ci/ci-tx-sync-tests.sh diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 78fe093a1a2..83ae38a1b9e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -18,7 +18,7 @@ jobs: fail-fast: false matrix: platform: [ ubuntu-latest, windows-latest, macos-latest ] - toolchain: [ stable, beta, 1.63.0 ] # 1.63.0 is the MSRV for all crates. + toolchain: [ stable, beta, 1.63.0 ] # 1.63.0 is the MSRV for all crates but `lightning-transaction-sync`. 
exclude: - platform: windows-latest toolchain: 1.63.0 @@ -44,6 +44,27 @@ jobs: - name: Set RUSTFLAGS to deny warnings if: "matrix.toolchain == '1.63.0'" run: echo "RUSTFLAGS=-D warnings" >> "$GITHUB_ENV" + - name: Run CI script + shell: bash # Default on Winblows is powershell + run: CI_ENV=1 CI_MINIMIZE_DISK_USAGE=1 ./ci/ci-tests.sh + + build-tx-sync: + strategy: + fail-fast: false + matrix: + platform: [ ubuntu-latest, macos-latest ] + toolchain: [ stable, beta, 1.75.0 ] # 1.75.0 is the MSRV for `lightning-transaction-sync`. + runs-on: ${{ matrix.platform }} + steps: + - name: Checkout source code + uses: actions/checkout@v4 + - name: Install Rust ${{ matrix.toolchain }} toolchain + run: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ matrix.toolchain }} + rustup override set ${{ matrix.toolchain }} + - name: Set RUSTFLAGS to deny warnings + if: "matrix.toolchain == '1.75.0'" + run: echo "RUSTFLAGS=-D warnings" >> "$GITHUB_ENV" - name: Enable caching for bitcoind id: cache-bitcoind uses: actions/cache@v4 @@ -57,7 +78,7 @@ jobs: path: bin/electrs-${{ runner.os }}-${{ runner.arch }} key: electrs-${{ runner.os }}-${{ runner.arch }} - name: Download bitcoind/electrs - if: "matrix.platform != 'windows-latest' && (steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true')" + if: "steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true'" run: | source ./contrib/download_bitcoind_electrs.sh mkdir bin @@ -69,7 +90,7 @@ jobs: echo "ELECTRS_EXE=$( pwd )/bin/electrs-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" - name: Run CI script shell: bash # Default on Winblows is powershell - run: CI_ENV=1 CI_MINIMIZE_DISK_USAGE=1 ./ci/ci-tests.sh + run: CI_ENV=1 CI_MINIMIZE_DISK_USAGE=1 ./ci/ci-tx-sync-tests.sh coverage: strategy: diff --git a/Cargo.toml b/Cargo.toml index de0f39a3d25..dc3eb92c7e2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,7 +13,6 @@ members = [ "lightning-background-processor", "lightning-rapid-gossip-sync", "lightning-custom-message", - "lightning-transaction-sync", "lightning-macros", "lightning-dns-resolver", "lightning-liquidity", @@ -21,6 +20,7 @@ members = [ ] exclude = [ + "lightning-transaction-sync", "no-std-check", "msrv-no-dev-deps-check", "bench", diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh index b7e09246ae3..f4987569fda 100755 --- a/ci/ci-tests.sh +++ b/ci/ci-tests.sh @@ -2,7 +2,6 @@ set -eox pipefail RUSTC_MINOR_VERSION=$(rustc --version | awk '{ split($2,a,"."); print a[2] }') -HOST_PLATFORM="$(rustc --version --verbose | grep "host:" | awk '{ print $2 }')" # Some crates require pinning to meet our MSRV even for our downstream users, # which we do here. @@ -11,19 +10,6 @@ function PIN_RELEASE_DEPS { # Starting with version 1.39.0, the `tokio` crate has an MSRV of rustc 1.70.0 [ "$RUSTC_MINOR_VERSION" -lt 70 ] && cargo update -p tokio --precise "1.38.1" --verbose - # Starting with version 0.7.12, the `tokio-util` crate has an MSRV of rustc 1.70.0 - [ "$RUSTC_MINOR_VERSION" -lt 70 ] && cargo update -p tokio-util --precise "0.7.11" --verbose - - # url 2.5.3 switched to idna 1.0.3 and ICU4X, which requires rustc 1.67 or newer. 
- # Here we opt to keep using unicode-rs by pinning idna_adapter as described here: https://docs.rs/crate/idna_adapter/1.2.0 - [ "$RUSTC_MINOR_VERSION" -lt 67 ] && cargo update -p idna_adapter --precise "1.1.0" --verbose - - # indexmap 2.6.0 upgraded to hashbrown 0.15, which unfortunately bumped their MSRV to rustc 1.65 with the 0.15.1 release (and 2.7.0 was released since). - [ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p indexmap@2.7.0 --precise "2.5.0" --verbose - - # Starting with version 0.23.20, the `rustls` crate has an MSRV of rustc 1.71.0 - [ "$RUSTC_MINOR_VERSION" -lt 71 ] && cargo update -p rustls@0.23.20 --precise "0.23.19" --verbose - return 0 # Don't fail the script if our rustc is higher than the last check } @@ -35,15 +21,12 @@ PIN_RELEASE_DEPS # pin the release dependencies in our main workspace # The addr2line v0.21 crate (a dependency of `backtrace` starting with 0.3.69) relies on rustc 1.65 [ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p backtrace --precise "0.3.68" --verbose -# Starting with version 0.5.9 (there is no .6-.8), the `home` crate has an MSRV of rustc 1.70.0. -[ "$RUSTC_MINOR_VERSION" -lt 70 ] && cargo update -p home --precise "0.5.5" --verbose - # proptest 1.3.0 requires rustc 1.64.0 [ "$RUSTC_MINOR_VERSION" -lt 64 ] && cargo update -p proptest --precise "1.2.0" --verbose export RUST_BACKTRACE=1 -echo -e "\n\nChecking the full workspace." +echo -e "\n\nChecking the workspace, except lightning-transaction-sync." cargo check --verbose --color always # When the workspace members change, make sure to update the list here as well @@ -58,7 +41,6 @@ WORKSPACE_MEMBERS=( lightning-background-processor lightning-rapid-gossip-sync lightning-custom-message - lightning-transaction-sync lightning-macros lightning-dns-resolver lightning-liquidity @@ -83,25 +65,6 @@ cargo check -p lightning-block-sync --verbose --color always --features rpc-clie cargo test -p lightning-block-sync --verbose --color always --features rpc-client,rest-client,tokio cargo check -p lightning-block-sync --verbose --color always --features rpc-client,rest-client,tokio -if [[ "$HOST_PLATFORM" != *windows* ]]; then - echo -e "\n\nChecking Transaction Sync Clients with features." - cargo check -p lightning-transaction-sync --verbose --color always --features esplora-blocking - cargo check -p lightning-transaction-sync --verbose --color always --features esplora-async - cargo check -p lightning-transaction-sync --verbose --color always --features esplora-async-https - cargo check -p lightning-transaction-sync --verbose --color always --features electrum - - if [ -z "$CI_ENV" ] && [[ -z "$BITCOIND_EXE" || -z "$ELECTRS_EXE" ]]; then - echo -e "\n\nSkipping testing Transaction Sync Clients due to BITCOIND_EXE or ELECTRS_EXE being unset." - cargo check -p lightning-transaction-sync --tests - else - echo -e "\n\nTesting Transaction Sync Clients with features." 
- cargo test -p lightning-transaction-sync --verbose --color always --features esplora-blocking - cargo test -p lightning-transaction-sync --verbose --color always --features esplora-async - cargo test -p lightning-transaction-sync --verbose --color always --features esplora-async-https - cargo test -p lightning-transaction-sync --verbose --color always --features electrum - fi -fi - echo -e "\n\nTest futures builds" cargo test -p lightning-background-processor --verbose --color always --features futures cargo test -p lightning-background-processor --verbose --color always --features futures --no-default-features @@ -145,7 +108,7 @@ cargo test -p lightning-invoice --verbose --color always --no-default-features - echo -e "\n\nTesting no_std build on a downstream no-std crate" # check no-std compatibility across dependencies pushd no-std-check -cargo check --verbose --color always --features lightning-transaction-sync +cargo check --verbose --color always [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean popd diff --git a/ci/ci-tx-sync-tests.sh b/ci/ci-tx-sync-tests.sh new file mode 100755 index 00000000000..3ca2fae6725 --- /dev/null +++ b/ci/ci-tx-sync-tests.sh @@ -0,0 +1,39 @@ +#!/bin/bash +set -eox pipefail + +RUSTC_MINOR_VERSION=$(rustc --version | awk '{ split($2,a,"."); print a[2] }') + +pushd lightning-transaction-sync + +# Some crates require pinning to meet our MSRV even for our downstream users, +# which we do here. +# Further crates which appear only as dev-dependencies are pinned further down. +function PIN_RELEASE_DEPS { + return 0 # Don't fail the script if our rustc is higher than the last check +} + +PIN_RELEASE_DEPS # pin the release dependencies + +# Starting with version 0.5.11, the `home` crate has an MSRV of rustc 1.81.0. +[ "$RUSTC_MINOR_VERSION" -lt 81 ] && cargo update -p home --precise "0.5.9" --verbose + +export RUST_BACKTRACE=1 + +echo -e "\n\nChecking Transaction Sync Clients with features." +cargo check --verbose --color always --features esplora-blocking +cargo check --verbose --color always --features esplora-async +cargo check --verbose --color always --features esplora-async-https +cargo check --verbose --color always --features electrum + +if [ -z "$CI_ENV" ] && [[ -z "$BITCOIND_EXE" || -z "$ELECTRS_EXE" ]]; then + echo -e "\n\nSkipping testing Transaction Sync Clients due to BITCOIND_EXE or ELECTRS_EXE being unset." + cargo check --tests +else + echo -e "\n\nTesting Transaction Sync Clients with features." + cargo test --verbose --color always --features esplora-blocking + cargo test --verbose --color always --features esplora-async + cargo test --verbose --color always --features esplora-async-https + cargo test --verbose --color always --features electrum +fi + +popd diff --git a/lightning-transaction-sync/Cargo.toml b/lightning-transaction-sync/Cargo.toml index 2e604894108..71922c92fbb 100644 --- a/lightning-transaction-sync/Cargo.toml +++ b/lightning-transaction-sync/Cargo.toml @@ -37,5 +37,11 @@ tokio = { version = "1.35.0", features = ["macros"] } [target.'cfg(not(target_os = "windows"))'.dev-dependencies] electrsd = { version = "0.28.0", default-features = false, features = ["legacy"] } -[lints] -workspace = true +[lints.rust.unexpected_cfgs] +level = "forbid" +# When adding a new cfg attribute, ensure that it is added to this list. +# +# Note that Cargo automatically declares corresponding cfgs for every feature +# defined in the member-level [features] tables as "expected". 
+check-cfg = [ +] diff --git a/msrv-no-dev-deps-check/Cargo.toml b/msrv-no-dev-deps-check/Cargo.toml index be594f6e5c4..3a4acc675e6 100644 --- a/msrv-no-dev-deps-check/Cargo.toml +++ b/msrv-no-dev-deps-check/Cargo.toml @@ -6,7 +6,6 @@ edition = "2021" [dependencies] lightning = { path = "../lightning" } lightning-block-sync = { path = "../lightning-block-sync", features = [ "rest-client", "rpc-client" ] } -lightning-transaction-sync = { path = "../lightning-transaction-sync", features = [ "esplora-async-https", "electrum" ] } lightning-invoice = { path = "../lightning-invoice" } lightning-net-tokio = { path = "../lightning-net-tokio" } lightning-persister = { path = "../lightning-persister" } diff --git a/no-std-check/Cargo.toml b/no-std-check/Cargo.toml index bc43e63404a..f97673414d9 100644 --- a/no-std-check/Cargo.toml +++ b/no-std-check/Cargo.toml @@ -11,7 +11,3 @@ lightning = { path = "../lightning", default-features = false } lightning-invoice = { path = "../lightning-invoice", default-features = false } lightning-rapid-gossip-sync = { path = "../lightning-rapid-gossip-sync", default-features = false } lightning-background-processor = { path = "../lightning-background-processor", features = ["futures"], default-features = false } - -# Obviously lightning-transaction-sync doesn't support no-std, but it should build -# even if lightning is built with no-std. -lightning-transaction-sync = { path = "../lightning-transaction-sync", optional = true } From de15b0f31d2db0a080eae0eb8d57ce908b45555c Mon Sep 17 00:00:00 2001 From: Matt Morehouse Date: Tue, 14 Jan 2025 15:40:07 -0600 Subject: [PATCH 012/105] Fix package splitting logic When scanning confirmed transactions for spends that conflict with our existing packages, we should continue scanning after detecting the first conflicting package since a transaction can conflict with multiple packages. This ensures that we remove *all* inputs from our packages that have already been spent by the counterparty so that valid claim transactions are generated. Fixes https://github.com/lightningdevkit/rust-lightning/issues/3537. 
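
For illustration, the corrected scan behaves roughly like the following sketch (hypothetical names and types, not the actual `OnchainTxHandler` code; the real fix below simply drops an early `break`):

```rust
// Hypothetical sketch: a single confirmed transaction can conflict with
// (spend inputs claimed by) several of our packages at once, so the scan
// must visit every package rather than stopping at the first match.
type Outpoint = (/* txid */ [u8; 32], /* vout */ u32);

fn remove_conflicted_inputs(confirmed_inputs: &[Outpoint], packages: &mut Vec<Vec<Outpoint>>) {
    for spent in confirmed_inputs {
        for package in packages.iter_mut() {
            // Drop any claim input the counterparty already spent. Crucially,
            // there is no `break` after a hit: later packages may also be
            // conflicted by this same transaction.
            package.retain(|input| input != spent);
        }
    }
}
```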
--- lightning/src/chain/onchaintx.rs | 1 - lightning/src/ln/functional_tests.rs | 259 +++++++++++++++++++++++++++ 2 files changed, 259 insertions(+), 1 deletion(-) diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index cf7f8d7bfe0..759668cfa9c 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -964,7 +964,6 @@ impl OnchainTxHandler { self.pending_claim_events.retain(|entry| entry.0 != *claim_id); } } - break; //No need to iterate further, either tx is our or their } else { panic!("Inconsistencies between pending_claim_requests map and claimable_outpoints map"); } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index ac43efe4499..e52870cf19d 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -18,6 +18,7 @@ use crate::chain::channelmonitor; use crate::chain::channelmonitor::{Balance, ChannelMonitorUpdateStep, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY}; use crate::chain::transaction::OutPoint; use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider}; +use crate::events::bump_transaction::WalletSource; use crate::events::{Event, FundingInfo, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, ClosureReason, HTLCDestination, PaymentFailureReason}; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentPreimage, PaymentSecret, PaymentHash}; @@ -2774,6 +2775,264 @@ fn claim_htlc_outputs() { assert_eq!(nodes[1].node.list_channels().len(), 0); } +// Test that the HTLC package logic removes HTLCs from the package when they are claimed by the +// counterparty, even when the counterparty claims HTLCs from multiple packages in a single +// transaction. +// +// This is a regression test for https://github.com/lightningdevkit/rust-lightning/issues/3537. +#[test] +fn test_multiple_package_conflicts() { + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let mut user_cfg = test_default_channel_config(); + + // Anchor channels are required so that multiple HTLC-Successes can be aggregated into a single + // transaction. + user_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + user_cfg.manually_accept_inbound_channels = true; + + let node_chanmgrs = + create_node_chanmgrs(3, &node_cfgs, &[Some(user_cfg), Some(user_cfg), Some(user_cfg)]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + // Since we're using anchor channels, make sure each node has a UTXO for paying fees. 
+ let coinbase_tx = Transaction { + version: Version::TWO, + lock_time: LockTime::ZERO, + input: vec![TxIn { ..Default::default() }], + output: vec![ + TxOut { + value: Amount::ONE_BTC, + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }, + TxOut { + value: Amount::ONE_BTC, + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }, + TxOut { + value: Amount::ONE_BTC, + script_pubkey: nodes[2].wallet_source.get_change_script().unwrap(), + }, + ], + }; + nodes[0].wallet_source.add_utxo( + bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, + coinbase_tx.output[0].value, + ); + nodes[1].wallet_source.add_utxo( + bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 1 }, + coinbase_tx.output[1].value, + ); + nodes[2].wallet_source.add_utxo( + bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 2 }, + coinbase_tx.output[2].value, + ); + + // Create the network. + // 0 -- 1 -- 2 + // + // Payments will be routed from node 0 to node 2. Node 2 will force close and spend HTLCs from + // two of node 1's packages. We will then verify that node 1 correctly removes the conflicting + // HTLC spends from its packages. + const CHAN_CAPACITY: u64 = 10_000_000; + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, CHAN_CAPACITY, 0); + let (_, _, cid_1_2, funding_tx_1_2) = + create_announced_chan_between_nodes_with_value(&nodes, 1, 2, CHAN_CAPACITY, 0); + + // Ensure all nodes are at the same initial height. + let node_max_height = nodes.iter().map(|node| node.best_block_info().1).max().unwrap(); + for node in &nodes { + let blocks_to_mine = node_max_height - node.best_block_info().1; + if blocks_to_mine > 0 { + connect_blocks(node, blocks_to_mine); + } + } + + // Route HTLC 1. + let (preimage_1, payment_hash_1, ..) = + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); + + // Route HTLCs 2 and 3, with CLTVs 1 higher than HTLC 1. The higher CLTVs will cause these + // HTLCs to be included in a different package than HTLC 1. + connect_blocks(&nodes[0], 1); + connect_blocks(&nodes[1], 1); + connect_blocks(&nodes[2], 1); + let (preimage_2, payment_hash_2, ..) = + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 900_000_000); + + // Mine blocks until HTLC 1 times out in 1 block and HTLCs 2 and 3 time out in 2 blocks. + connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1); + + // Node 2 force closes, causing node 1 to group the HTLCs into the following packages: + // Package 1: HTLC 1 + // Package 2: HTLCs 2 and 3 + let node2_commit_tx = get_local_commitment_txn!(nodes[2], cid_1_2); + assert_eq!(node2_commit_tx.len(), 1); + let node2_commit_tx = &node2_commit_tx[0]; + check_spends!(node2_commit_tx, funding_tx_1_2); + mine_transaction(&nodes[1], node2_commit_tx); + check_closed_event( + &nodes[1], + 1, + ClosureReason::CommitmentTxConfirmed, + false, + &[nodes[2].node.get_our_node_id()], + CHAN_CAPACITY, + ); + check_closed_broadcast!(nodes[1], true); + check_added_monitors(&nodes[1], 1); + + // Node 1 should immediately claim package 1 but has to wait a block to claim package 2. + let timeout_tx = nodes[1].tx_broadcaster.txn_broadcast(); + assert_eq!(timeout_tx.len(), 1); + check_spends!(timeout_tx[0], node2_commit_tx); + assert_eq!(timeout_tx[0].input.len(), 1); + + // After one block, node 1 should also attempt to claim package 2. 
+ connect_blocks(&nodes[1], 1); + let timeout_tx = nodes[1].tx_broadcaster.txn_broadcast(); + assert_eq!(timeout_tx.len(), 1); + check_spends!(timeout_tx[0], node2_commit_tx); + assert_eq!(timeout_tx[0].input.len(), 2); + + // Force node 2 to broadcast an aggregated HTLC-Success transaction spending HTLCs 1 and 2. + // This will conflict with both of node 1's HTLC packages. + { + let broadcaster = &node_cfgs[2].tx_broadcaster; + let fee_estimator = &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator); + let logger = &node_cfgs[2].logger; + let monitor = get_monitor!(nodes[2], cid_1_2); + monitor.provide_payment_preimage_unsafe_legacy( + &payment_hash_1, + &preimage_1, + broadcaster, + fee_estimator, + logger, + ); + monitor.provide_payment_preimage_unsafe_legacy( + &payment_hash_2, + &preimage_2, + broadcaster, + fee_estimator, + logger, + ); + } + mine_transaction(&nodes[2], node2_commit_tx); + check_closed_event( + &nodes[2], + 1, + ClosureReason::CommitmentTxConfirmed, + false, + &[nodes[1].node.get_our_node_id()], + CHAN_CAPACITY, + ); + check_closed_broadcast!(nodes[2], true); + check_added_monitors(&nodes[2], 1); + + let process_bump_event = |node: &Node| { + let events = node.chain_monitor.chain_monitor.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + let bump_event = match &events[0] { + Event::BumpTransaction(bump_event) => bump_event, + _ => panic!("Unexpected event"), + }; + node.bump_tx_handler.handle_event(bump_event); + + let mut tx = node.tx_broadcaster.txn_broadcast(); + assert_eq!(tx.len(), 1); + tx.pop().unwrap() + }; + + let conflict_tx = process_bump_event(&nodes[2]); + assert_eq!(conflict_tx.input.len(), 3); + assert_eq!(conflict_tx.input[0].previous_output.txid, node2_commit_tx.compute_txid()); + assert_eq!(conflict_tx.input[1].previous_output.txid, node2_commit_tx.compute_txid()); + assert_eq!(conflict_tx.input[2].previous_output.txid, coinbase_tx.compute_txid()); + + // Mine node 2's aggregated HTLC-Success transaction on node 1, causing the package splitting + // logic to run. Package 2 should get split so that only HTLC 3 gets claimed. + mine_transaction(&nodes[1], &conflict_tx); + + // Check that node 1 only attempts to claim HTLC 3 now. There should be no conflicting spends + // in the newly broadcasted transaction. + let broadcasted_txs = nodes[1].tx_broadcaster.txn_broadcast(); + assert_eq!(broadcasted_txs.len(), 1); + let txins = &broadcasted_txs[0].input; + assert_eq!(txins.len(), 1); + assert_eq!(txins[0].previous_output.txid, node2_commit_tx.compute_txid()); + for conflict_in in &conflict_tx.input { + assert_ne!(txins[0].previous_output, conflict_in.previous_output); + } + + // Node 1 should also extract the preimages from the mined transaction and claim them upstream. + // + // Because two update_fulfill_htlc messages are created at once, the commitment_signed_dance + // macro doesn't work properly and we must process the first update_fulfill_htlc manually. 
+ let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + nodes[0].node.handle_update_fulfill_htlc( + nodes[1].node.get_our_node_id(), + &updates.update_fulfill_htlcs[0], + ); + nodes[0] + .node + .handle_commitment_signed(nodes[1].node.get_our_node_id(), &updates.commitment_signed); + check_added_monitors(&nodes[0], 1); + + let (revoke_ack, commit_signed) = + get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); + nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &revoke_ack); + nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &commit_signed); + check_added_monitors(&nodes[1], 4); + + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + let revoke_ack = match &events[1] { + MessageSendEvent::SendRevokeAndACK { node_id: _, msg } => msg, + _ => panic!("Unexpected event"), + }; + nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), revoke_ack); + expect_payment_sent!(nodes[0], preimage_1); + + let updates = match &events[0] { + MessageSendEvent::UpdateHTLCs { node_id: _, updates } => updates, + _ => panic!("Unexpected event"), + }; + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + nodes[0].node.handle_update_fulfill_htlc( + nodes[1].node.get_our_node_id(), + &updates.update_fulfill_htlcs[0], + ); + commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); + expect_payment_sent!(nodes[0], preimage_2); + + let mut events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + expect_payment_forwarded( + events.pop().unwrap(), + &nodes[1], + &nodes[0], + &nodes[2], + Some(1000), + None, + false, + true, + false, + ); + expect_payment_forwarded( + events.pop().unwrap(), + &nodes[1], + &nodes[0], + &nodes[2], + Some(1000), + None, + false, + true, + false, + ); +} + #[test] fn test_htlc_on_chain_success() { // Test that in case of a unilateral close onchain, we detect the state of output and pass From e290d44e16c26da6f41137a14a2fe0c74a6934d0 Mon Sep 17 00:00:00 2001 From: Duncan Dean Date: Fri, 10 Jan 2025 07:57:05 +0200 Subject: [PATCH 013/105] ci: silence unnecessary_map_or lint as solution requires MSRV >= 1.70 Rust 1.84.0 was recently released along with some new clippy lints, one of which is `unnecessary_map_or`. Unfortunately this lint suggests using `Option::is_some_and` as a fix, but this is only available in Rust version >= 1.70, while we still have an MSRV of 1.63. So we silence that lint for now. We'd still like our lint CI to use stable Rust so that we can benefit from new lint checks which may be helpful and don't require an MSRV bump, but sometimes new lints (like in this case) do. 
See: https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_map_or https://doc.rust-lang.org/std/option/enum.Option.html#method.is_some_and --- ci/check-lint.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/ci/check-lint.sh b/ci/check-lint.sh index b7f1bb909f3..b66a856797a 100755 --- a/ci/check-lint.sh +++ b/ci/check-lint.sh @@ -93,4 +93,5 @@ RUSTFLAGS='-D warnings' cargo clippy -- \ -A clippy::unnecessary_to_owned \ -A clippy::unnecessary_unwrap \ -A clippy::unused_unit \ - -A clippy::useless_conversion + -A clippy::useless_conversion \ + -A clippy::unnecessary_map_or `# to be removed once we hit MSRV 1.70`

From 82022509d0ff280115d4d18baad130b80bf48b79 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Tue, 14 Jan 2025 11:15:21 +0100 Subject: [PATCH 014/105] `liquidity`: Allow setting `process_events` callback in `c_bindings`

To trigger message processing, we previously had the user set a callback to `PeerManager::process_events` via an `Fn()` callback. This is however not supported by `c_bindings`. Here, we therefore introduce a `ProcessMessagesCallback` trait that can be used via `LiquidityManager::set_process_msgs_callback_fn`, which is exposed in `c_bindings`. --- lightning-liquidity/src/manager.rs | 69 ++---------------------- lightning-liquidity/src/message_queue.rs | 38 ++++++++----- 2 files changed, 29 insertions(+), 78 deletions(-)

diff --git a/lightning-liquidity/src/manager.rs b/lightning-liquidity/src/manager.rs index 1e467c302de..a4c13033370 100644 --- a/lightning-liquidity/src/manager.rs +++ b/lightning-liquidity/src/manager.rs @@ -7,7 +7,7 @@ use crate::lsps0::ser::{ LSPS_MESSAGE_TYPE_ID, }; use crate::lsps0::service::LSPS0ServiceHandler; -use crate::message_queue::MessageQueue; +use crate::message_queue::{MessageQueue, ProcessMessagesCallback}; use crate::lsps1::client::{LSPS1ClientConfig, LSPS1ClientHandler}; use crate::lsps1::msgs::LSPS1Message; @@ -17,7 +17,7 @@ use crate::lsps1::service::{LSPS1ServiceConfig, LSPS1ServiceHandler}; use crate::lsps2::client::{LSPS2ClientConfig, LSPS2ClientHandler}; use crate::lsps2::msgs::LSPS2Message; use crate::lsps2::service::{LSPS2ServiceConfig, LSPS2ServiceHandler}; -use crate::prelude::{new_hash_map, new_hash_set, HashMap, HashSet, ToString, Vec}; +use crate::prelude::{new_hash_map, new_hash_set, Box, HashMap, HashSet, ToString, Vec}; use crate::sync::{Arc, Mutex, RwLock}; use lightning::chain::{self, BestBlock, Confirm, Filter, Listen}; @@ -315,69 +315,8 @@ where { /// ``` /// /// [`PeerManager::process_events`]: lightning::ln::peer_handler::PeerManager::process_events - #[cfg(feature = "std")] - pub fn set_process_msgs_callback(&self, callback: impl Fn() + Send + Sync + 'static) { - self.pending_messages.set_process_msgs_callback(callback) - } - - /// Allows to set a callback that will be called after new messages are pushed to the message - /// queue. - /// - /// Usually, you'll want to use this to call [`PeerManager::process_events`] to clear the - /// message queue. 
For example: - /// - /// ``` - /// # use lightning::io; - /// # use lightning_liquidity::LiquidityManager; - /// # use std::sync::{Arc, RwLock}; - /// # use std::sync::atomic::{AtomicBool, Ordering}; - /// # use std::time::SystemTime; - /// # struct MyStore {} - /// # impl lightning::util::persist::KVStore for MyStore { - /// # fn read(&self, primary_namespace: &str, secondary_namespace: &str, key: &str) -> io::Result> { Ok(Vec::new()) } - /// # fn write(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8]) -> io::Result<()> { Ok(()) } - /// # fn remove(&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool) -> io::Result<()> { Ok(()) } - /// # fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { Ok(Vec::new()) } - /// # } - /// # struct MyEntropySource {} - /// # impl lightning::sign::EntropySource for MyEntropySource { - /// # fn get_secure_random_bytes(&self) -> [u8; 32] { [0u8; 32] } - /// # } - /// # struct MyEventHandler {} - /// # impl MyEventHandler { - /// # async fn handle_event(&self, _: lightning::events::Event) {} - /// # } - /// # #[derive(Eq, PartialEq, Clone, Hash)] - /// # struct MySocketDescriptor {} - /// # impl lightning::ln::peer_handler::SocketDescriptor for MySocketDescriptor { - /// # fn send_data(&mut self, _data: &[u8], _resume_read: bool) -> usize { 0 } - /// # fn disconnect_socket(&mut self) {} - /// # } - /// # type MyBroadcaster = dyn lightning::chain::chaininterface::BroadcasterInterface; - /// # type MyFeeEstimator = dyn lightning::chain::chaininterface::FeeEstimator; - /// # type MyNodeSigner = dyn lightning::sign::NodeSigner; - /// # type MyUtxoLookup = dyn lightning::routing::utxo::UtxoLookup; - /// # type MyFilter = dyn lightning::chain::Filter; - /// # type MyLogger = dyn lightning::util::logger::Logger; - /// # type MyChainMonitor = lightning::chain::chainmonitor::ChainMonitor, Arc, Arc, Arc, Arc>; - /// # type MyPeerManager = lightning::ln::peer_handler::SimpleArcPeerManager, MyLogger>; - /// # type MyNetworkGraph = lightning::routing::gossip::NetworkGraph>; - /// # type MyGossipSync = lightning::routing::gossip::P2PGossipSync, Arc, Arc>; - /// # type MyChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager; - /// # type MyScorer = RwLock, Arc>>; - /// # type MyLiquidityManager = LiquidityManager, Arc, Arc>; - /// # fn setup_background_processing(my_persister: Arc, my_event_handler: Arc, my_chain_monitor: Arc, my_channel_manager: Arc, my_logger: Arc, my_peer_manager: Arc, my_liquidity_manager: Arc) { - /// let process_msgs_pm = Arc::clone(&my_peer_manager); - /// let process_msgs_callback = move || process_msgs_pm.process_events(); - /// - /// my_liquidity_manager.set_process_msgs_callback(process_msgs_callback); - /// # } - /// ``` - /// - /// [`PeerManager::process_events`]: lightning::ln::peer_handler::PeerManager::process_events - #[cfg(not(feature = "std"))] - pub fn set_process_msgs_callback(&self, callback: impl Fn() + 'static) { - self.pending_messages.set_process_msgs_callback(callback) + pub fn set_process_msgs_callback(&self, callback: F) { + self.pending_messages.set_process_msgs_callback(Box::new(callback)); } /// Blocks the current thread until next event is ready and returns it. 
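As a rough sketch of what the new API enables (using hypothetical user-side types; this is not code from the patch), both plain closures and hand-written trait impls can now serve as the callback:

```rust
// Minimal re-statement of the trait added in the diff below (the `std`
// variant), showing why both closures and bindings-friendly structs work.
// `MyPeerManager` is a hypothetical stand-in for the user's peer manager.
pub trait ProcessMessagesCallback: Send + Sync {
	fn call(&self);
}

// Blanket impl: any suitable closure is automatically a callback.
impl<F: Fn() + Send + Sync> ProcessMessagesCallback for F {
	fn call(&self) {
		(self)();
	}
}

struct MyPeerManager;
impl MyPeerManager {
	fn process_events(&self) { /* drive message processing */ }
}

// `c_bindings` users, who cannot express `Fn()` closures, implement the
// trait on a concrete type instead.
struct ProcessEventsCallback(std::sync::Arc<MyPeerManager>);
impl ProcessMessagesCallback for ProcessEventsCallback {
	fn call(&self) {
		self.0.process_events();
	}
}

fn main() {
	let pm = std::sync::Arc::new(MyPeerManager);
	let closure_pm = std::sync::Arc::clone(&pm);
	// Closure path (Rust users):
	let cb: Box<dyn ProcessMessagesCallback> = Box::new(move || closure_pm.process_events());
	cb.call();
	// Concrete-type path (bindings users):
	let cb: Box<dyn ProcessMessagesCallback> = Box::new(ProcessEventsCallback(pm));
	cb.call();
}
```

In the non-`std` build, the `Send + Sync` bounds are dropped via the `define_callback!` macro in the diff that follows.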
diff --git a/lightning-liquidity/src/message_queue.rs b/lightning-liquidity/src/message_queue.rs
index 89dab8a318e..7b61a87bcd4 100644
--- a/lightning-liquidity/src/message_queue.rs
+++ b/lightning-liquidity/src/message_queue.rs
@@ -11,10 +11,7 @@ use bitcoin::secp256k1::PublicKey;
 /// [`LiquidityManager`]: crate::LiquidityManager
 pub struct MessageQueue {
 	queue: Mutex<Vec<(PublicKey, LSPSMessage)>>,
-	#[cfg(feature = "std")]
-	process_msgs_callback: RwLock<Option<Box<dyn Fn() + Send + Sync>>>,
-	#[cfg(not(feature = "std"))]
-	process_msgs_callback: RwLock<Option<Box<dyn Fn()>>>,
+	process_msgs_callback: RwLock<Option<Box<dyn ProcessMessagesCallback>>>,
 }

 impl MessageQueue {
@@ -24,14 +21,8 @@ impl MessageQueue {
 		Self { queue, process_msgs_callback }
 	}

-	#[cfg(feature = "std")]
-	pub(crate) fn set_process_msgs_callback(&self, callback: impl Fn() + Send + Sync + 'static) {
-		*self.process_msgs_callback.write().unwrap() = Some(Box::new(callback));
-	}
-
-	#[cfg(not(feature = "std"))]
-	pub(crate) fn set_process_msgs_callback(&self, callback: impl Fn() + 'static) {
-		*self.process_msgs_callback.write().unwrap() = Some(Box::new(callback));
+	pub(crate) fn set_process_msgs_callback(&self, callback: Box<dyn ProcessMessagesCallback>) {
+		*self.process_msgs_callback.write().unwrap() = Some(callback);
 	}

 	pub(crate) fn get_and_clear_pending_msgs(&self) -> Vec<(PublicKey, LSPSMessage)> {
@@ -45,7 +36,28 @@ impl MessageQueue {
 		}

 		if let Some(process_msgs_callback) = self.process_msgs_callback.read().unwrap().as_ref() {
-			(process_msgs_callback)()
+			process_msgs_callback.call()
 		}
 	}
 }
+
+macro_rules! define_callback { ($($bounds: path),*) => {
+/// A callback which will be called to trigger network message processing.
+///
+/// Usually, this should call [`PeerManager::process_events`].
+///
+/// [`PeerManager::process_events`]: lightning::ln::peer_handler::PeerManager::process_events
+pub trait ProcessMessagesCallback : $($bounds +)* {
+	/// The method which is called.
+	fn call(&self);
+}
+
+impl<F: Fn() $(+ $bounds)*> ProcessMessagesCallback for F {
+	fn call(&self) { (self)(); }
+}
+} }
+
+#[cfg(feature = "std")]
+define_callback!(Send, Sync);
+#[cfg(not(feature = "std"))]
+define_callback!();

From 3386c4b9925c93826f28dd080841403e88910a8d Mon Sep 17 00:00:00 2001
From: Ian Slane
Date: Thu, 8 Aug 2024 07:50:03 -0600
Subject: [PATCH 015/105] Validate amount_msats against invreq amount

Add a check to ensure that the amount_msats in an invoice matches the
amount_msats specified in the invoice_request or offer (or refund). Reject
the invoice as invalid if there is a mismatch between these amounts.
Otherwise, an invoice may be paid with an amount greater than the requested
amount.
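Conceptually, the rule enforced by the parsing changes reduces to the following check (an illustrative sketch only; the actual implementation lives in the `invoice.rs` hunks below):

```rust
// Hypothetical distillation of the new rule: once the invoice_request (or
// refund) pins down an amount, the invoice must match it exactly.
fn check_invoice_amount(
	invoice_amount_msats: u64, requested_amount_msats: Option<u64>,
) -> Result<(), &'static str> {
	match requested_amount_msats {
		Some(requested) if invoice_amount_msats != requested => Err("InvalidAmount"),
		_ => Ok(()),
	}
}

fn main() {
	// Matching amounts parse fine.
	assert!(check_invoice_amount(1_000, Some(1_000)).is_ok());
	// A recipient responding with an inflated amount is rejected.
	assert!(check_invoice_amount(2_000, Some(1_000)).is_err());
}
```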
Co-authored-by: Ian Slane Co-authored-by: Jeffrey Czyz --- lightning/src/ln/offers_tests.rs | 12 ++-- lightning/src/offers/invoice.rs | 77 ++++++++++++++++++++++++- lightning/src/offers/invoice_macros.rs | 8 +++ lightning/src/offers/invoice_request.rs | 56 +++++++++++++++++- lightning/src/offers/parse.rs | 2 +- 5 files changed, 144 insertions(+), 11 deletions(-) diff --git a/lightning/src/ln/offers_tests.rs b/lightning/src/ln/offers_tests.rs index 6455a60b139..35a4c61713c 100644 --- a/lightning/src/ln/offers_tests.rs +++ b/lightning/src/ln/offers_tests.rs @@ -566,7 +566,7 @@ fn creates_and_pays_for_offer_using_two_hop_blinded_path() { human_readable_name: None, }, }); - assert_eq!(invoice_request.amount_msats(), None); + assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), david_id); assert_eq!(reply_path.introduction_node(), &IntroductionNode::NodeId(charlie_id)); @@ -727,7 +727,7 @@ fn creates_and_pays_for_offer_using_one_hop_blinded_path() { human_readable_name: None, }, }); - assert_eq!(invoice_request.amount_msats(), None); + assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), bob_id); assert_eq!(reply_path.introduction_node(), &IntroductionNode::NodeId(bob_id)); @@ -1116,7 +1116,7 @@ fn creates_and_pays_for_offer_with_retry() { human_readable_name: None, }, }); - assert_eq!(invoice_request.amount_msats(), None); + assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), bob_id); assert_eq!(reply_path.introduction_node(), &IntroductionNode::NodeId(bob_id)); let onion_message = alice.onion_messenger.next_onion_message_for_peer(bob_id).unwrap(); @@ -1411,7 +1411,7 @@ fn fails_authentication_when_handling_invoice_request() { alice.onion_messenger.handle_onion_message(david_id, &onion_message); let (invoice_request, reply_path) = extract_invoice_request(alice, &onion_message); - assert_eq!(invoice_request.amount_msats(), None); + assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), david_id); assert_eq!(reply_path.introduction_node(), &IntroductionNode::NodeId(charlie_id)); @@ -1441,7 +1441,7 @@ fn fails_authentication_when_handling_invoice_request() { alice.onion_messenger.handle_onion_message(bob_id, &onion_message); let (invoice_request, reply_path) = extract_invoice_request(alice, &onion_message); - assert_eq!(invoice_request.amount_msats(), None); + assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), david_id); assert_eq!(reply_path.introduction_node(), &IntroductionNode::NodeId(charlie_id)); @@ -1543,7 +1543,7 @@ fn fails_authentication_when_handling_invoice_for_offer() { alice.onion_messenger.handle_onion_message(bob_id, &onion_message); let (invoice_request, reply_path) = extract_invoice_request(alice, &onion_message); - assert_eq!(invoice_request.amount_msats(), None); + assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), david_id); assert_eq!(reply_path.introduction_node(), &IntroductionNode::NodeId(charlie_id)); diff --git a/lightning/src/offers/invoice.rs b/lightning/src/offers/invoice.rs index 136ea2625de..d6a0392dac2 100644 --- a/lightning/src/offers/invoice.rs +++ b/lightning/src/offers/invoice.rs @@ -342,7 +342,7 @@ macro_rules! 
invoice_builder_methods { ( pub(crate) fn amount_msats( invoice_request: &InvoiceRequest ) -> Result { - match invoice_request.amount_msats() { + match invoice_request.contents.inner.amount_msats() { Some(amount_msats) => Ok(amount_msats), None => match invoice_request.contents.inner.offer.amount() { Some(Amount::Bitcoin { amount_msats }) => { @@ -1531,6 +1531,11 @@ impl TryFrom for InvoiceContents { experimental_offer_tlv_stream, experimental_invoice_request_tlv_stream, ) )?; + + if amount_msats != refund.amount_msats() { + return Err(Bolt12SemanticError::InvalidAmount); + } + Ok(InvoiceContents::ForRefund { refund, fields }) } else { let invoice_request = InvoiceRequestContents::try_from( @@ -1539,6 +1544,13 @@ impl TryFrom for InvoiceContents { experimental_offer_tlv_stream, experimental_invoice_request_tlv_stream, ) )?; + + if let Some(requested_amount_msats) = invoice_request.amount_msats() { + if amount_msats != requested_amount_msats { + return Err(Bolt12SemanticError::InvalidAmount); + } + } + Ok(InvoiceContents::ForOffer { invoice_request, fields }) } } @@ -2707,6 +2719,69 @@ mod tests { } } + #[test] + fn fails_parsing_invoice_with_wrong_amount() { + let expanded_key = ExpandedKey::new([42; 32]); + let entropy = FixedEntropy {}; + let nonce = Nonce::from_entropy_source(&entropy); + let secp_ctx = Secp256k1::new(); + let payment_id = PaymentId([1; 32]); + + let invoice = OfferBuilder::new(recipient_pubkey()) + .amount_msats(1000) + .build().unwrap() + .request_invoice(&expanded_key, nonce, &secp_ctx, payment_id).unwrap() + .build_and_sign().unwrap() + .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap() + .amount_msats_unchecked(2000) + .build().unwrap() + .sign(recipient_sign).unwrap(); + + let mut buffer = Vec::new(); + invoice.write(&mut buffer).unwrap(); + + match Bolt12Invoice::try_from(buffer) { + Ok(_) => panic!("expected error"), + Err(e) => assert_eq!(e, Bolt12ParseError::InvalidSemantics(Bolt12SemanticError::InvalidAmount)), + } + + let invoice = OfferBuilder::new(recipient_pubkey()) + .amount_msats(1000) + .build().unwrap() + .request_invoice(&expanded_key, nonce, &secp_ctx, payment_id).unwrap() + .amount_msats(1000).unwrap() + .build_and_sign().unwrap() + .respond_with_no_std(payment_paths(), payment_hash(), now()).unwrap() + .amount_msats_unchecked(2000) + .build().unwrap() + .sign(recipient_sign).unwrap(); + + let mut buffer = Vec::new(); + invoice.write(&mut buffer).unwrap(); + + match Bolt12Invoice::try_from(buffer) { + Ok(_) => panic!("expected error"), + Err(e) => assert_eq!(e, Bolt12ParseError::InvalidSemantics(Bolt12SemanticError::InvalidAmount)), + } + + let invoice = RefundBuilder::new(vec![1; 32], payer_pubkey(), 1000).unwrap() + .build().unwrap() + .respond_using_derived_keys_no_std( + payment_paths(), payment_hash(), now(), &expanded_key, &entropy + ) + .unwrap() + .amount_msats_unchecked(2000) + .build_and_sign(&secp_ctx).unwrap(); + + let mut buffer = Vec::new(); + invoice.write(&mut buffer).unwrap(); + + match Bolt12Invoice::try_from(buffer) { + Ok(_) => panic!("expected error"), + Err(e) => assert_eq!(e, Bolt12ParseError::InvalidSemantics(Bolt12SemanticError::InvalidAmount)), + } + } + #[test] fn fails_parsing_invoice_without_signature() { let expanded_key = ExpandedKey::new([42; 32]); diff --git a/lightning/src/offers/invoice_macros.rs b/lightning/src/offers/invoice_macros.rs index 93e62d7adaf..dd75fe62504 100644 --- a/lightning/src/offers/invoice_macros.rs +++ b/lightning/src/offers/invoice_macros.rs @@ -87,6 +87,14 @@ macro_rules! 
invoice_builder_methods_test { ( $self: ident, $self_type: ty, $invoice_fields: expr, $return_type: ty, $return_value: expr $(, $self_mut: tt)? ) => { + #[cfg_attr(c_bindings, allow(dead_code))] + pub(crate) fn amount_msats_unchecked( + $($self_mut)* $self: $self_type, amount_msats: u64, + ) -> $return_type { + $invoice_fields.amount_msats = amount_msats; + $return_value + } + #[cfg_attr(c_bindings, allow(dead_code))] pub(crate) fn features_unchecked( $($self_mut)* $self: $self_type, features: Bolt12InvoiceFeatures diff --git a/lightning/src/offers/invoice_request.rs b/lightning/src/offers/invoice_request.rs index 74bbdb8a0bf..1a3eb0b5e5a 100644 --- a/lightning/src/offers/invoice_request.rs +++ b/lightning/src/offers/invoice_request.rs @@ -79,7 +79,7 @@ use crate::ln::inbound_payment::{ExpandedKey, IV_LEN}; use crate::ln::msgs::DecodeError; use crate::offers::merkle::{SignError, SignFn, SignatureTlvStream, SignatureTlvStreamRef, TaggedHash, TlvStream, self, SIGNATURE_TLV_RECORD_SIZE}; use crate::offers::nonce::Nonce; -use crate::offers::offer::{EXPERIMENTAL_OFFER_TYPES, ExperimentalOfferTlvStream, ExperimentalOfferTlvStreamRef, OFFER_TYPES, Offer, OfferContents, OfferId, OfferTlvStream, OfferTlvStreamRef}; +use crate::offers::offer::{Amount, EXPERIMENTAL_OFFER_TYPES, ExperimentalOfferTlvStream, ExperimentalOfferTlvStreamRef, OFFER_TYPES, Offer, OfferContents, OfferId, OfferTlvStream, OfferTlvStreamRef}; use crate::offers::parse::{Bolt12ParseError, ParsedMessage, Bolt12SemanticError}; use crate::offers::payer::{PayerContents, PayerTlvStream, PayerTlvStreamRef}; use crate::offers::signer::{Metadata, MetadataMaterial}; @@ -974,7 +974,15 @@ impl InvoiceRequestContents { } pub(super) fn amount_msats(&self) -> Option { - self.inner.amount_msats + self.inner + .amount_msats() + .or_else(|| match self.inner.offer.amount() { + Some(Amount::Bitcoin { amount_msats }) => { + Some(amount_msats.saturating_mul(self.quantity().unwrap_or(1))) + }, + Some(Amount::Currency { .. 
}) => None, + None => { debug_assert!(false); None}, + }) } pub(super) fn features(&self) -> &InvoiceRequestFeatures { @@ -1015,6 +1023,10 @@ impl InvoiceRequestContentsWithoutPayerSigningPubkey { self.chain.unwrap_or_else(|| self.offer.implied_chain()) } + pub(super) fn amount_msats(&self) -> Option { + self.amount_msats + } + pub(super) fn as_tlv_stream(&self) -> PartialInvoiceRequestTlvStreamRef { let payer = PayerTlvStreamRef { metadata: self.payer.0.as_bytes(), @@ -1381,7 +1393,7 @@ mod tests { assert_eq!(invoice_request.supported_quantity(), Quantity::One); assert_eq!(invoice_request.issuer_signing_pubkey(), Some(recipient_pubkey())); assert_eq!(invoice_request.chain(), ChainHash::using_genesis_block(Network::Bitcoin)); - assert_eq!(invoice_request.amount_msats(), None); + assert_eq!(invoice_request.amount_msats(), Some(1000)); assert_eq!(invoice_request.invoice_request_features(), &InvoiceRequestFeatures::empty()); assert_eq!(invoice_request.quantity(), None); assert_eq!(invoice_request.payer_note(), None); @@ -1748,6 +1760,44 @@ mod tests { } } + #[test] + fn builds_invoice_request_without_amount() { + let expanded_key = ExpandedKey::new([42; 32]); + let entropy = FixedEntropy {}; + let nonce = Nonce::from_entropy_source(&entropy); + let secp_ctx = Secp256k1::new(); + let payment_id = PaymentId([1; 32]); + + let invoice_request = OfferBuilder::new(recipient_pubkey()) + .amount_msats(1000) + .build().unwrap() + .request_invoice(&expanded_key, nonce, &secp_ctx, payment_id).unwrap() + .build_and_sign().unwrap(); + let (_, _, tlv_stream, _, _, _) = invoice_request.as_tlv_stream(); + assert_eq!(invoice_request.amount_msats(), Some(1000)); + assert_eq!(tlv_stream.amount, None); + + let invoice_request = OfferBuilder::new(recipient_pubkey()) + .amount_msats(1000) + .supported_quantity(Quantity::Unbounded) + .build().unwrap() + .request_invoice(&expanded_key, nonce, &secp_ctx, payment_id).unwrap() + .quantity(2).unwrap() + .build_and_sign().unwrap(); + let (_, _, tlv_stream, _, _, _) = invoice_request.as_tlv_stream(); + assert_eq!(invoice_request.amount_msats(), Some(2000)); + assert_eq!(tlv_stream.amount, None); + + let invoice_request = OfferBuilder::new(recipient_pubkey()) + .amount(Amount::Currency { iso4217_code: *b"USD", amount: 10 }) + .build_unchecked() + .request_invoice(&expanded_key, nonce, &secp_ctx, payment_id).unwrap() + .build_unchecked_and_sign(); + let (_, _, tlv_stream, _, _, _) = invoice_request.as_tlv_stream(); + assert_eq!(invoice_request.amount_msats(), None); + assert_eq!(tlv_stream.amount, None); + } + #[test] fn builds_invoice_request_with_features() { let expanded_key = ExpandedKey::new([42; 32]); diff --git a/lightning/src/offers/parse.rs b/lightning/src/offers/parse.rs index f3c481a9f95..6b72c6b1682 100644 --- a/lightning/src/offers/parse.rs +++ b/lightning/src/offers/parse.rs @@ -147,7 +147,7 @@ pub enum Bolt12SemanticError { UnexpectedChain, /// An amount was expected but was missing. MissingAmount, - /// The amount exceeded the total bitcoin supply. + /// The amount exceeded the total bitcoin supply or didn't match an expected amount. InvalidAmount, /// An amount was provided but was not sufficient in value. InsufficientAmount, From 3c356abbd782754bcc97bc552f037fd843f36162 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Wed, 15 Jan 2025 09:56:04 -0600 Subject: [PATCH 016/105] Add InvoiceRequest::has_amount_msats When InvoiceRequest::amount_msats returns Some, it may have been inferred from the Offer::amount and InvoiceRequest::quantity. 
Add a method to InvoiceRequest for determining if the amount was explicitly set. --- lightning/src/offers/invoice_request.rs | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/lightning/src/offers/invoice_request.rs b/lightning/src/offers/invoice_request.rs index 1a3eb0b5e5a..957884f69d0 100644 --- a/lightning/src/offers/invoice_request.rs +++ b/lightning/src/offers/invoice_request.rs @@ -665,6 +665,15 @@ macro_rules! invoice_request_accessors { ($self: ident, $contents: expr) => { $contents.amount_msats() } + /// Returns whether an amount was set in the request; otherwise, if [`amount_msats`] is `Some` + /// then it was inferred from the [`Offer::amount`] and [`quantity`]. + /// + /// [`amount_msats`]: Self::amount_msats + /// [`quantity`]: Self::quantity + pub fn has_amount_msats(&$self) -> bool { + $contents.has_amount_msats() + } + /// Features pertaining to requesting an invoice. pub fn invoice_request_features(&$self) -> &InvoiceRequestFeatures { &$contents.features() @@ -985,6 +994,10 @@ impl InvoiceRequestContents { }) } + pub(super) fn has_amount_msats(&self) -> bool { + self.inner.amount_msats().is_some() + } + pub(super) fn features(&self) -> &InvoiceRequestFeatures { &self.inner.features } @@ -1669,6 +1682,7 @@ mod tests { .amount_msats(1000).unwrap() .build_and_sign().unwrap(); let (_, _, tlv_stream, _, _, _) = invoice_request.as_tlv_stream(); + assert!(invoice_request.has_amount_msats()); assert_eq!(invoice_request.amount_msats(), Some(1000)); assert_eq!(tlv_stream.amount, Some(1000)); @@ -1680,6 +1694,7 @@ mod tests { .amount_msats(1000).unwrap() .build_and_sign().unwrap(); let (_, _, tlv_stream, _, _, _) = invoice_request.as_tlv_stream(); + assert!(invoice_request.has_amount_msats()); assert_eq!(invoice_request.amount_msats(), Some(1000)); assert_eq!(tlv_stream.amount, Some(1000)); @@ -1690,6 +1705,7 @@ mod tests { .amount_msats(1001).unwrap() .build_and_sign().unwrap(); let (_, _, tlv_stream, _, _, _) = invoice_request.as_tlv_stream(); + assert!(invoice_request.has_amount_msats()); assert_eq!(invoice_request.amount_msats(), Some(1001)); assert_eq!(tlv_stream.amount, Some(1001)); @@ -1774,6 +1790,7 @@ mod tests { .request_invoice(&expanded_key, nonce, &secp_ctx, payment_id).unwrap() .build_and_sign().unwrap(); let (_, _, tlv_stream, _, _, _) = invoice_request.as_tlv_stream(); + assert!(!invoice_request.has_amount_msats()); assert_eq!(invoice_request.amount_msats(), Some(1000)); assert_eq!(tlv_stream.amount, None); @@ -1785,6 +1802,7 @@ mod tests { .quantity(2).unwrap() .build_and_sign().unwrap(); let (_, _, tlv_stream, _, _, _) = invoice_request.as_tlv_stream(); + assert!(!invoice_request.has_amount_msats()); assert_eq!(invoice_request.amount_msats(), Some(2000)); assert_eq!(tlv_stream.amount, None); @@ -1794,6 +1812,7 @@ mod tests { .request_invoice(&expanded_key, nonce, &secp_ctx, payment_id).unwrap() .build_unchecked_and_sign(); let (_, _, tlv_stream, _, _, _) = invoice_request.as_tlv_stream(); + assert!(!invoice_request.has_amount_msats()); assert_eq!(invoice_request.amount_msats(), None); assert_eq!(tlv_stream.amount, None); } From be1a3abc194c749303f733d8f42d88d6ae4b84c5 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 13 Jan 2025 22:00:43 +0000 Subject: [PATCH 017/105] Add draft 0.1 release notes --- CHANGELOG.md | 174 ++++++++++++++++++ .../3137-channel-negotiation-type.txt | 7 - .../3243-remove-balance_msat.txt | 1 - pending_changelog/3259-no-downgrade.txt | 4 - pending_changelog/3322-a.txt | 6 - pending_changelog/3322-b.txt | 7 - 
...e-counterparty-id-in-payment-forwarded.txt | 7 - .../3383-deprecate-old-inbounds.txt | 6 - ...3435-authenticate-payment-receive-tlvs.txt | 9 - .../3439-remove-accept-mpp-keysend-cfg.txt | 3 - pending_changelog/matt-no-upgrade-skip.txt | 6 - .../matt-persist-preimage-on-upgrade.txt | 8 - 12 files changed, 174 insertions(+), 64 deletions(-) delete mode 100644 pending_changelog/3137-channel-negotiation-type.txt delete mode 100644 pending_changelog/3243-remove-balance_msat.txt delete mode 100644 pending_changelog/3259-no-downgrade.txt delete mode 100644 pending_changelog/3322-a.txt delete mode 100644 pending_changelog/3322-b.txt delete mode 100644 pending_changelog/3358-include-counterparty-id-in-payment-forwarded.txt delete mode 100644 pending_changelog/3383-deprecate-old-inbounds.txt delete mode 100644 pending_changelog/3435-authenticate-payment-receive-tlvs.txt delete mode 100644 pending_changelog/3439-remove-accept-mpp-keysend-cfg.txt delete mode 100644 pending_changelog/matt-no-upgrade-skip.txt delete mode 100644 pending_changelog/matt-persist-preimage-on-upgrade.txt diff --git a/CHANGELOG.md b/CHANGELOG.md index 6df2b2c2133..3400ab642b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,177 @@ +# 0.1 - Jan XXX, 2025 - XXX + +## API Updates + * The `lightning-liquidity` crate has been moved into the `rust-lightning` + git tree, enabling support for both sides of the LSPS channel open + negotiation protocols (#3436). + * Since its last alpha release, `lightning-liquidity` has also gained support + for acting as an LSPS1 client (#3436). + * This release includes support for BIP 353 Human Readable Names resolution. + With the `dnssec` feature enabled, simply call `ChannelManager`'s + `pay_for_offer_from_human_readable_name` with a list of lightning nodes that + have the `dns_resolver` feature flag set (e.g. those running LDK with the + new `lightning_dns_resolver::OMDomainResolver` set up to resolve DNS queries + for others) and a Human Readable Name (#3346, #3179, #3283). + * Asynchronous `ChannelMonitorUpdate` persistence (i.e. the use of + `ChannelMonitorUpdateStatus::InProgress`) is now considered beta-quality. + There are no known issues with it, though the likelihood of unknown issues + is high (#3414). + * `ChannelManager`'s `send_payment_with_route` and `send_spontaneous_payment` + were removed. Use `send_payment` and `send_spontaneous_payment_with_retry` + (now renamed `send_spontaneous_payment`) instead (#3430). + * `ChannelMonitor`s no longer need to be re-persisted after deserializing the + `ChannelManager` before beginning normal operation. As such, + `ChannelManagerReadArgs::channel_monitors` no longer requires mutable + references (#3322). See the Backwards Compatibility section for more info. + * Additional information is now stored in `ChannelMonitorUpdate`s which may + increase the average size of `ChannelMonitorUpdate`s when claiming inbound + payments substantially. The expected maximum size of `ChannelMonitorUpdate`s + shouldn't change materially (#3322). + * Redundant `Event::PaymentClaimed`s will be generated more frequently on + startup compared to previous versions. + `Event::PaymentClaim{able,ed}::payment_id` has been added to allow for more + robust handling of redundant events on payments with duplicate + `PaymentHash`es (#3303, #3322). + * `ChannelMonitorUpdate::update_id`s no longer have a magic value (of + `u64::MAX`) for updates after a channel has been closed. They are now + always monotonically increasing (#3355). 
+ * The MSRV of `lightning-transaction-sync` has been increased to rustc 1.75 due + to its HTTP client dependencies (#3528). + * The default `ProbabilisticScoringFeeParameters` values now recommend specific + ratios between different penalties, and default penalties now allow for + higher fees in order to reduce payment latency (#3495). + * On-chain state resolution now more aggressively batches claims into single + transactions, reducing on-chain fee costs when resolving multiple HTLCs for a + single channel force-closure. This also reduces the on-chain reserve + requirements for nodes using anchor channels (#3340). + * A `MigratableKVStore` trait was added (and implemented for + `FilesystemStore`), enabling easy migration between `KVStore`s (#3481). + * `InvoiceRequest::amount_msats` now returns the `offer`-implied amount if a + Bitcoin-denominated amount was set in the `offer` and no amount was set + directly in the `invoice_request` (#3535). + * `Event::OpenChannelRequest::push_msat` has been replaced with an enum in + preparation for the dual-funding protocol coming in a future release (#3137). + * `GossipVerifier` now requires a `P2PGossipSync` which holds a reference to + the `GossipVerifier` via an `Arc` (#3432). + * The `max_level_*` features were removed as the performance gain compared to + doing the limiting at runtime was negligible (#3431). + * `ChannelManager::create_bolt11_invoice` was added, deprecating the + `lightning::ln::invoice_utils` module (#3389). + * The `bech32` dependency has been upgraded to 0.11 across crates (#3270). + * Support for creating BOLT 12 `invoice_request`s with a static signing key + rather than an ephemeral one has been removed (#3264). + * The `Router` trait no longer extends the `MessageRouter` trait, creating an + extra argument to `ChannelManager` construction (#3326). + * The deprecated `AvailableBalances::balance_msat` has been removed in favor of + `ChannelMonitor::get_claimable_balances` (#3243). + * Deprecated re-exports of `Payment{Hash,Preimage,Secret}` and `features` were + removed (#3359). + * `bolt11_payment::*_from_zero_amount_invoice` methods were renamed + `*_from_variable_amount_invoice` (#3397) + * Offer `signing_pubkey` (and related struct names) have been renamed + `issuer_signing_pubkey` (#3218). + * `Event::PaymentForwarded::{prev,next}_node_id` were added (#3458). + * `Event::ChannelClosed::last_local_balance_msat` was added (#3235). + * `RoutingMessageHandler::handle_*` now all have a `node_id` argument (#3291). + * `lightning::util::persist::MonitorName` has been exposed (#3376). + * `ProbabilisticScorer::live_estimated_payment_success_probability` was added + (#3420) + * `EcdsaChannelSigner::sign_splicing_funding_input` was added to support an + eventual splicing feature (#3316). + * `{Payment,Offer}Id` now support lowercase-hex formatting (#3377). + +## Bug Fixes + * Fixed a rare case where a BOLT 12 payment may be made duplicatively if the + node crashes while processing a BOLT 12 `invoice` message (#3313). + * Fixed a bug where a malicious sender could cause a payment `Event` to be + generated with an `OfferId` using a payment with a lower amount than the + corresponding BOLT 12 offer would have required. The amount in the + `Event::Payment{Claimable,Claimed}` were still correct (#3435). + * The `ProbabilisticScorer` model and associated default scoring parameters + were tweaked to be more predictive of real-world results (#3368, #3495). 
+ * `ProbabilisticScoringFeeParameters::base_penalty_amount_multiplier_msat` no + longer includes any pending HTLCs we already have through channels in the + graph, avoiding over-penalizing them in comparison to other channels (#3356). + * A `ChannelMonitor` will no longer be archived if a `MonitorEvent` containing + a preimage for another channel is pending. This fixes an issue where a + payment preimage needed for another channel claim is lost if events go + un-processed for 4038 blocks (#3450). + * `std` builds no longer send the full gossip state to peers that do not + request it (#3390). + * `lightning-block-sync` listeners now receive `block_connected` calls, rather + than always receiving `filtered_block_connected` calls (#3354). + * Fixed a bug where some transactions were broadcasted one block before their + locktime made them candidates for inclusion in the mempool (though they would + be automatically re-broadcasted later, #3453). + * `ChainMonitor` now persists `ChannelMonitor`s when their `Balance` set first + goes empty, making `ChannelMonitor` pruning more reliable on nodes that are + only online briefly (e.g. mobile nodes, #3442). + * BOLT 12 invoice requests now better handle intermittent internet connectivity + (e.g. on mobile devices with app interruptions, #3010). + * Broadcast-gossip `MessageSendEvent`s from the `ChannelMessageHandler` are now + delivered to peers even if the peer is behind in processing relayed gossip. + This ensures our own gossip propagates well even if we have very limited + upload bandwidth (#3142). + * Fixed a bug where calling `OutputSweeper::transactions_confirmed` with + transactions from anything but the latest block may have triggered a spurious + assertion in debug mode (#3524). + +## Performance Improvements + * LDK now verifies `channel_update` gossip messages without holding a lock, + allowing additional parallelism during gossip sync (#3310). + * LDK now checks if it already has certain gossip messages before verifying the + message signatures, reducing CPU usage during gossip sync after the first + startup (#3305). + +## Node Compatibility + * LDK now handles fields in the experimental range of BOLT 12 messages (#3237). + +## Backwards Compatibility + * Nodes with pending forwarded HTLCs or unclaimed payments cannot be + upgraded directly from 0.0.123 or earlier to 0.1. Instead, they must + first either resolve all pending HTLCs (including those pending + resolution on-chain), or run 0.0.124 or 0.0.125 and resolve any HTLCs that + were originally forwarded or received running 0.0.123 or earlier (#3355). + * `ChannelMonitor`s not being re-persisted after deserializing the + `ChannelManager` only applies to upgraded nodes *after* a startup with the + old semantics completes at least once. In other words, you must deserialize + the `ChannelManager` with an upgraded LDK, persist the `ChannelMonitor`s as + you would on pre-0.1 versions of LDK, then continue to normal startup once, + and for startups thereafter you can take advantage of the new semantics + avoiding redundant persistence on startup (#3322). + * Pending inbound payments paying a BOLT 12 `invoice` issued prior to upgrade + to LDK 0.1 will fail. Issued BOLT 12 `offer`s remain payable (#3435). + * `UserConfig::accept_mpp_keysend` was removed, thus the presence of pending + inbound MPP keysend payments will prevent downgrade to LDK 0.0.115 and + earlier (#3439). 
+ * Inbound payments initialized using the removed + `ChannelManager::create_inbound_payment{,_for_hash}_legacy` API will no + longer be accepted by LDK 0.1 (#3383). + * Downgrading to prior versions of LDK after using `ChannelManager`'s + `unsafe_manual_funding_transaction_generated` may cause `ChannelManager` + deserialization to fail (#3259). + * `ChannelDetails` serialized with LDK 0.1+ read with versions prior to 0.1 + will have `balance_msat` equal to `next_outbound_htlc_limit_msat` (#3243). + +## Security +0.1 fixes a funds-theft vulnerability when paying BOLT 12 offers as well as a +funds-lockup denial-of-service issue for anchor channels. + * When paying a BOLT 12 offer, if the recipient responds to our + `invoice_request` with an `invoice` which had an amount different from the + amount we intended to pay (either from the `offer` or the `amount_msats` + passed to `ChannelManager::pay_for_offer`), LDK would pay the amount from the + `invoice`. As a result, a malicious recipient could cause us to overpay the + amount we intended to pay (#3535). + * Fixed a bug where a counterparty can cause funds of ours to be locked up + by broadcasting a revoked commitment transaction and following HTLC + transactions in specific formats when using an anchor channel. The funds can + be recovered by upgrading to 0.1 and replaying the counterparty's broadcasted + transactions (using `Confirm::transactions_confirmed`) (#3537). Thanks to + Matt Morehouse for reporting and fixing this issue. + * Various denial-of-service issues in the formerly-alpha `lightning-liquidity` + crate have been addressed (#3436, #3493). + + # 0.0.125 - Oct 14, 2024 - "Delayed Beta Testing" ## Bug Fixes diff --git a/pending_changelog/3137-channel-negotiation-type.txt b/pending_changelog/3137-channel-negotiation-type.txt deleted file mode 100644 index 8eafa4e072b..00000000000 --- a/pending_changelog/3137-channel-negotiation-type.txt +++ /dev/null @@ -1,7 +0,0 @@ -# API Updates - * `Event::OpenChannelRequest::push_msat` has been replaced by the field `channel_negotiation_type` to - differentiate between an inbound request for a dual-funded (V2) or non-dual-funded (V1) channel to be - opened, with value being either of the enum variants `InboundChannelFunds::DualFunded` and - `InboundChannelFunds::PushMsat(u64)` corresponding to V2 and V1 channel open requests respectively. - This is in preparation for supporting accepting dual-funded channels, which will be available in a later release. - diff --git a/pending_changelog/3243-remove-balance_msat.txt b/pending_changelog/3243-remove-balance_msat.txt deleted file mode 100644 index 6378bd79054..00000000000 --- a/pending_changelog/3243-remove-balance_msat.txt +++ /dev/null @@ -1 +0,0 @@ -* The `AvailableBalances::balance_msat` field has been removed in favor of `ChainMonitor::get_claimable_balances`. `ChannelDetails` serialized with versions of LDK >= 0.0.125 will have their `balance_msat` field set to `next_outbound_htlc_limit_msat` when read by versions of LDK prior to 0.0.125 (#3243). diff --git a/pending_changelog/3259-no-downgrade.txt b/pending_changelog/3259-no-downgrade.txt deleted file mode 100644 index ed4193da480..00000000000 --- a/pending_changelog/3259-no-downgrade.txt +++ /dev/null @@ -1,4 +0,0 @@ -# Backwards Compatibility - * Downgrading after using `ChannelManager`'s - `unsafe_manual_funding_transaction_generated` may cause deserialization of - `ChannelManager` to fail with an `Err` (#3259). 
diff --git a/pending_changelog/3322-a.txt b/pending_changelog/3322-a.txt deleted file mode 100644 index 83849926a4e..00000000000 --- a/pending_changelog/3322-a.txt +++ /dev/null @@ -1,6 +0,0 @@ -API Changes -=========== - -Additional information is now stored in `ChannelMonitorUpdate`s which may increase the size of -`ChannelMonitorUpdate`s claiming inbound payments substantially. The expected maximum size of -`ChannelMonitorUpdate`s shouldn't change materially. diff --git a/pending_changelog/3322-b.txt b/pending_changelog/3322-b.txt deleted file mode 100644 index c8bb0c64bd9..00000000000 --- a/pending_changelog/3322-b.txt +++ /dev/null @@ -1,7 +0,0 @@ -API Updates -=========== - -As a part of adding robustness against several unlikely scenarios, redundant `PaymentClaimed` -`Event`s will be generated more frequently on startup for payments received on LDK 0.1 and -newer. A new `Event::PaymentClaimed::payment_id` field may be used to better differentiate -between redundant payments. diff --git a/pending_changelog/3358-include-counterparty-id-in-payment-forwarded.txt b/pending_changelog/3358-include-counterparty-id-in-payment-forwarded.txt deleted file mode 100644 index 8c2b1e338a6..00000000000 --- a/pending_changelog/3358-include-counterparty-id-in-payment-forwarded.txt +++ /dev/null @@ -1,7 +0,0 @@ -API Updates -=========== - -To improve clarity and uniqueness in identifying forwarded payments, the `PaymentForwarded` -event now includes counterparty node IDs alongside `ChannelIds`. This change resolves -potential ambiguity in cases like v1 0conf channel opens, where `ChannelIds` alone may not -be unique. \ No newline at end of file diff --git a/pending_changelog/3383-deprecate-old-inbounds.txt b/pending_changelog/3383-deprecate-old-inbounds.txt deleted file mode 100644 index 654cbcb5078..00000000000 --- a/pending_changelog/3383-deprecate-old-inbounds.txt +++ /dev/null @@ -1,6 +0,0 @@ -# Backwards Compatibility -* Pending inbound payments added in versions 0.0.116 or earlier using the - `create_inbound_payment{,_for_hash}_legacy` API will be ignored on `ChannelManager` - deserialization and fail to be received - - diff --git a/pending_changelog/3435-authenticate-payment-receive-tlvs.txt b/pending_changelog/3435-authenticate-payment-receive-tlvs.txt deleted file mode 100644 index 714bd00d8ce..00000000000 --- a/pending_changelog/3435-authenticate-payment-receive-tlvs.txt +++ /dev/null @@ -1,9 +0,0 @@ -## API Updates - * Payment `ReceiveTlvs` now contains an `authentication` field. It should be - set to `None` and then filled in with a nonce and the HMAC produced by - `ReceiveTlvs::hmac_for_offer_payment` when passing in the nonce (#3435). - -## Backwards Compatibility - * `ReceiveTlvs` for payments over `BlindedPaymentPath`s are now authenticated. - Any inbound payments for a preexisting `Bolt12Invoice` will therefore fail - (#3435). diff --git a/pending_changelog/3439-remove-accept-mpp-keysend-cfg.txt b/pending_changelog/3439-remove-accept-mpp-keysend-cfg.txt deleted file mode 100644 index f52d123d2ac..00000000000 --- a/pending_changelog/3439-remove-accept-mpp-keysend-cfg.txt +++ /dev/null @@ -1,3 +0,0 @@ -# Backwards Compatibility -* The presence of pending inbound MPP keysend payments breaks downgrades to LDK versions 0.0.115 and - earlier. 
diff --git a/pending_changelog/matt-no-upgrade-skip.txt b/pending_changelog/matt-no-upgrade-skip.txt deleted file mode 100644 index f5fcb8c5f25..00000000000 --- a/pending_changelog/matt-no-upgrade-skip.txt +++ /dev/null @@ -1,6 +0,0 @@ -## Backwards Compatibility - * Nodes with pending forwarded HTLCs or unclaimed payments cannot be - upgraded directly from 0.0.123 or earlier to 0.1. Instead, they must - first either resolve all pending HTLCs (including those pending - resolution on-chain), or run 0.0.124 and resolve any HTLCs that were - originally forwarded or received running 0.0.123 or earlier. diff --git a/pending_changelog/matt-persist-preimage-on-upgrade.txt b/pending_changelog/matt-persist-preimage-on-upgrade.txt deleted file mode 100644 index fc53469c6f6..00000000000 --- a/pending_changelog/matt-persist-preimage-on-upgrade.txt +++ /dev/null @@ -1,8 +0,0 @@ -# Backwards Compatibility - * The `ChannelManager` deserialization semantics no longer require that - `ChannelMonitor`s be re-persisted after `(BlockHash, ChannelManager)::read` - is called prior to normal node operation. This applies to upgraded nodes - only *after* a startup with the old semantics completes at least once. IOW, - you must deserialize the `ChannelManager` with upgraded LDK, persist the - `ChannelMonitor`s then continue to normal startup once, and thereafter you - may skip the `ChannelMonitor` persistence step. From a016cc92dd094466cfcc0614651799e51d4163bc Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 15 Jan 2025 14:12:36 -0500 Subject: [PATCH 018/105] Fix build for cfg(async_payments) Static invoices don't have an amount_msats field. --- lightning/src/offers/invoice.rs | 26 ++++++++++++++++++++++---- lightning/src/offers/invoice_macros.rs | 12 ++---------- lightning/src/offers/static_invoice.rs | 4 ++-- 3 files changed, 26 insertions(+), 16 deletions(-) diff --git a/lightning/src/offers/invoice.rs b/lightning/src/offers/invoice.rs index d6a0392dac2..75095e058e7 100644 --- a/lightning/src/offers/invoice.rs +++ b/lightning/src/offers/invoice.rs @@ -120,7 +120,7 @@ use crate::ln::inbound_payment::{ExpandedKey, IV_LEN}; use crate::ln::msgs::DecodeError; use crate::offers::invoice_macros::{invoice_accessors_common, invoice_builder_methods_common}; #[cfg(test)] -use crate::offers::invoice_macros::invoice_builder_methods_test; +use crate::offers::invoice_macros::invoice_builder_methods_test_common; use crate::offers::invoice_request::{EXPERIMENTAL_INVOICE_REQUEST_TYPES, ExperimentalInvoiceRequestTlvStream, ExperimentalInvoiceRequestTlvStreamRef, INVOICE_REQUEST_PAYER_ID_TYPE, INVOICE_REQUEST_TYPES, IV_BYTES as INVOICE_REQUEST_IV_BYTES, InvoiceRequest, InvoiceRequestContents, InvoiceRequestTlvStream, InvoiceRequestTlvStreamRef}; use crate::offers::merkle::{SignError, SignFn, SignatureTlvStream, SignatureTlvStreamRef, TaggedHash, TlvStream, self, SIGNATURE_TLV_RECORD_SIZE}; use crate::offers::nonce::Nonce; @@ -380,6 +380,20 @@ macro_rules! invoice_builder_methods { ( } } } +#[cfg(test)] +macro_rules! invoice_builder_methods_test { ( + $self: ident, $self_type: ty, $return_type: ty, $return_value: expr + $(, $self_mut: tt)? 
+) => { + #[cfg_attr(c_bindings, allow(dead_code))] + pub(crate) fn amount_msats_unchecked( + $($self_mut)* $self: $self_type, amount_msats: u64, + ) -> $return_type { + $self.invoice.fields_mut().amount_msats = amount_msats; + $return_value + } +} } + impl<'a> InvoiceBuilder<'a, ExplicitSigningPubkey> { invoice_explicit_signing_pubkey_builder_methods!(self, Self); } @@ -393,7 +407,9 @@ impl<'a, S: SigningPubkeyStrategy> InvoiceBuilder<'a, S> { invoice_builder_methods_common!(self, Self, self.invoice.fields_mut(), Self, self, Bolt12Invoice, mut); #[cfg(test)] - invoice_builder_methods_test!(self, Self, self.invoice.fields_mut(), Self, self, mut); + invoice_builder_methods_test!(self, Self, Self, self, mut); + #[cfg(test)] + invoice_builder_methods_test_common!(self, Self, self.invoice.fields_mut(), Self, self, mut); } #[cfg(all(c_bindings, not(test)))] @@ -408,7 +424,8 @@ impl<'a> InvoiceWithExplicitSigningPubkeyBuilder<'a> { invoice_explicit_signing_pubkey_builder_methods!(self, &mut Self); invoice_builder_methods!(self, &mut Self, &mut Self, self, ExplicitSigningPubkey); invoice_builder_methods_common!(self, &mut Self, self.invoice.fields_mut(), &mut Self, self, Bolt12Invoice); - invoice_builder_methods_test!(self, &mut Self, self.invoice.fields_mut(), &mut Self, self); + invoice_builder_methods_test!(self, &mut Self, &mut Self, self); + invoice_builder_methods_test_common!(self, &mut Self, self.invoice.fields_mut(), &mut Self, self); } #[cfg(all(c_bindings, not(test)))] @@ -423,7 +440,8 @@ impl<'a> InvoiceWithDerivedSigningPubkeyBuilder<'a> { invoice_derived_signing_pubkey_builder_methods!(self, &mut Self); invoice_builder_methods!(self, &mut Self, &mut Self, self, DerivedSigningPubkey); invoice_builder_methods_common!(self, &mut Self, self.invoice.fields_mut(), &mut Self, self, Bolt12Invoice); - invoice_builder_methods_test!(self, &mut Self, self.invoice.fields_mut(), &mut Self, self); + invoice_builder_methods_test!(self, &mut Self, &mut Self, self); + invoice_builder_methods_test_common!(self, &mut Self, self.invoice.fields_mut(), &mut Self, self); } #[cfg(c_bindings)] diff --git a/lightning/src/offers/invoice_macros.rs b/lightning/src/offers/invoice_macros.rs index dd75fe62504..2b276a37d29 100644 --- a/lightning/src/offers/invoice_macros.rs +++ b/lightning/src/offers/invoice_macros.rs @@ -83,18 +83,10 @@ macro_rules! invoice_builder_methods_common { ( } } #[cfg(test)] -macro_rules! invoice_builder_methods_test { ( +macro_rules! invoice_builder_methods_test_common { ( $self: ident, $self_type: ty, $invoice_fields: expr, $return_type: ty, $return_value: expr $(, $self_mut: tt)? ) => { - #[cfg_attr(c_bindings, allow(dead_code))] - pub(crate) fn amount_msats_unchecked( - $($self_mut)* $self: $self_type, amount_msats: u64, - ) -> $return_type { - $invoice_fields.amount_msats = amount_msats; - $return_value - } - #[cfg_attr(c_bindings, allow(dead_code))] pub(crate) fn features_unchecked( $($self_mut)* $self: $self_type, features: Bolt12InvoiceFeatures @@ -154,4 +146,4 @@ macro_rules! 
invoice_accessors_common { ($self: ident, $contents: expr, $invoice pub(super) use invoice_accessors_common; pub(super) use invoice_builder_methods_common; #[cfg(test)] -pub(super) use invoice_builder_methods_test; +pub(super) use invoice_builder_methods_test_common; diff --git a/lightning/src/offers/static_invoice.rs b/lightning/src/offers/static_invoice.rs index 39c17eb3bcc..411ba3ff272 100644 --- a/lightning/src/offers/static_invoice.rs +++ b/lightning/src/offers/static_invoice.rs @@ -20,7 +20,7 @@ use crate::offers::invoice::{ InvoiceTlvStream, InvoiceTlvStreamRef, }; #[cfg(test)] -use crate::offers::invoice_macros::invoice_builder_methods_test; +use crate::offers::invoice_macros::invoice_builder_methods_test_common; use crate::offers::invoice_macros::{invoice_accessors_common, invoice_builder_methods_common}; use crate::offers::invoice_request::InvoiceRequest; use crate::offers::merkle::{ @@ -174,7 +174,7 @@ impl<'a> StaticInvoiceBuilder<'a> { invoice_builder_methods_common!(self, Self, self.invoice, Self, self, StaticInvoice, mut); #[cfg(test)] - invoice_builder_methods_test!(self, Self, self.invoice, Self, self, mut); + invoice_builder_methods_test_common!(self, Self, self.invoice, Self, self, mut); } /// A semantically valid [`StaticInvoice`] that hasn't been signed. From 708d3c3ded0adfcd9948c2a86b9f041d2ef8e34f Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 15 Jan 2025 18:12:48 +0000 Subject: [PATCH 019/105] Bump crate versions to 0.1.0/invoice 0.33.0/dns-resolver 0.2 Sadly, dns-resolver got uploaded as 0.1.0 without a -beta1 tag (and yanked), and thus we release it here as 0.2.0. --- lightning-background-processor/Cargo.toml | 12 ++++++------ lightning-block-sync/Cargo.toml | 6 +++--- lightning-custom-message/Cargo.toml | 4 ++-- lightning-dns-resolver/Cargo.toml | 8 ++++---- lightning-invoice/Cargo.toml | 4 ++-- lightning-liquidity/Cargo.toml | 16 ++++++++-------- lightning-net-tokio/Cargo.toml | 6 +++--- lightning-persister/Cargo.toml | 6 +++--- lightning-rapid-gossip-sync/Cargo.toml | 6 +++--- lightning-transaction-sync/Cargo.toml | 6 +++--- lightning-types/Cargo.toml | 2 +- lightning/Cargo.toml | 8 ++++---- 12 files changed, 42 insertions(+), 42 deletions(-) diff --git a/lightning-background-processor/Cargo.toml b/lightning-background-processor/Cargo.toml index d34ec5304e6..6fa936b4cb4 100644 --- a/lightning-background-processor/Cargo.toml +++ b/lightning-background-processor/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-background-processor" -version = "0.1.0-beta1" +version = "0.1.0" authors = ["Valentine Wallace "] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" @@ -23,14 +23,14 @@ default = ["std"] bitcoin = { version = "0.32.2", default-features = false } bitcoin_hashes = { version = "0.14.0", default-features = false } bitcoin-io = { version = "0.1.2", default-features = false } -lightning = { version = "0.1.0-beta1", path = "../lightning", default-features = false } -lightning-rapid-gossip-sync = { version = "0.1.0-beta1", path = "../lightning-rapid-gossip-sync", default-features = false } +lightning = { version = "0.1.0", path = "../lightning", default-features = false } +lightning-rapid-gossip-sync = { version = "0.1.0", path = "../lightning-rapid-gossip-sync", default-features = false } [dev-dependencies] tokio = { version = "1.35", features = [ "macros", "rt", "rt-multi-thread", "sync", "time" ] } -lightning = { version = "0.1.0-beta1", path = "../lightning", features = ["_test_utils"] } 
-lightning-invoice = { version = "0.33.0-beta1", path = "../lightning-invoice" } -lightning-persister = { version = "0.1.0-beta1", path = "../lightning-persister" } +lightning = { version = "0.1.0", path = "../lightning", features = ["_test_utils"] } +lightning-invoice = { version = "0.33.0", path = "../lightning-invoice" } +lightning-persister = { version = "0.1.0", path = "../lightning-persister" } [lints] workspace = true diff --git a/lightning-block-sync/Cargo.toml b/lightning-block-sync/Cargo.toml index 80506cc92fd..6c4ab1c45e7 100644 --- a/lightning-block-sync/Cargo.toml +++ b/lightning-block-sync/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-block-sync" -version = "0.1.0-beta1" +version = "0.1.0" authors = ["Jeffrey Czyz", "Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" @@ -19,13 +19,13 @@ rpc-client = [ "serde_json", "chunked_transfer" ] [dependencies] bitcoin = "0.32.2" -lightning = { version = "0.1.0-beta1", path = "../lightning" } +lightning = { version = "0.1.0", path = "../lightning" } tokio = { version = "1.35", features = [ "io-util", "net", "time", "rt" ], optional = true } serde_json = { version = "1.0", optional = true } chunked_transfer = { version = "1.4", optional = true } [dev-dependencies] -lightning = { version = "0.1.0-beta1", path = "../lightning", features = ["_test_utils"] } +lightning = { version = "0.1.0", path = "../lightning", features = ["_test_utils"] } tokio = { version = "1.35", features = [ "macros", "rt" ] } [lints] diff --git a/lightning-custom-message/Cargo.toml b/lightning-custom-message/Cargo.toml index daf8638399c..1395ea8b98f 100644 --- a/lightning-custom-message/Cargo.toml +++ b/lightning-custom-message/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-custom-message" -version = "0.1.0-beta1" +version = "0.1.0" authors = ["Jeffrey Czyz"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" @@ -15,7 +15,7 @@ rustdoc-args = ["--cfg", "docsrs"] [dependencies] bitcoin = "0.32.2" -lightning = { version = "0.1.0-beta1", path = "../lightning" } +lightning = { version = "0.1.0", path = "../lightning" } [lints] workspace = true diff --git a/lightning-dns-resolver/Cargo.toml b/lightning-dns-resolver/Cargo.toml index 194fbdf02b9..5180a32ed43 100644 --- a/lightning-dns-resolver/Cargo.toml +++ b/lightning-dns-resolver/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-dns-resolver" -version = "0.1.0" +version = "0.2.0" authors = ["Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning/" @@ -8,12 +8,12 @@ description = "A crate which implements DNSSEC resolution for lightning clients edition = "2021" [dependencies] -lightning = { version = "0.1.0-beta1", path = "../lightning", default-features = false } -lightning-types = { version = "0.2.0-beta1", path = "../lightning-types", default-features = false } +lightning = { version = "0.1.0", path = "../lightning", default-features = false } +lightning-types = { version = "0.2.0", path = "../lightning-types", default-features = false } dnssec-prover = { version = "0.6", default-features = false, features = [ "std", "tokio" ] } tokio = { version = "1.0", default-features = false, features = ["rt"] } [dev-dependencies] bitcoin = { version = "0.32" } tokio = { version = "1.0", default-features = false, features = ["macros", "time"] } -lightning = { version = "0.1.0-beta1", path = "../lightning", features = ["dnssec", "_test_utils"] } +lightning 
= { version = "0.1.0", path = "../lightning", features = ["dnssec", "_test_utils"] } diff --git a/lightning-invoice/Cargo.toml b/lightning-invoice/Cargo.toml index f46e1c4928c..ff8d88c5f63 100644 --- a/lightning-invoice/Cargo.toml +++ b/lightning-invoice/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lightning-invoice" description = "Data structures to parse and serialize BOLT11 lightning invoices" -version = "0.33.0-beta1" +version = "0.33.0" authors = ["Sebastian Geisler "] documentation = "https://docs.rs/lightning-invoice/" license = "MIT OR Apache-2.0" @@ -19,7 +19,7 @@ std = [] [dependencies] bech32 = { version = "0.11.0", default-features = false } -lightning-types = { version = "0.2.0-beta1", path = "../lightning-types", default-features = false } +lightning-types = { version = "0.2.0", path = "../lightning-types", default-features = false } serde = { version = "1.0.118", optional = true } bitcoin = { version = "0.32.2", default-features = false, features = ["secp-recovery"] } diff --git a/lightning-liquidity/Cargo.toml b/lightning-liquidity/Cargo.toml index a90f89a05b4..ed229b8b69a 100644 --- a/lightning-liquidity/Cargo.toml +++ b/lightning-liquidity/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-liquidity" -version = "0.1.0-beta1" +version = "0.1.0" authors = ["John Cantrell ", "Elias Rohrer "] homepage = "https://lightningdevkit.org/" license = "MIT OR Apache-2.0" @@ -19,9 +19,9 @@ std = ["lightning/std"] backtrace = ["dep:backtrace"] [dependencies] -lightning = { version = "0.1.0-beta1", path = "../lightning", default-features = false } -lightning-types = { version = "0.2.0-beta1", path = "../lightning-types", default-features = false } -lightning-invoice = { version = "0.33.0-beta1", path = "../lightning-invoice", default-features = false, features = ["serde"] } +lightning = { version = "0.1.0", path = "../lightning", default-features = false } +lightning-types = { version = "0.2.0", path = "../lightning-types", default-features = false } +lightning-invoice = { version = "0.33.0", path = "../lightning-invoice", default-features = false, features = ["serde"] } bitcoin = { version = "0.32.2", default-features = false, features = ["serde"] } @@ -31,10 +31,10 @@ serde_json = "1.0" backtrace = { version = "0.3", optional = true } [dev-dependencies] -lightning = { version = "0.1.0-beta1", path = "../lightning", default-features = false, features = ["_test_utils"] } -lightning-invoice = { version = "0.33.0-beta1", path = "../lightning-invoice", default-features = false, features = ["serde", "std"] } -lightning-persister = { version = "0.1.0-beta1", path = "../lightning-persister", default-features = false } -lightning-background-processor = { version = "0.1.0-beta1", path = "../lightning-background-processor", default-features = false, features = ["std"] } +lightning = { version = "0.1.0", path = "../lightning", default-features = false, features = ["_test_utils"] } +lightning-invoice = { version = "0.33.0", path = "../lightning-invoice", default-features = false, features = ["serde", "std"] } +lightning-persister = { version = "0.1.0", path = "../lightning-persister", default-features = false } +lightning-background-processor = { version = "0.1.0", path = "../lightning-background-processor", default-features = false, features = ["std"] } proptest = "1.0.0" tokio = { version = "1.35", default-features = false, features = [ "rt-multi-thread", "time", "sync", "macros" ] } diff --git a/lightning-net-tokio/Cargo.toml b/lightning-net-tokio/Cargo.toml index b897d42dac5..efc51126c13 
100644 --- a/lightning-net-tokio/Cargo.toml +++ b/lightning-net-tokio/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-net-tokio" -version = "0.1.0-beta1" +version = "0.1.0" authors = ["Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning/" @@ -16,12 +16,12 @@ rustdoc-args = ["--cfg", "docsrs"] [dependencies] bitcoin = "0.32.2" -lightning = { version = "0.1.0-beta1", path = "../lightning" } +lightning = { version = "0.1.0", path = "../lightning" } tokio = { version = "1.35", features = [ "rt", "sync", "net", "time" ] } [dev-dependencies] tokio = { version = "1.35", features = [ "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] } -lightning = { version = "0.1.0-beta1", path = "../lightning", features = ["_test_utils"] } +lightning = { version = "0.1.0", path = "../lightning", features = ["_test_utils"] } [lints] workspace = true diff --git a/lightning-persister/Cargo.toml b/lightning-persister/Cargo.toml index 1f8b497ae09..5863b0d3a19 100644 --- a/lightning-persister/Cargo.toml +++ b/lightning-persister/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-persister" -version = "0.1.0-beta1" +version = "0.1.0" authors = ["Valentine Wallace", "Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" @@ -15,7 +15,7 @@ rustdoc-args = ["--cfg", "docsrs"] [dependencies] bitcoin = "0.32.2" -lightning = { version = "0.1.0-beta1", path = "../lightning" } +lightning = { version = "0.1.0", path = "../lightning" } [target.'cfg(windows)'.dependencies] windows-sys = { version = "0.48.0", default-features = false, features = ["Win32_Storage_FileSystem", "Win32_Foundation"] } @@ -24,7 +24,7 @@ windows-sys = { version = "0.48.0", default-features = false, features = ["Win32 criterion = { version = "0.4", optional = true, default-features = false } [dev-dependencies] -lightning = { version = "0.1.0-beta1", path = "../lightning", features = ["_test_utils"] } +lightning = { version = "0.1.0", path = "../lightning", features = ["_test_utils"] } bitcoin = { version = "0.32.2", default-features = false } [lints] diff --git a/lightning-rapid-gossip-sync/Cargo.toml b/lightning-rapid-gossip-sync/Cargo.toml index 4fdd33f88d7..bbd23b41325 100644 --- a/lightning-rapid-gossip-sync/Cargo.toml +++ b/lightning-rapid-gossip-sync/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-rapid-gossip-sync" -version = "0.1.0-beta1" +version = "0.1.0" authors = ["Arik Sosman "] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" @@ -14,7 +14,7 @@ default = ["std"] std = ["bitcoin-io/std", "bitcoin_hashes/std"] [dependencies] -lightning = { version = "0.1.0-beta1", path = "../lightning", default-features = false } +lightning = { version = "0.1.0", path = "../lightning", default-features = false } bitcoin = { version = "0.32.2", default-features = false } bitcoin_hashes = { version = "0.14.0", default-features = false } bitcoin-io = { version = "0.1.2", default-features = false } @@ -23,7 +23,7 @@ bitcoin-io = { version = "0.1.2", default-features = false } criterion = { version = "0.4", optional = true, default-features = false } [dev-dependencies] -lightning = { version = "0.1.0-beta1", path = "../lightning", features = ["_test_utils"] } +lightning = { version = "0.1.0", path = "../lightning", features = ["_test_utils"] } [lints] workspace = true diff --git a/lightning-transaction-sync/Cargo.toml b/lightning-transaction-sync/Cargo.toml index 
71922c92fbb..8256a44ded5 100644 --- a/lightning-transaction-sync/Cargo.toml +++ b/lightning-transaction-sync/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-transaction-sync" -version = "0.1.0-beta1" +version = "0.1.0" authors = ["Elias Rohrer"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning" @@ -23,7 +23,7 @@ electrum = ["electrum-client"] async-interface = [] [dependencies] -lightning = { version = "0.1.0-beta1", path = "../lightning", default-features = false, features = ["std"] } +lightning = { version = "0.1.0", path = "../lightning", default-features = false, features = ["std"] } lightning-macros = { version = "0.1", path = "../lightning-macros", default-features = false } bitcoin = { version = "0.32.2", default-features = false } futures = { version = "0.3", optional = true } @@ -31,7 +31,7 @@ esplora-client = { version = "0.11", default-features = false, optional = true } electrum-client = { version = "0.21.0", optional = true } [dev-dependencies] -lightning = { version = "0.1.0-beta1", path = "../lightning", default-features = false, features = ["std", "_test_utils"] } +lightning = { version = "0.1.0", path = "../lightning", default-features = false, features = ["std", "_test_utils"] } tokio = { version = "1.35.0", features = ["macros"] } [target.'cfg(not(target_os = "windows"))'.dev-dependencies] diff --git a/lightning-types/Cargo.toml b/lightning-types/Cargo.toml index 6adeb561ca0..d7f622ddb2d 100644 --- a/lightning-types/Cargo.toml +++ b/lightning-types/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning-types" -version = "0.2.0-beta1" +version = "0.2.0" authors = ["Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning/" diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml index bbb60c523b9..3e46c996147 100644 --- a/lightning/Cargo.toml +++ b/lightning/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning" -version = "0.1.0-beta1" +version = "0.1.0" authors = ["Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning/" @@ -33,8 +33,8 @@ grind_signatures = [] default = ["std", "grind_signatures"] [dependencies] -lightning-types = { version = "0.2.0-beta1", path = "../lightning-types", default-features = false } -lightning-invoice = { version = "0.33.0-beta1", path = "../lightning-invoice", default-features = false } +lightning-types = { version = "0.2.0", path = "../lightning-types", default-features = false } +lightning-invoice = { version = "0.33.0", path = "../lightning-invoice", default-features = false } bech32 = { version = "0.11.0", default-features = false } bitcoin = { version = "0.32.2", default-features = false, features = ["secp-recovery"] } @@ -50,7 +50,7 @@ libm = { version = "0.2", default-features = false } [dev-dependencies] regex = "1.5.6" -lightning-types = { version = "0.2.0-beta1", path = "../lightning-types", features = ["_test_utils"] } +lightning-types = { version = "0.2.0", path = "../lightning-types", features = ["_test_utils"] } [dev-dependencies.bitcoin] version = "0.32.2" From 0241f6e8c2b485516f399b388b35f15d5dace99c Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 15 Jan 2025 22:01:14 +0000 Subject: [PATCH 020/105] Set release date for 0.1, plus some text about the milestone --- CHANGELOG.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3400ab642b2..a35f38d05a6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 
+1,11 @@ -# 0.1 - Jan XXX, 2025 - XXX +# 0.1 - Jan 15, 2025 - "Human Readable Version Numbers" + +The LDK 0.1 release represents an important milestone for the LDK project. While +there are certainly many important features which are still being built, the +project has come a long way, and we are happy with the quality of the +features included in this release. Thus, the project will begin doing patch +releases to fix bugs in prior versions as new features continue to ship in new +minor versions. ## API Updates * The `lightning-liquidity` crate has been moved into the `rust-lightning` From 1ece5a9aa0de2dfb901183109ea4006863f36d90 Mon Sep 17 00:00:00 2001 From: Arik Sosman Date: Wed, 15 Jan 2025 09:13:06 -0800 Subject: [PATCH 021/105] Consider dust threshold for fee rate determination Previously, the `feerate_bump` method did not enforce the dust threshold, which could result in us thinking we had raised the fee rate without actually having done so. Instead, `compute_package_output` blindly accepted the updated fee rate while enforcing a non-dust output value, resulting in repeated broadcast attempts of an identical transaction. Conflicts due to removal of a preceding commit resolved in: * lightning/src/chain/package.rs --- lightning/src/chain/onchaintx.rs | 1 + lightning/src/chain/package.rs | 44 ++++++++++++++++++++------------ 2 files changed, 28 insertions(+), 17 deletions(-) diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 759668cfa9c..2a43b006920 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -215,6 +215,7 @@ pub(crate) enum OnchainClaim { } /// Represents the different feerate strategies a pending request can use when generating a claim. +#[derive(Debug)] pub(crate) enum FeerateStrategy { /// We must reuse the most recently used feerate, if any. RetryPrevious, diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index 53bba3a754b..3698b041eb7 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -1117,10 +1117,10 @@ impl PackageTemplate { // If old feerate is 0, first iteration of this claim, use normal fee calculation if self.feerate_previous != 0 { if let Some((new_fee, feerate)) = feerate_bump( - predicted_weight, input_amounts, self.feerate_previous, feerate_strategy, - conf_target, fee_estimator, logger, + predicted_weight, input_amounts, dust_limit_sats, self.feerate_previous, + feerate_strategy, conf_target, fee_estimator, logger, ) { - return Some((cmp::max(input_amounts as i64 - new_fee as i64, dust_limit_sats as i64) as u64, feerate)); + return Some((input_amounts.saturating_sub(new_fee), feerate)); } } else { if let Some((new_fee, feerate)) = compute_fee_from_spent_amounts(input_amounts, predicted_weight, conf_target, fee_estimator, logger) { @@ -1270,16 +1270,20 @@ fn compute_fee_from_spent_amounts( /// respect BIP125 rules 3) and 4) and if required adjust the new fee to meet the RBF policy /// requirement.
fn feerate_bump( - predicted_weight: u64, input_amounts: u64, previous_feerate: u64, feerate_strategy: &FeerateStrategy, - conf_target: ConfirmationTarget, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + predicted_weight: u64, input_amounts: u64, dust_limit_sats: u64, previous_feerate: u64, + feerate_strategy: &FeerateStrategy, conf_target: ConfirmationTarget, + fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Option<(u64, u64)> where F::Target: FeeEstimator, { + let previous_fee = previous_feerate * predicted_weight / 1000; + // If old feerate inferior to actual one given back by Fee Estimator, use it to compute new fee... let (new_fee, new_feerate) = if let Some((new_fee, new_feerate)) = compute_fee_from_spent_amounts(input_amounts, predicted_weight, conf_target, fee_estimator, logger) { + log_debug!(logger, "Initiating fee rate bump from {} s/kWU ({} s) to {} s/kWU ({} s) using {:?} strategy", previous_feerate, previous_fee, new_feerate, new_fee, feerate_strategy); match feerate_strategy { FeerateStrategy::RetryPrevious => { let previous_fee = previous_feerate * predicted_weight / 1000; @@ -1297,15 +1301,12 @@ where // ...else just increase the previous feerate by 25% (because that's a nice number) let bumped_feerate = previous_feerate + (previous_feerate / 4); let bumped_fee = bumped_feerate * predicted_weight / 1000; - if input_amounts <= bumped_fee { - log_warn!(logger, "Can't 25% bump new claiming tx, amount {} is too small", input_amounts); - return None; - } + (bumped_fee, bumped_feerate) }, } } else { - log_warn!(logger, "Can't new-estimation bump new claiming tx, amount {} is too small", input_amounts); + log_warn!(logger, "Can't bump new claiming tx, input amount {} is too small", input_amounts); return None; }; @@ -1316,17 +1317,26 @@ where return Some((new_fee, new_feerate)); } - let previous_fee = previous_feerate * predicted_weight / 1000; let min_relay_fee = MIN_RELAY_FEE_SAT_PER_1000_WEIGHT * predicted_weight / 1000; // BIP 125 Opt-in Full Replace-by-Fee Signaling // * 3. The replacement transaction pays an absolute fee of at least the sum paid by the original transactions. // * 4. The replacement transaction must also pay for its own bandwidth at or above the rate set by the node's minimum relay fee setting. 
- let new_fee = if new_fee < previous_fee + min_relay_fee { - new_fee + previous_fee + min_relay_fee - new_fee - } else { - new_fee - }; - Some((new_fee, new_fee * 1000 / predicted_weight)) + let naive_new_fee = new_fee; + let new_fee = cmp::max(new_fee, previous_fee + min_relay_fee); + + if new_fee > naive_new_fee { + log_debug!(logger, "Naive fee bump of {}s does not meet min relay fee requirements of {}s", naive_new_fee - previous_fee, min_relay_fee); + } + + let remaining_output_amount = input_amounts.saturating_sub(new_fee); + if remaining_output_amount < dust_limit_sats { + log_warn!(logger, "Can't bump new claiming tx, output amount {} would end up below dust threshold {}", remaining_output_amount, dust_limit_sats); + return None; + } + + let new_feerate = new_fee * 1000 / predicted_weight; + log_debug!(logger, "Fee rate bumped by {}s from {} s/KWU ({} s) to {} s/KWU ({} s)", new_fee - previous_fee, previous_feerate, previous_fee, new_feerate, new_fee); + Some((new_fee, new_feerate)) } #[cfg(test)] From 6d8e18b230eeb1774293d2a0ea0ee1839dcf7c6b Mon Sep 17 00:00:00 2001 From: Arik Sosman Date: Tue, 21 Jan 2025 06:11:58 -0800 Subject: [PATCH 022/105] Fix incremental relay fee to be 1s/vB Bitcoin Core relay policy does not require 16s/vB, which it was previously set to. Trivial conflicts due to removal of a preceding commit resolved in: * lightning/src/chain/chaininterface.rs --- lightning/src/chain/chaininterface.rs | 2 +- lightning/src/ln/functional_tests.rs | 62 ++++++++++++++++----------- 2 files changed, 37 insertions(+), 27 deletions(-) diff --git a/lightning/src/chain/chaininterface.rs b/lightning/src/chain/chaininterface.rs index 84281df1d7b..b9c7e88420d 100644 --- a/lightning/src/chain/chaininterface.rs +++ b/lightning/src/chain/chaininterface.rs @@ -176,7 +176,7 @@ pub trait FeeEstimator { } /// Minimum relay fee as required by bitcoin network mempool policy. -pub const MIN_RELAY_FEE_SAT_PER_1000_WEIGHT: u64 = 4000; +pub const MIN_RELAY_FEE_SAT_PER_1000_WEIGHT: u64 = 253; /// Minimum feerate that takes a sane approach to bitcoind weight-to-vbytes rounding. /// See the following Core Lightning commit for an explanation: /// diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index e52870cf19d..123b394f374 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -1313,18 +1313,22 @@ fn test_duplicate_htlc_different_direction_onchain() { let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); + // post-bump fee (288 satoshis) + dust threshold for output type (294 satoshis) = 582 + let payment_value_sats = 582; + let payment_value_msats = payment_value_sats * 1000; + // balancing send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); let (payment_preimage, payment_hash, ..) 
= route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000); - let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], 800_000); + let (route, _, _, _) = get_route_and_payment_hash!(nodes[1], nodes[0], payment_value_msats); let node_a_payment_secret = nodes[0].node.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap(); - send_along_route_with_secret(&nodes[1], route, &[&[&nodes[0]]], 800_000, payment_hash, node_a_payment_secret); + send_along_route_with_secret(&nodes[1], route, &[&[&nodes[0]]], payment_value_msats, payment_hash, node_a_payment_secret); // Provide preimage to node 0 by claiming payment nodes[0].node.claim_funds(payment_preimage); - expect_payment_claimed!(nodes[0], payment_hash, 800_000); + expect_payment_claimed!(nodes[0], payment_hash, payment_value_msats); check_added_monitors!(nodes[0], 1); // Broadcast node 1 commitment txn @@ -1333,7 +1337,7 @@ fn test_duplicate_htlc_different_direction_onchain() { assert_eq!(remote_txn[0].output.len(), 4); // 1 local, 1 remote, 1 htlc inbound, 1 htlc outbound let mut has_both_htlcs = 0; // check htlcs match ones committed for outp in remote_txn[0].output.iter() { - if outp.value.to_sat() == 800_000 / 1000 { + if outp.value.to_sat() == payment_value_sats { has_both_htlcs += 1; } else if outp.value.to_sat() == 900_000 / 1000 { has_both_htlcs += 1; @@ -1353,18 +1357,15 @@ fn test_duplicate_htlc_different_direction_onchain() { check_spends!(claim_txn[1], remote_txn[0]); check_spends!(claim_txn[2], remote_txn[0]); let preimage_tx = &claim_txn[0]; - let (preimage_bump_tx, timeout_tx) = if claim_txn[1].input[0].previous_output == preimage_tx.input[0].previous_output { - (&claim_txn[1], &claim_txn[2]) - } else { - (&claim_txn[2], &claim_txn[1]) - }; + let timeout_tx = claim_txn.iter().skip(1).find(|t| t.input[0].previous_output != preimage_tx.input[0].previous_output).unwrap(); + let preimage_bump_tx = claim_txn.iter().skip(1).find(|t| t.input[0].previous_output == preimage_tx.input[0].previous_output).unwrap(); assert_eq!(preimage_tx.input.len(), 1); assert_eq!(preimage_bump_tx.input.len(), 1); assert_eq!(preimage_tx.input.len(), 1); assert_eq!(preimage_tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx - assert_eq!(remote_txn[0].output[preimage_tx.input[0].previous_output.vout as usize].value.to_sat(), 800); + assert_eq!(remote_txn[0].output[preimage_tx.input[0].previous_output.vout as usize].value.to_sat(), payment_value_sats); assert_eq!(timeout_tx.input.len(), 1); assert_eq!(timeout_tx.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // HTLC 0 <--> 1, timeout tx @@ -7935,22 +7936,31 @@ fn test_bump_penalty_txn_on_remote_commitment() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000); - let (payment_preimage, payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 3_000_000); - route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0; - - // Remote commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC - let remote_txn = get_local_commitment_txn!(nodes[0], chan.2); - assert_eq!(remote_txn[0].output.len(), 4); - assert_eq!(remote_txn[0].input.len(), 1); - assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.compute_txid()); - - // Claim a HTLC without revocation (provide B monitor with preimage) - nodes[1].node.claim_funds(payment_preimage); - expect_payment_claimed!(nodes[1], payment_hash, 3_000_000); - mine_transaction(&nodes[1], &remote_txn[0]); - check_added_monitors!(nodes[1], 2); - connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires + let remote_txn = { + // post-bump fee (288 satoshis) + dust threshold for output type (294 satoshis) = 582 + let htlc_value_a_msats = 582_000; + let htlc_value_b_msats = 583_000; + + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000); + let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], htlc_value_a_msats); + route_payment(&nodes[1], &vec!(&nodes[0])[..], htlc_value_b_msats); + + // Remote commitment txn with 4 outputs : to_local, to_remote, 1 outgoing HTLC, 1 incoming HTLC + let remote_txn = get_local_commitment_txn!(nodes[0], chan.2); + assert_eq!(remote_txn[0].output.len(), 4); + assert_eq!(remote_txn[0].input.len(), 1); + assert_eq!(remote_txn[0].input[0].previous_output.txid, chan.3.compute_txid()); + + // Claim a HTLC without revocation (provide B monitor with preimage) + nodes[1].node.claim_funds(payment_preimage); + expect_payment_claimed!(nodes[1], payment_hash, htlc_value_a_msats); + mine_transaction(&nodes[1], &remote_txn[0]); + check_added_monitors!(nodes[1], 2); + connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires + // depending on the block connection style, node 1 may have broadcast either 3 or 10 txs + + remote_txn + }; // One or more claim tx should have been broadcast, check it let timeout; From bc6ae06f409b077f57b5c2ef56e4fd15442a82bc Mon Sep 17 00:00:00 2001 From: Arik Sosman Date: Fri, 17 Jan 2025 10:53:44 -0800 Subject: [PATCH 023/105] Test fee rate bumping Create some tests for various `feerate_bump` scenarios and ensure among other things that there are no underflows.
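
For intuition, here is a rough recomputation of the final (passing) case these tests exercise. The constants (1000 WU of predicted weight, a 546 sat dust limit, a 253 s/kWU previous feerate and relay floor) come straight from the tests; the standalone snippet itself is illustrative only:

    // Illustrative arithmetic only; the real logic lives in `feerate_bump` above.
    fn main() {
        let (weight, input_sats, dust_limit, prev_feerate) = (1000u64, 1052u64, 546u64, 253u64);
        let prev_fee = prev_feerate * weight / 1000; // 253 sats
        // ForceBump with no estimator increase: bump the previous feerate by 25%...
        let naive_fee = (prev_feerate + prev_feerate / 4) * weight / 1000; // 316 sats
        // ...but BIP 125 rule 4 says the replacement must also pay for its own bandwidth:
        let min_relay_fee = 253 * weight / 1000; // 253 sats at ~1 s/vB
        let new_fee = core::cmp::max(naive_fee, prev_fee + min_relay_fee); // 506 sats
        // The remaining claim output must stay at or above the dust threshold:
        assert!(input_sats - new_fee >= dust_limit); // 1052 - 506 = 546, exactly at dust
        assert_eq!((new_fee, new_fee * 1000 / weight), (506, 506)); // matches the test
    }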
--- lightning/src/chain/package.rs | 85 +++++++++++++++++++++++++++++++++- 1 file changed, 84 insertions(+), 1 deletion(-) diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index 3698b041eb7..55214006d4c 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -1341,7 +1341,7 @@ where #[cfg(test)] mod tests { - use crate::chain::package::{CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, HolderFundingOutput, HolderHTLCOutput, PackageTemplate, PackageSolvingData, RevokedHTLCOutput, RevokedOutput, WEIGHT_REVOKED_OUTPUT, weight_offered_htlc, weight_received_htlc}; + use crate::chain::package::{CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, HolderFundingOutput, HolderHTLCOutput, PackageTemplate, PackageSolvingData, RevokedHTLCOutput, RevokedOutput, WEIGHT_REVOKED_OUTPUT, weight_offered_htlc, weight_received_htlc, feerate_bump}; use crate::chain::Txid; use crate::ln::chan_utils::HTLCOutputInCommitment; use crate::types::payment::{PaymentPreimage, PaymentHash}; @@ -1359,7 +1359,10 @@ mod tests { use bitcoin::secp256k1::{PublicKey,SecretKey}; use bitcoin::secp256k1::Secp256k1; + use crate::chain::chaininterface::{ConfirmationTarget, FeeEstimator, FEERATE_FLOOR_SATS_PER_KW, LowerBoundedFeeEstimator}; + use crate::chain::onchaintx::FeerateStrategy; use crate::types::features::ChannelTypeFeatures; + use crate::util::test_utils::TestLogger; fn fake_txid(n: u64) -> Txid { Transaction { @@ -1669,4 +1672,84 @@ mod tests { } } } + + struct TestFeeEstimator { + sat_per_kw: u32, + } + + impl FeeEstimator for TestFeeEstimator { + fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 { + self.sat_per_kw + } + } + + #[test] + fn test_feerate_bump() { + let sat_per_kw = FEERATE_FLOOR_SATS_PER_KW; + let test_fee_estimator = &TestFeeEstimator { sat_per_kw }; + let fee_estimator = LowerBoundedFeeEstimator::new(test_fee_estimator); + let fee_rate_strategy = FeerateStrategy::ForceBump; + let confirmation_target = ConfirmationTarget::UrgentOnChainSweep; + + { + // Check underflow doesn't occur + let predicted_weight_units = 1000; + let input_satoshis = 505; + + let logger = TestLogger::new(); + let bumped_fee_rate = feerate_bump(predicted_weight_units, input_satoshis, 546, 253, &fee_rate_strategy, confirmation_target, &fee_estimator, &logger); + assert!(bumped_fee_rate.is_none()); + logger.assert_log_regex("lightning::chain::package", regex::Regex::new(r"Can't bump new claiming tx, input amount 505 is too small").unwrap(), 1); + } + + { + // Check post-25%-bump-underflow scenario satisfying the following constraints: + // input - fee = 546 + // input - fee * 1.25 = -1 + + // We accomplish that scenario with the following values: + // input = 2734 + // fee = 2188 + + let predicted_weight_units = 1000; + let input_satoshis = 2734; + + let logger = TestLogger::new(); + let bumped_fee_rate = feerate_bump(predicted_weight_units, input_satoshis, 546, 2188, &fee_rate_strategy, confirmation_target, &fee_estimator, &logger); + assert!(bumped_fee_rate.is_none()); + logger.assert_log_regex("lightning::chain::package", regex::Regex::new(r"Can't bump new claiming tx, output amount 0 would end up below dust threshold 546").unwrap(), 1); + } + + { + // Check that an output amount of 0 is caught + let predicted_weight_units = 1000; + let input_satoshis = 506; + + let logger = TestLogger::new(); + let bumped_fee_rate = feerate_bump(predicted_weight_units, input_satoshis, 546, 253, &fee_rate_strategy, confirmation_target, &fee_estimator, 
&logger); + assert!(bumped_fee_rate.is_none()); + logger.assert_log_regex("lightning::chain::package", regex::Regex::new(r"Can't bump new claiming tx, output amount 0 would end up below dust threshold 546").unwrap(), 1); + } + + { + // Check that dust_threshold - 1 is blocked + let predicted_weight_units = 1000; + let input_satoshis = 1051; + + let logger = TestLogger::new(); + let bumped_fee_rate = feerate_bump(predicted_weight_units, input_satoshis, 546, 253, &fee_rate_strategy, confirmation_target, &fee_estimator, &logger); + assert!(bumped_fee_rate.is_none()); + logger.assert_log_regex("lightning::chain::package", regex::Regex::new(r"Can't bump new claiming tx, output amount 545 would end up below dust threshold 546").unwrap(), 1); + } + + { + let predicted_weight_units = 1000; + let input_satoshis = 1052; + + let logger = TestLogger::new(); + let bumped_fee_rate = feerate_bump(predicted_weight_units, input_satoshis, 546, 253, &fee_rate_strategy, confirmation_target, &fee_estimator, &logger).unwrap(); + assert_eq!(bumped_fee_rate, (506, 506)); + logger.assert_log_regex("lightning::chain::package", regex::Regex::new(r"Naive fee bump of 63s does not meet min relay fee requirements of 253s").unwrap(), 1); + } + } } From dd5bec95044b048597e11d8f4cb26a2f9d77ac71 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 15 Jan 2025 16:29:44 -0500 Subject: [PATCH 024/105] Outbound payments: pass session privs by reference We need to stop passing this Vec by value for the next commit so we can pass it to a different method. --- lightning/src/ln/outbound_payment.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index c0dea7df52d..4a4b4aaffb2 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -803,7 +803,7 @@ impl OutboundPayments { { let onion_session_privs = self.add_new_pending_payment(payment_hash, recipient_onion.clone(), payment_id, None, route, None, None, entropy_source, best_block_height)?; self.pay_route_internal(route, payment_hash, &recipient_onion, None, None, payment_id, None, - onion_session_privs, node_signer, best_block_height, &send_payment_along_path) + &onion_session_privs, node_signer, best_block_height, &send_payment_along_path) .map_err(|e| { self.remove_outbound_if_all_failed(payment_id, &e); e }) } @@ -983,7 +983,7 @@ impl OutboundPayments { let result = self.pay_route_internal( &route, payment_hash, &recipient_onion, keysend_preimage, invoice_request, payment_id, - Some(route_params.final_value_msat), onion_session_privs, node_signer, best_block_height, + Some(route_params.final_value_msat), &onion_session_privs, node_signer, best_block_height, &send_payment_along_path ); log_info!( @@ -1269,7 +1269,7 @@ impl OutboundPayments { })?; let res = self.pay_route_internal(&route, payment_hash, &recipient_onion, - keysend_preimage, None, payment_id, None, onion_session_privs, node_signer, + keysend_preimage, None, payment_id, None, &onion_session_privs, node_signer, best_block_height, &send_payment_along_path); log_info!(logger, "Sending payment with id {} and hash {} returned {:?}", payment_id, payment_hash, res); @@ -1426,7 +1426,7 @@ impl OutboundPayments { } }; let res = self.pay_route_internal(&route, payment_hash, &recipient_onion, keysend_preimage, - invoice_request.as_ref(), payment_id, Some(total_msat), onion_session_privs, node_signer, + invoice_request.as_ref(), payment_id, Some(total_msat), 
&onion_session_privs, node_signer, best_block_height, &send_payment_along_path); log_info!(logger, "Result retrying payment id {}: {:?}", &payment_id, res); if let Err(e) = res { @@ -1542,7 +1542,7 @@ impl OutboundPayments { let recipient_onion_fields = RecipientOnionFields::spontaneous_empty(); match self.pay_route_internal(&route, payment_hash, &recipient_onion_fields, - None, None, payment_id, None, onion_session_privs, node_signer, best_block_height, + None, None, payment_id, None, &onion_session_privs, node_signer, best_block_height, &send_payment_along_path ) { Ok(()) => Ok((payment_hash, payment_id)), @@ -1733,7 +1733,7 @@ impl OutboundPayments { fn pay_route_internal( &self, route: &Route, payment_hash: PaymentHash, recipient_onion: &RecipientOnionFields, keysend_preimage: Option, invoice_request: Option<&InvoiceRequest>, - payment_id: PaymentId, recv_value_msat: Option, onion_session_privs: Vec<[u8; 32]>, + payment_id: PaymentId, recv_value_msat: Option, onion_session_privs: &Vec<[u8; 32]>, node_signer: &NS, best_block_height: u32, send_payment_along_path: &F ) -> Result<(), PaymentSendFailure> where @@ -1788,7 +1788,7 @@ impl OutboundPayments { let mut path_res = send_payment_along_path(SendAlongPathArgs { path: &path, payment_hash: &payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage: &keysend_preimage, invoice_request, - session_priv_bytes + session_priv_bytes: *session_priv_bytes }); match path_res { Ok(_) => {}, @@ -1872,7 +1872,7 @@ impl OutboundPayments { F: Fn(SendAlongPathArgs) -> Result<(), APIError>, { self.pay_route_internal(route, payment_hash, &recipient_onion, - keysend_preimage, None, payment_id, recv_value_msat, onion_session_privs, + keysend_preimage, None, payment_id, recv_value_msat, &onion_session_privs, node_signer, best_block_height, &send_payment_along_path) .map_err(|e| { self.remove_outbound_if_all_failed(payment_id, &e); e }) } From 152f3578222bc33edeb7d0af831e5d7bdd33b22c Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Wed, 15 Jan 2025 16:43:36 -0500 Subject: [PATCH 025/105] Fix outbound payments memory leak on buggy router MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prior to this patch, if we attempted to send a payment or probe to a buggy route, we would error but continue storing the pending outbound payment forever. Attempts to retry would result in a “duplicate payment” error. In the case of ChannelManager::send_payment, we would also fail to generate a PaymentFailed event, even if the user manually called abandon_payment. This bug is unlikely to have ever been hit in the wild as most users use LDK’s router. Discovered in the course of adding a new send_to_route API. Now, we’ll properly generate events and remove the outbound from storage. 
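
Conceptually, abandoning a payment can only complete (and emit `PaymentFailed`) once no per-path session keys remain marked in-flight, so the fix clears those keys when a send fails before any HTLC is locked in. A minimal sketch of that cleanup, mirroring the `remove_session_privs` helper introduced in the diff below (types as in `outbound_payment.rs`):

    // Sketch only: for each failed path, drop its session key so the pending
    // payment no longer appears in-flight and can reach the Abandoned state.
    fn remove_session_privs_sketch(
        payment: &mut PendingOutboundPayment, route: &Route, onion_session_privs: Vec<[u8; 32]>,
    ) {
        for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs) {
            let removed = payment.remove(&session_priv_bytes, Some(path));
            debug_assert!(removed, "the payment added an entry for every path at send time");
        }
    }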
--- lightning/src/ln/channelmanager.rs | 1 + lightning/src/ln/functional_tests.rs | 1 + lightning/src/ln/outbound_payment.rs | 57 ++++++++++++--- lightning/src/ln/payment_tests.rs | 77 +++++++++++++++++++- pending_changelog/3531-buggy-router-leak.txt | 4 + 5 files changed, 127 insertions(+), 13 deletions(-) create mode 100644 pending_changelog/3531-buggy-router-leak.txt diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index a5ae07eab7f..d58fbaab1af 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -14797,6 +14797,7 @@ mod tests { }, _ => panic!("unexpected error") } + assert!(nodes[0].node.list_recent_payments().is_empty()); } #[test] diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 123b394f374..853985311fe 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -6495,6 +6495,7 @@ fn test_payment_route_reaching_same_channel_twice() { RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), false, APIError::InvalidRoute { ref err }, assert_eq!(err, &"Path went through the same channel twice")); + assert!(nodes[0].node.list_recent_payments().is_empty()); } // BOLT 2 Requirements for the Sender when constructing and sending an update_add_htlc message. diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index 4a4b4aaffb2..8517d599c12 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -992,9 +992,9 @@ impl OutboundPayments { ); if let Err(e) = result { self.handle_pay_route_err( - e, payment_id, payment_hash, route, route_params, router, first_hops, - &inflight_htlcs, entropy_source, node_signer, best_block_height, logger, - pending_events, &send_payment_along_path + e, payment_id, payment_hash, route, route_params, onion_session_privs, router, first_hops, + &inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, + &send_payment_along_path ); } Ok(()) @@ -1274,7 +1274,11 @@ impl OutboundPayments { log_info!(logger, "Sending payment with id {} and hash {} returned {:?}", payment_id, payment_hash, res); if let Err(e) = res { - self.handle_pay_route_err(e, payment_id, payment_hash, route, route_params, router, first_hops, &inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, &send_payment_along_path); + self.handle_pay_route_err( + e, payment_id, payment_hash, route, route_params, onion_session_privs, router, first_hops, + &inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, + &send_payment_along_path + ); } Ok(()) } @@ -1430,15 +1434,21 @@ impl OutboundPayments { best_block_height, &send_payment_along_path); log_info!(logger, "Result retrying payment id {}: {:?}", &payment_id, res); if let Err(e) = res { - self.handle_pay_route_err(e, payment_id, payment_hash, route, route_params, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, send_payment_along_path); + self.handle_pay_route_err( + e, payment_id, payment_hash, route, route_params, onion_session_privs, router, first_hops, + inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, + send_payment_along_path + ); } } fn handle_pay_route_err( &self, err: PaymentSendFailure, payment_id: PaymentId, payment_hash: PaymentHash, route: Route, - mut route_params: RouteParameters, router: &R, 
first_hops: Vec, - inflight_htlcs: &IH, entropy_source: &ES, node_signer: &NS, best_block_height: u32, logger: &L, - pending_events: &Mutex)>>, send_payment_along_path: &SP, + mut route_params: RouteParameters, onion_session_privs: Vec<[u8; 32]>, router: &R, + first_hops: Vec, inflight_htlcs: &IH, entropy_source: &ES, node_signer: &NS, + best_block_height: u32, logger: &L, + pending_events: &Mutex)>>, + send_payment_along_path: &SP, ) where R::Target: Router, @@ -1467,11 +1477,13 @@ impl OutboundPayments { }, PaymentSendFailure::PathParameterError(results) => { log_error!(logger, "Failed to send to route due to parameter error in a single path. Your router is buggy"); + self.remove_session_privs(payment_id, &route, onion_session_privs); Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, results.into_iter(), logger, pending_events); self.abandon_payment(payment_id, PaymentFailureReason::UnexpectedError, pending_events); }, PaymentSendFailure::ParameterError(e) => { log_error!(logger, "Failed to send to route due to parameter error: {:?}. Your router is buggy", e); + self.remove_session_privs(payment_id, &route, onion_session_privs); self.abandon_payment(payment_id, PaymentFailureReason::UnexpectedError, pending_events); }, PaymentSendFailure::DuplicatePayment => debug_assert!(false), // unreachable @@ -1511,6 +1523,21 @@ impl OutboundPayments { } } + // If a payment fails after adding the pending payment but before any HTLCs are locked into + // channels, we need to clear the session_privs in order for abandoning the payment to succeed. + fn remove_session_privs( + &self, payment_id: PaymentId, route: &Route, onion_session_privs: Vec<[u8; 32]> + ) { + if let Some(payment) = self.pending_outbound_payments.lock().unwrap().get_mut(&payment_id) { + for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.into_iter()) { + let removed = payment.remove(&session_priv_bytes, Some(path)); + debug_assert!(removed, "This can't happen as the payment has an entry for this path added by callers"); + } + } else { + debug_assert!(false, "This can't happen as the payment was added by callers"); + } + } + pub(super) fn send_probe( &self, path: Path, probing_cookie_secret: [u8; 32], entropy_source: &ES, node_signer: &NS, best_block_height: u32, send_payment_along_path: F @@ -1784,7 +1811,7 @@ impl OutboundPayments { let cur_height = best_block_height + 1; let mut results = Vec::new(); debug_assert_eq!(route.paths.len(), onion_session_privs.len()); - for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.into_iter()) { + for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) { let mut path_res = send_payment_along_path(SendAlongPathArgs { path: &path, payment_hash: &payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage: &keysend_preimage, invoice_request, @@ -1880,9 +1907,15 @@ impl OutboundPayments { // If we failed to send any paths, remove the new PaymentId from the `pending_outbound_payments` // map as the payment is free to be resent. 
fn remove_outbound_if_all_failed(&self, payment_id: PaymentId, err: &PaymentSendFailure) { - if let &PaymentSendFailure::AllFailedResendSafe(_) = err { - let removed = self.pending_outbound_payments.lock().unwrap().remove(&payment_id).is_some(); - debug_assert!(removed, "We should always have a pending payment to remove here"); + match err { + PaymentSendFailure::AllFailedResendSafe(_) + | PaymentSendFailure::ParameterError(_) + | PaymentSendFailure::PathParameterError(_) => + { + let removed = self.pending_outbound_payments.lock().unwrap().remove(&payment_id).is_some(); + debug_assert!(removed, "We should always have a pending payment to remove here"); + }, + PaymentSendFailure::DuplicatePayment | PaymentSendFailure::PartialFailure { .. } => {} } } diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 0c9c5d0e920..0b06a18eae7 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -24,7 +24,7 @@ use crate::types::payment::{PaymentHash, PaymentSecret, PaymentPreimage}; use crate::ln::chan_utils; use crate::ln::msgs::ChannelMessageHandler; use crate::ln::onion_utils; -use crate::ln::outbound_payment::{IDEMPOTENCY_TIMEOUT_TICKS, Retry, RetryableSendFailure}; +use crate::ln::outbound_payment::{IDEMPOTENCY_TIMEOUT_TICKS, ProbeSendFailure, Retry, RetryableSendFailure}; use crate::routing::gossip::{EffectiveCapacity, RoutingFees}; use crate::routing::router::{get_route, Path, PaymentParameters, Route, Router, RouteHint, RouteHintHop, RouteHop, RouteParameters}; use crate::routing::scoring::ChannelUsage; @@ -1249,6 +1249,7 @@ fn sent_probe_is_probe_of_sending_node() { // First check we refuse to build a single-hop probe let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000); assert!(nodes[0].node.send_probe(route.paths[0].clone()).is_err()); + assert!(nodes[0].node.list_recent_payments().is_empty()); // Then build an actual two-hop probing path let (route, _, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[2], 100_000); @@ -4375,3 +4376,77 @@ fn test_non_strict_forwarding() { let events = nodes[0].node.get_and_clear_pending_events(); expect_payment_failed_conditions_event(events, payment_hash, false, PaymentFailedConditions::new().blamed_scid(routed_scid)); } + +#[test] +fn remove_pending_outbounds_on_buggy_router() { + // Ensure that if a payment errors due to a bogus route, we'll abandon the payment and remove the + // pending outbound from storage. 
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let amt_msat = 10_000; + let payment_id = PaymentId([42; 32]); + let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0) + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + let (mut route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); + + // Extend the path by itself, essentially simulating route going through same channel twice + let cloned_hops = route.paths[0].hops.clone(); + route.paths[0].hops.extend_from_slice(&cloned_hops); + let route_params = route.route_params.clone().unwrap(); + nodes[0].router.expect_find_route(route_params.clone(), Ok(route.clone())); + + nodes[0].node.send_payment( + payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id, route_params, + Retry::Attempts(1) // Even though another attempt is allowed, the payment should fail + ).unwrap(); + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + match &events[0] { + Event::PaymentPathFailed { failure, payment_failed_permanently, .. } => { + assert_eq!(failure, &PathFailure::InitialSend { + err: APIError::InvalidRoute { err: "Path went through the same channel twice".to_string() } + }); + assert!(!payment_failed_permanently); + }, + _ => panic!() + } + match events[1] { + Event::PaymentFailed { reason, .. } => { + assert_eq!(reason.unwrap(), PaymentFailureReason::UnexpectedError); + }, + _ => panic!() + } + assert!(nodes[0].node.list_recent_payments().is_empty()); +} + +#[test] +fn remove_pending_outbound_probe_on_buggy_path() { + // Ensure that if a probe errors due to a bogus route, we'll return an error and remove the + // pending outbound from storage. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let amt_msat = 10_000; + let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), 0) + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + let (mut route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); + + // Extend the path by itself, essentially simulating route going through same channel twice + let cloned_hops = route.paths[0].hops.clone(); + route.paths[0].hops.extend_from_slice(&cloned_hops); + + assert_eq!( + nodes[0].node.send_probe(route.paths.pop().unwrap()).unwrap_err(), + ProbeSendFailure::ParameterError( + APIError::InvalidRoute { err: "Path went through the same channel twice".to_string() } + ) + ); + assert!(nodes[0].node.list_recent_payments().is_empty()); +} diff --git a/pending_changelog/3531-buggy-router-leak.txt b/pending_changelog/3531-buggy-router-leak.txt new file mode 100644 index 00000000000..72714aa8a8b --- /dev/null +++ b/pending_changelog/3531-buggy-router-leak.txt @@ -0,0 +1,4 @@ +## Bug Fixes + +* Fixed a rare case where a custom router returning a buggy route could result in holding onto a + pending payment forever and in some cases failing to generate a PaymentFailed event (#3531). 
From 440a8cc69caf3e970a7593602a46f154ad77ebba Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Fri, 17 Jan 2025 12:20:50 -0500 Subject: [PATCH 026/105] Unify session_priv removal on PaymentSendFailure When an outbound payment fails while paying to a route, we need to remove the session_privs for each failed path in the outbound payment. Previously we were sometimes removing in pay_route_internal and sometimes in handle_pay_route_err, so refactor this so we always remove in handle_pay_route_err. --- lightning/src/ln/outbound_payment.rs | 46 +++++++++++++--------------- 1 file changed, 21 insertions(+), 25 deletions(-) diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index 8517d599c12..996df77a69b 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -1460,10 +1460,24 @@ impl OutboundPayments { { match err { PaymentSendFailure::AllFailedResendSafe(errs) => { + self.remove_session_privs(payment_id, route.paths.iter().zip(onion_session_privs.iter())); Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, errs.into_iter().map(|e| Err(e)), logger, pending_events); self.find_route_and_send_payment(payment_hash, payment_id, route_params, router, first_hops, inflight_htlcs, entropy_source, node_signer, best_block_height, logger, pending_events, send_payment_along_path); }, PaymentSendFailure::PartialFailure { failed_paths_retry: Some(mut retry), results, .. } => { + debug_assert_eq!(results.len(), route.paths.len()); + debug_assert_eq!(results.len(), onion_session_privs.len()); + let failed_paths = results.iter().zip(route.paths.iter().zip(onion_session_privs.iter())) + .filter_map(|(path_res, (path, session_priv))| { + match path_res { + // While a MonitorUpdateInProgress is an Err(_), the payment is still + // considered "in flight" and we shouldn't remove it from the + // PendingOutboundPayment set. + Ok(_) | Err(APIError::MonitorUpdateInProgress) => None, + _ => Some((path, session_priv)) + } + }); + self.remove_session_privs(payment_id, failed_paths); Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut retry, route.paths, results.into_iter(), logger, pending_events); // Some paths were sent, even if we failed to send the full MPP value our recipient may // misbehave and claim the funds, at which point we have to consider the payment sent, so @@ -1477,13 +1491,13 @@ impl OutboundPayments { }, PaymentSendFailure::PathParameterError(results) => { log_error!(logger, "Failed to send to route due to parameter error in a single path. Your router is buggy"); - self.remove_session_privs(payment_id, &route, onion_session_privs); + self.remove_session_privs(payment_id, route.paths.iter().zip(onion_session_privs.iter())); Self::push_path_failed_evs_and_scids(payment_id, payment_hash, &mut route_params, route.paths, results.into_iter(), logger, pending_events); self.abandon_payment(payment_id, PaymentFailureReason::UnexpectedError, pending_events); }, PaymentSendFailure::ParameterError(e) => { log_error!(logger, "Failed to send to route due to parameter error: {:?}. 
Your router is buggy", e); - self.remove_session_privs(payment_id, &route, onion_session_privs); + self.remove_session_privs(payment_id, route.paths.iter().zip(onion_session_privs.iter())); self.abandon_payment(payment_id, PaymentFailureReason::UnexpectedError, pending_events); }, PaymentSendFailure::DuplicatePayment => debug_assert!(false), // unreachable @@ -1525,12 +1539,12 @@ impl OutboundPayments { // If a payment fails after adding the pending payment but before any HTLCs are locked into // channels, we need to clear the session_privs in order for abandoning the payment to succeed. - fn remove_session_privs( - &self, payment_id: PaymentId, route: &Route, onion_session_privs: Vec<[u8; 32]> + fn remove_session_privs<'a, I: Iterator>( + &self, payment_id: PaymentId, path_session_priv: I ) { if let Some(payment) = self.pending_outbound_payments.lock().unwrap().get_mut(&payment_id) { - for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.into_iter()) { - let removed = payment.remove(&session_priv_bytes, Some(path)); + for (path, session_priv_bytes) in path_session_priv { + let removed = payment.remove(session_priv_bytes, Some(path)); debug_assert!(removed, "This can't happen as the payment has an entry for this path added by callers"); } } else { @@ -1812,29 +1826,11 @@ impl OutboundPayments { let mut results = Vec::new(); debug_assert_eq!(route.paths.len(), onion_session_privs.len()); for (path, session_priv_bytes) in route.paths.iter().zip(onion_session_privs.iter()) { - let mut path_res = send_payment_along_path(SendAlongPathArgs { + let path_res = send_payment_along_path(SendAlongPathArgs { path: &path, payment_hash: &payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage: &keysend_preimage, invoice_request, session_priv_bytes: *session_priv_bytes }); - match path_res { - Ok(_) => {}, - Err(APIError::MonitorUpdateInProgress) => { - // While a MonitorUpdateInProgress is an Err(_), the payment is still - // considered "in flight" and we shouldn't remove it from the - // PendingOutboundPayment set. - }, - Err(_) => { - let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap(); - if let Some(payment) = pending_outbounds.get_mut(&payment_id) { - let removed = payment.remove(&session_priv_bytes, Some(path)); - debug_assert!(removed, "This can't happen as the payment has an entry for this path added by callers"); - } else { - debug_assert!(false, "This can't happen as the payment was added by callers"); - path_res = Err(APIError::APIMisuseError { err: "Internal error: payment disappeared during processing. Please report this bug!".to_owned() }); - } - } - } results.push(path_res); } let mut has_ok = false; From f09d33b3eb7ebe2269306d8356ec968bffd13f38 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Tue, 14 Jan 2025 14:27:02 -0500 Subject: [PATCH 027/105] Reinstate ChannelManager::send_payment_with_route API Support more ergonomically sending payments to specific routes. We removed the original version of this API because it was hard to work with, but the concept of sending a payment to a specific route is still useful. Previously, users were able to do this via manually matching the payment id in their router, but that's cumbersome when we could just handle it internally. 
Trivial `use` conflicts resolved in: * lightning/src/ln/chanmon_update_fail_tests.rs * lightning/src/ln/functional_tests.rs Silent rebase conflicts resolved in: * lightning/src/routing/router.rs --- fuzz/src/chanmon_consistency.rs | 31 +++------- lightning/src/chain/channelmonitor.rs | 11 ++-- lightning/src/ln/chanmon_update_fail_tests.rs | 33 ++++------ lightning/src/ln/channelmanager.rs | 61 +++++++++++-------- lightning/src/ln/functional_test_utils.rs | 41 ++++++------- lightning/src/ln/functional_tests.rs | 42 ++++++------- lightning/src/ln/outbound_payment.rs | 17 ------ lightning/src/ln/payment_tests.rs | 43 ++++++++++--- lightning/src/ln/shutdown_tests.rs | 6 +- lightning/src/routing/router.rs | 40 ++++++++++++ 10 files changed, 179 insertions(+), 146 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index c2a49b8ee24..73e4f88f1f3 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -48,7 +48,7 @@ use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE; use lightning::ln::channel_state::ChannelDetails; use lightning::ln::channelmanager::{ ChainParameters, ChannelManager, ChannelManagerReadArgs, PaymentId, RecentPaymentDetails, - RecipientOnionFields, Retry, + RecipientOnionFields, }; use lightning::ln::functional_test_utils::*; use lightning::ln::inbound_payment::ExpandedKey; @@ -82,7 +82,6 @@ use bitcoin::secp256k1::{self, Message, PublicKey, Scalar, Secp256k1, SecretKey} use lightning::io::Cursor; use std::cmp::{self, Ordering}; -use std::collections::VecDeque; use std::mem; use std::sync::atomic; use std::sync::{Arc, Mutex}; @@ -113,22 +112,14 @@ impl FeeEstimator for FuzzEstimator { } } -struct FuzzRouter { - pub next_routes: Mutex>, -} +struct FuzzRouter {} impl Router for FuzzRouter { fn find_route( &self, _payer: &PublicKey, _params: &RouteParameters, _first_hops: Option<&[&ChannelDetails]>, _inflight_htlcs: InFlightHtlcs, ) -> Result { - if let Some(route) = self.next_routes.lock().unwrap().pop_front() { - return Ok(route); - } - Err(msgs::LightningError { - err: String::from("Not implemented"), - action: msgs::ErrorAction::IgnoreError, - }) + unreachable!() } fn create_blinded_payment_paths( @@ -518,7 +509,7 @@ fn send_payment( PaymentParameters::from_node_id(source.get_our_node_id(), TEST_FINAL_CLTV), amt, ); - source.router.next_routes.lock().unwrap().push_back(Route { + let route = Route { paths: vec![Path { hops: vec![RouteHop { pubkey: dest.get_our_node_id(), @@ -532,11 +523,10 @@ fn send_payment( blinded_tail: None, }], route_params: Some(route_params.clone()), - }); + }; let onion = RecipientOnionFields::secret_only(payment_secret); let payment_id = PaymentId(payment_id); - let res = - source.send_payment(payment_hash, onion, payment_id, route_params, Retry::Attempts(0)); + let res = source.send_payment_with_route(route, payment_hash, onion, payment_id); match res { Err(err) => { panic!("Errored with {:?} on initial payment send", err); @@ -592,7 +582,7 @@ fn send_hop_payment( PaymentParameters::from_node_id(source.get_our_node_id(), TEST_FINAL_CLTV), amt, ); - source.router.next_routes.lock().unwrap().push_back(Route { + let route = Route { paths: vec![Path { hops: vec![ RouteHop { @@ -617,11 +607,10 @@ fn send_hop_payment( blinded_tail: None, }], route_params: Some(route_params.clone()), - }); + }; let onion = RecipientOnionFields::secret_only(payment_secret); let payment_id = PaymentId(payment_id); - let res = - source.send_payment(payment_hash, onion, payment_id, route_params, 
Retry::Attempts(0)); + let res = source.send_payment_with_route(route, payment_hash, onion, payment_id); match res { Err(err) => { panic!("Errored with {:?} on initial payment send", err); @@ -640,7 +629,7 @@ fn send_hop_payment( pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let out = SearchingOutput::new(underlying_out); let broadcast = Arc::new(TestBroadcaster {}); - let router = FuzzRouter { next_routes: Mutex::new(VecDeque::new()) }; + let router = FuzzRouter {}; macro_rules! make_node { ($node_id: expr, $fee_estimator: expr) => {{ diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 3f6bdc3f256..62207eeafbb 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -5092,7 +5092,7 @@ mod tests { use crate::chain::chaininterface::LowerBoundedFeeEstimator; use super::ChannelMonitorUpdateStep; - use crate::{check_added_monitors, check_spends, get_local_commitment_txn, get_monitor, get_route_and_payment_hash, unwrap_send_err}; + use crate::{check_added_monitors, check_spends, get_local_commitment_txn, get_monitor, get_route_and_payment_hash}; use crate::chain::{BestBlock, Confirm}; use crate::chain::channelmonitor::{ChannelMonitor, WithChannelMonitor}; use crate::chain::package::{weight_offered_htlc, weight_received_htlc, weight_revoked_offered_htlc, weight_revoked_received_htlc, WEIGHT_REVOKED_OUTPUT}; @@ -5102,10 +5102,9 @@ mod tests { use crate::types::payment::{PaymentPreimage, PaymentHash}; use crate::ln::channel_keys::{DelayedPaymentBasepoint, DelayedPaymentKey, HtlcBasepoint, RevocationBasepoint, RevocationKey}; use crate::ln::chan_utils::{self,HTLCOutputInCommitment, ChannelPublicKeys, ChannelTransactionParameters, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters}; - use crate::ln::channelmanager::{PaymentSendFailure, PaymentId, RecipientOnionFields}; + use crate::ln::channelmanager::{PaymentId, RecipientOnionFields}; use crate::ln::functional_test_utils::*; use crate::ln::script::ShutdownScript; - use crate::util::errors::APIError; use crate::util::test_utils::{TestLogger, TestBroadcaster, TestFeeEstimator}; use crate::util::ser::{ReadableArgs, Writeable}; use crate::util::logger::Logger; @@ -5166,9 +5165,9 @@ mod tests { // If the ChannelManager tries to update the channel, however, the ChainMonitor will pass // the update through to the ChannelMonitor which will refuse it (as the channel is closed). 
let (route, payment_hash, _, payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], 100_000); - unwrap_send_err!(nodes[1].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) - ), false, APIError::MonitorUpdateInProgress, {}); + nodes[1].node.send_payment_with_route(route, payment_hash, + RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) + ).unwrap(); check_added_monitors!(nodes[1], 1); // Build a new ChannelMonitorUpdate which contains both the failing commitment tx update diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index fcc1f8f5a64..2d01ece1158 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -19,13 +19,12 @@ use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor}; use crate::chain::transaction::OutPoint; use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch}; use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination}; -use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields}; +use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields}; use crate::ln::channel::{AnnouncementSigsState, ChannelPhase}; use crate::ln::msgs; use crate::ln::types::ChannelId; use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler}; use crate::util::test_channel_signer::TestChannelSigner; -use crate::util::errors::APIError; use crate::util::ser::{ReadableArgs, Writeable}; use crate::util::test_utils::TestBroadcaster; @@ -133,9 +132,9 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); { - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0) - ), false, APIError::MonitorUpdateInProgress, {}); + nodes[0].node.send_payment_with_route(route, payment_hash_1, + RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0) + ).unwrap(); check_added_monitors!(nodes[0], 1); } @@ -190,9 +189,9 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000); { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0) - ), false, APIError::MonitorUpdateInProgress, {}); + nodes[0].node.send_payment_with_route(route, payment_hash_2, + RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0) + ).unwrap(); check_added_monitors!(nodes[0], 1); } @@ -257,9 +256,9 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0) - ), false, APIError::MonitorUpdateInProgress, {}); + 
nodes[0].node.send_payment_with_route(route, payment_hash_2, + RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0) + ).unwrap(); check_added_monitors!(nodes[0], 1); } @@ -2004,16 +2003,10 @@ fn test_path_paused_mpp() { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - // Now check that we get the right return value, indicating that the first path succeeded but - // the second got a MonitorUpdateInProgress err. This implies - // PaymentSendFailure::PartialFailure as some paths succeeded, preventing retry. - if let Err(PaymentSendFailure::PartialFailure { results, ..}) = nodes[0].node.send_payment_with_route( + // The first path should have succeeded with the second getting a MonitorUpdateInProgress err. + nodes[0].node.send_payment_with_route( route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) - ) { - assert_eq!(results.len(), 2); - if let Ok(()) = results[0] {} else { panic!(); } - if let Err(APIError::MonitorUpdateInProgress) = results[1] {} else { panic!(); } - } else { panic!(); } + ).unwrap(); check_added_monitors!(nodes[0], 2); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index d58fbaab1af..a5978013c3b 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -55,9 +55,7 @@ use crate::ln::channel_state::ChannelDetails; use crate::types::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures}; #[cfg(any(feature = "_test_utils", test))] use crate::types::features::Bolt11InvoiceFeatures; -use crate::routing::router::{BlindedTail, InFlightHtlcs, Path, Payee, PaymentParameters, RouteParameters, Router}; -#[cfg(test)] -use crate::routing::router::Route; +use crate::routing::router::{BlindedTail, FixedRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router}; use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundHTLCErr, NextPacketDetails}; use crate::ln::msgs; use crate::ln::onion_utils; @@ -2397,9 +2395,6 @@ where fee_estimator: LowerBoundedFeeEstimator, chain_monitor: M, tx_broadcaster: T, - #[cfg(fuzzing)] - pub router: R, - #[cfg(not(fuzzing))] router: R, message_router: MR, @@ -4622,21 +4617,31 @@ where } } - // Deprecated send method, for testing use [`Self::send_payment`] and - // [`TestRouter::expect_find_route`] instead. - // - // [`TestRouter::expect_find_route`]: crate::util::test_utils::TestRouter::expect_find_route - #[cfg(test)] - pub(crate) fn send_payment_with_route( - &self, route: Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, + /// Sends a payment along a given route. See [`Self::send_payment`] for more info. + /// + /// LDK will not automatically retry this payment, though it may be manually re-sent after an + /// [`Event::PaymentFailed`] is generated. 
+ pub fn send_payment_with_route( + &self, mut route: Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId - ) -> Result<(), PaymentSendFailure> { + ) -> Result<(), RetryableSendFailure> { let best_block_height = self.best_block.read().unwrap().height; let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); + let route_params = route.route_params.clone().unwrap_or_else(|| { + // Create a dummy route params since they're a required parameter but unused in this case + let (payee_node_id, cltv_delta) = route.paths.first() + .and_then(|path| path.hops.last().map(|hop| (hop.pubkey, hop.cltv_expiry_delta as u32))) + .unwrap_or_else(|| (PublicKey::from_slice(&[2; 33]).unwrap(), MIN_FINAL_CLTV_EXPIRY_DELTA as u32)); + let dummy_payment_params = PaymentParameters::from_node_id(payee_node_id, cltv_delta); + RouteParameters::from_payment_params_and_value(dummy_payment_params, route.get_total_amount()) + }); + if route.route_params.is_none() { route.route_params = Some(route_params.clone()); } + let router = FixedRouter::new(route); self.pending_outbound_payments - .send_payment_with_route(&route, payment_hash, recipient_onion, payment_id, - &self.entropy_source, &self.node_signer, best_block_height, - |args| self.send_payment_along_path(args)) + .send_payment(payment_hash, recipient_onion, payment_id, Retry::Attempts(0), + route_params, &&router, self.list_usable_channels(), || self.compute_inflight_htlcs(), + &self.entropy_source, &self.node_signer, best_block_height, &self.logger, + &self.pending_events, |args| self.send_payment_along_path(args)) } /// Sends a payment to the route found using the provided [`RouteParameters`], retrying failed @@ -4665,7 +4670,8 @@ where /// [`ChannelManager::list_recent_payments`] for more information. /// /// Routes are automatically found using the [`Router`] provided on startup. To fix a route for a - /// particular payment, match the [`PaymentId`] passed to [`Router::find_route_with_id`]. + /// particular payment, use [`Self::send_payment_with_route`] or match the [`PaymentId`] passed to + /// [`Router::find_route_with_id`].
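+ ///
+ /// To illustrate the latter approach, a wrapping `Router` could hand back a pinned route
+ /// whenever the `PaymentId` matches. This is only a sketch: `PinningRouter` and its `pinned`
+ /// map are hypothetical, and the remaining `Router` methods would delegate to `inner`.
+ /// ```ignore
+ /// struct PinningRouter<R: Router> { inner: R, pinned: Mutex<HashMap<PaymentId, Route>> }
+ /// impl<R: Router> Router for PinningRouter<R> {
+ /// 	fn find_route_with_id(
+ /// 		&self, payer: &PublicKey, route_params: &RouteParameters,
+ /// 		first_hops: Option<&[&ChannelDetails]>, inflight_htlcs: InFlightHtlcs,
+ /// 		payment_hash: PaymentHash, payment_id: PaymentId,
+ /// 	) -> Result<Route, LightningError> {
+ /// 		if let Some(route) = self.pinned.lock().unwrap().remove(&payment_id) {
+ /// 			return Ok(route);
+ /// 		}
+ /// 		self.inner.find_route_with_id(payer, route_params, first_hops, inflight_htlcs, payment_hash, payment_id)
+ /// 	}
+ /// 	// `find_route` and `create_blinded_payment_paths` delegate to `inner` unchanged.
+ /// }
+ /// ```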
/// /// [`Event::PaymentSent`]: events::Event::PaymentSent /// [`Event::PaymentFailed`]: events::Event::PaymentFailed @@ -14363,7 +14369,7 @@ mod tests { use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason}; use crate::ln::types::ChannelId; use crate::types::payment::{PaymentPreimage, PaymentHash, PaymentSecret}; - use crate::ln::channelmanager::{create_recv_pending_htlc_info, HTLCForwardInfo, inbound_payment, PaymentId, PaymentSendFailure, RecipientOnionFields, InterceptId}; + use crate::ln::channelmanager::{create_recv_pending_htlc_info, HTLCForwardInfo, inbound_payment, PaymentId, RecipientOnionFields, InterceptId}; use crate::ln::functional_test_utils::*; use crate::ln::msgs::{self, ErrorAction}; use crate::ln::msgs::ChannelMessageHandler; @@ -14789,14 +14795,17 @@ mod tests { route.paths[1].hops[0].short_channel_id = chan_2_id; route.paths[1].hops[1].short_channel_id = chan_4_id; - match nodes[0].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)) - .unwrap_err() { - PaymentSendFailure::ParameterError(APIError::APIMisuseError { ref err }) => { - assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err)) - }, - _ => panic!("unexpected error") + nodes[0].node.send_payment_with_route(route, payment_hash, + RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)).unwrap(); + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentFailed { reason, .. } => { + assert_eq!(reason.unwrap(), crate::events::PaymentFailureReason::UnexpectedError); + } + _ => panic!() } + nodes[0].logger.assert_log_contains("lightning::ln::outbound_payment", "Payment secret is required for multi-path payments", 2); assert!(nodes[0].node.list_recent_payments().is_empty()); } diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index b4f172b4a27..63341969326 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1064,30 +1064,27 @@ macro_rules! get_local_commitment_txn { /// Check the error from attempting a payment. #[macro_export] macro_rules! unwrap_send_err { - ($res: expr, $all_failed: expr, $type: pat, $check: expr) => { - match &$res { - &Err(PaymentSendFailure::AllFailedResendSafe(ref fails)) if $all_failed => { - assert_eq!(fails.len(), 1); - match fails[0] { - $type => { $check }, - _ => panic!(), - } - }, - &Err(PaymentSendFailure::PartialFailure { ref results, .. }) if !$all_failed => { - assert_eq!(results.len(), 1); - match results[0] { - Err($type) => { $check }, - _ => panic!(), - } - }, - &Err(PaymentSendFailure::PathParameterError(ref result)) if !$all_failed => { - assert_eq!(result.len(), 1); - match result[0] { - Err($type) => { $check }, - _ => panic!(), + ($node: expr, $res: expr, $all_failed: expr, $type: pat, $check: expr) => { + assert!($res.is_ok()); + let events = $node.node.get_and_clear_pending_events(); + assert!(events.len() == 2); + match &events[0] { + crate::events::Event::PaymentPathFailed { failure, .. } => { + match failure { + crate::events::PathFailure::InitialSend { err } => { + match err { + $type => { $check }, + _ => panic!() + } + }, + _ => panic!() } }, - _ => {panic!()}, + _ => panic!() + } + match &events[1] { + crate::events::Event::PaymentFailed { .. 
} => {}, + _ => panic!() } } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 853985311fe..9b7a6c83f18 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -23,7 +23,7 @@ use crate::events::{Event, FundingInfo, MessageSendEvent, MessageSendEventsProvi use crate::ln::types::ChannelId; use crate::types::payment::{PaymentPreimage, PaymentSecret, PaymentHash}; use crate::ln::channel::{CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase}; -use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA}; +use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA}; use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError}; use crate::ln::{chan_utils, onion_utils}; use crate::ln::chan_utils::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment}; @@ -1187,7 +1187,7 @@ fn holding_cell_htlc_counting() { // the holding cell waiting on B's RAA to send. At this point we should not be able to add // another HTLC. { - unwrap_send_err!(nodes[1].node.send_payment_with_route(route, payment_hash_1, + unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route, payment_hash_1, RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0) ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1411,14 +1411,8 @@ fn test_basic_channel_reserve() { get_route_and_payment_hash!(nodes[0], nodes[1], max_can_send); route.paths[0].hops.last_mut().unwrap().fee_msat += 1; let err = nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).err().unwrap(); - match err { - PaymentSendFailure::AllFailedResendSafe(ref fails) => { - if let &APIError::ChannelUnavailable { .. } = &fails[0] {} - else { panic!("Unexpected error variant"); } - }, - _ => panic!("Unexpected error variant"), - } + RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)); + unwrap_send_err!(nodes[0], err, true, APIError::ChannelUnavailable { .. }, {} ); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); send_payment(&nodes[0], &vec![&nodes[1]], max_can_send); @@ -1604,7 +1598,7 @@ fn test_chan_reserve_violation_outbound_htlc_inbound_chan() { } // However one more HTLC should be significantly over the reserve amount and fail. - unwrap_send_err!(nodes[1].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::ChannelUnavailable { .. 
}, {}); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1704,7 +1698,7 @@ fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() { let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[1], nodes[0], dust_amt); route.paths[0].hops[0].fee_msat += 1; - unwrap_send_err!(nodes[1].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::ChannelUnavailable { .. }, {}); } @@ -1916,7 +1910,7 @@ fn test_channel_reserve_holding_cell_htlcs() { route.paths[0].hops.last_mut().unwrap().fee_msat += 1; assert!(route.paths[0].hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat)); - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -1988,7 +1982,7 @@ fn test_channel_reserve_holding_cell_htlcs() { let mut route = route_1.clone(); route.paths[0].hops.last_mut().unwrap().fee_msat = recv_value_2 + 1; let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(nodes[2]); - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -2018,7 +2012,7 @@ fn test_channel_reserve_holding_cell_htlcs() { let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_22); route.paths[0].hops.last_mut().unwrap().fee_msat += 1; - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::ChannelUnavailable { .. 
}, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -6491,7 +6485,7 @@ fn test_payment_route_reaching_same_channel_twice() { let cloned_hops = route.paths[0].hops.clone(); route.paths[0].hops.extend_from_slice(&cloned_hops); - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), false, APIError::InvalidRoute { ref err }, assert_eq!(err, &"Path went through the same channel twice")); @@ -6514,7 +6508,7 @@ fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() { let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); route.paths[0].hops[0].fee_msat = 100; - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -6531,13 +6525,13 @@ fn test_update_add_htlc_bolt2_sender_zero_value_msat() { let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100000); route.paths[0].hops[0].fee_msat = 0; - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, "Cannot send 0-msat HTLC")); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 1); + nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot send 0-msat HTLC", 2); } #[test] @@ -6578,7 +6572,7 @@ fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() { .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, 100000000); route.paths[0].hops.last_mut().unwrap().cltv_expiry_delta = 500000001; - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::InvalidRoute { ref err }, assert_eq!(err, &"Channel CLTV overflowed?")); @@ -6622,7 +6616,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_claimable!(nodes[1], our_payment_hash, our_payment_secret, 100000); } - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::ChannelUnavailable { .. 
}, {}); @@ -6646,7 +6640,7 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() { // Manually create a route over our max in flight (which our router normally automatically // limits us to). route.paths[0].hops[0].fee_msat = max_in_flight + 1; - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, our_payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, our_payment_hash, RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0) ), true, APIError::ChannelUnavailable { .. }, {}); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -10305,11 +10299,11 @@ fn do_test_max_dust_htlc_exposure(dust_outbound_balance: bool, exposure_breach_e if on_holder_tx { dust_outbound_htlc_on_holder_tx_msat } else { dust_htlc_on_counterparty_tx_msat + 1 }; // With default dust exposure: 5000 sats if on_holder_tx { - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) ), true, APIError::ChannelUnavailable { .. }, {}); } else { - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) ), true, APIError::ChannelUnavailable { .. }, {}); } diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index 996df77a69b..7b579b7a261 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -790,23 +790,6 @@ impl OutboundPayments { best_block_height, logger, pending_events, &send_payment_along_path) } - #[cfg(test)] - pub(super) fn send_payment_with_route( - &self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, - payment_id: PaymentId, entropy_source: &ES, node_signer: &NS, best_block_height: u32, - send_payment_along_path: F - ) -> Result<(), PaymentSendFailure> - where - ES::Target: EntropySource, - NS::Target: NodeSigner, - F: Fn(SendAlongPathArgs) -> Result<(), APIError> - { - let onion_session_privs = self.add_new_pending_payment(payment_hash, recipient_onion.clone(), payment_id, None, route, None, None, entropy_source, best_block_height)?; - self.pay_route_internal(route, payment_hash, &recipient_onion, None, None, payment_id, None, - &onion_session_privs, node_signer, best_block_height, &send_payment_along_path) - .map_err(|e| { self.remove_outbound_if_all_failed(payment_id, &e); e }) - } - pub(super) fn send_spontaneous_payment( &self, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields, payment_id: PaymentId, retry_strategy: Retry, route_params: RouteParameters, router: &R, diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 0b06a18eae7..0963ed0aa4f 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -16,7 +16,7 @@ use crate::chain::channelmonitor::{ANTI_REORG_DELAY, HTLC_FAIL_BACK_BUFFER, LATE use crate::sign::EntropySource; use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentFailureReason, PaymentPurpose}; use crate::ln::channel::{EXPIRE_PREV_CONFIG_TICKS, get_holder_selected_channel_reserve_satoshis, ANCHOR_OUTPUT_VALUE_SATOSHI}; -use 
crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, PaymentSendFailure, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo}; +use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, MPP_TIMEOUT_TICKS, MIN_CLTV_EXPIRY_DELTA, PaymentId, RecentPaymentDetails, RecipientOnionFields, HTLCForwardInfo, PendingHTLCRouting, PendingAddHTLCInfo}; use crate::types::features::{Bolt11InvoiceFeatures, ChannelTypeFeatures}; use crate::ln::msgs; use crate::ln::types::ChannelId; @@ -599,7 +599,7 @@ fn no_pending_leak_on_initial_send_failure() { nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); - unwrap_send_err!(nodes[0].node.send_payment_with_route(route, payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) ), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, "Peer for first hop currently disconnected")); @@ -946,7 +946,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { // confirming, we will fail as it's considered still-pending... let (new_route, _, _, _) = get_route_and_payment_hash!(nodes[0], nodes[2], if use_dust { 1_000 } else { 1_000_000 }); match nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id) { - Err(PaymentSendFailure::DuplicatePayment) => {}, + Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected error") } assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -985,7 +985,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); match nodes[0].node.send_payment_with_route(new_route.clone(), payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id) { - Err(PaymentSendFailure::DuplicatePayment) => {}, + Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected error") } assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -1004,7 +1004,7 @@ fn do_test_completed_payment_not_retryable_on_reload(use_dust: bool) { reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); match nodes[0].node.send_payment_with_route(new_route, payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id) { - Err(PaymentSendFailure::DuplicatePayment) => {}, + Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected error") } assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -1548,7 +1548,7 @@ fn claimed_send_payment_idempotent() { let send_result = nodes[0].node.send_payment_with_route(route.clone(), second_payment_hash, RecipientOnionFields::secret_only(second_payment_secret), payment_id); match send_result { - Err(PaymentSendFailure::DuplicatePayment) => {}, + Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected send result: {:?}", send_result), } @@ -1627,7 +1627,7 @@ fn abandoned_send_payment_idempotent() { let send_result = nodes[0].node.send_payment_with_route(route.clone(), second_payment_hash, RecipientOnionFields::secret_only(second_payment_secret), payment_id); match send_result { - Err(PaymentSendFailure::DuplicatePayment) => {}, + Err(RetryableSendFailure::DuplicatePayment) => {}, _ => panic!("Unexpected send result: {:?}", 
send_result), } @@ -4450,3 +4450,32 @@ fn remove_pending_outbound_probe_on_buggy_path() { ); assert!(nodes[0].node.list_recent_payments().is_empty()); } + +#[test] +fn pay_route_without_params() { + // Make sure we can use ChannelManager::send_payment_with_route to pay a route where + // Route::route_parameters is None. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + create_announced_chan_between_nodes(&nodes, 0, 1); + + let amt_msat = 10_000; + let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id(), TEST_FINAL_CLTV) + .with_bolt11_features(nodes[1].node.bolt11_invoice_features()).unwrap(); + let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], payment_params, amt_msat); + route.route_params.take(); + nodes[0].node.send_payment_with_route( + route, payment_hash, RecipientOnionFields::secret_only(payment_secret), + PaymentId(payment_hash.0) + ).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); + pass_along_path(&nodes[0], &[&nodes[1]], amt_msat, payment_hash, Some(payment_secret), node_1_msgs, true, None); + claim_payment_along_route( + ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], payment_preimage) + ); +} diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 9fd428329af..960dc441ae5 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -15,7 +15,7 @@ use crate::chain::ChannelMonitorUpdateStatus; use crate::chain::transaction::OutPoint; use crate::events::{Event, MessageSendEvent, HTLCDestination, MessageSendEventsProvider, ClosureReason}; use crate::ln::channel_state::{ChannelDetails, ChannelShutdownState}; -use crate::ln::channelmanager::{self, PaymentSendFailure, PaymentId, RecipientOnionFields, Retry}; +use crate::ln::channelmanager::{self, PaymentId, RecipientOnionFields, Retry}; use crate::routing::router::{PaymentParameters, get_route, RouteParameters}; use crate::ln::msgs; use crate::ln::types::ChannelId; @@ -364,10 +364,10 @@ fn updates_shutdown_wait() { let route_params = RouteParameters::from_payment_params_and_value(payment_params_2, 100_000); let route_2 = get_route(&nodes[1].node.get_our_node_id(), &route_params, &nodes[1].network_graph.read_only(), None, &logger, &scorer, &Default::default(), &random_seed_bytes).unwrap(); - unwrap_send_err!(nodes[0].node.send_payment_with_route(route_1, payment_hash, + unwrap_send_err!(nodes[0], nodes[0].node.send_payment_with_route(route_1, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) ), true, APIError::ChannelUnavailable {..}, {}); - unwrap_send_err!(nodes[1].node.send_payment_with_route(route_2, payment_hash, + unwrap_send_err!(nodes[1], nodes[1].node.send_payment_with_route(route_2, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) ), true, APIError::ChannelUnavailable {..}, {}); diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 78a93aa0d39..04f55837267 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -25,6 +25,7 @@ use 
crate::offers::invoice::Bolt12Invoice; use crate::routing::gossip::{DirectedChannelInfo, EffectiveCapacity, ReadOnlyNetworkGraph, NetworkGraph, NodeId}; use crate::routing::scoring::{ChannelUsage, LockableScore, ScoreLookUp}; use crate::sign::EntropySource; +use crate::sync::Mutex; use crate::util::ser::{Writeable, Readable, ReadableArgs, Writer}; use crate::util::logger::{Level, Logger}; use crate::crypto::chacha20::ChaCha20; @@ -185,6 +186,45 @@ impl>, L: Deref, ES: Deref, S: Deref, SP: Size } } +/// A `Router` that returns a fixed route one time, erroring otherwise. Useful for +/// `ChannelManager::send_payment_with_route` to support sending to specific routes without +/// requiring a custom `Router` implementation. +pub(crate) struct FixedRouter { + // Use an `Option` to avoid needing to clone the route when `find_route` is called. + route: Mutex>, +} + +impl FixedRouter { + pub(crate) fn new(route: Route) -> Self { + Self { route: Mutex::new(Some(route)) } + } +} + +impl Router for FixedRouter { + fn find_route( + &self, _payer: &PublicKey, _route_params: &RouteParameters, + _first_hops: Option<&[&ChannelDetails]>, _inflight_htlcs: InFlightHtlcs + ) -> Result { + self.route.lock().unwrap().take().ok_or_else(|| { + LightningError { + err: "Can't use this router to return multiple routes".to_owned(), + action: ErrorAction::IgnoreError, + } + }) + } + + fn create_blinded_payment_paths< + T: secp256k1::Signing + secp256k1::Verification + > ( + &self, _recipient: PublicKey, _first_hops: Vec, _tlvs: ReceiveTlvs, + _amount_msats: u64, _secp_ctx: &Secp256k1 + ) -> Result, ()> { + // Should be unreachable as this router is only intended to provide a one-time payment route. + debug_assert!(false); + Err(()) + } +} + /// A trait defining behavior for routing a payment. pub trait Router { /// Finds a [`Route`] for a payment between the given `payer` and a payee. From a31d70d5793be6de38602f8a8eae721d5d6af8b9 Mon Sep 17 00:00:00 2001 From: Devrandom Date: Fri, 17 Jan 2025 10:52:08 +0100 Subject: [PATCH 028/105] RawBolt11Invoice to/from ascii utilities for remote signing of invoices --- lightning-invoice/src/lib.rs | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/lightning-invoice/src/lib.rs b/lightning-invoice/src/lib.rs index 07c8342b5ea..17cc41f9502 100644 --- a/lightning-invoice/src/lib.rs +++ b/lightning-invoice/src/lib.rs @@ -48,6 +48,7 @@ use core::iter::FilterMap; use core::num::ParseIntError; use core::ops::Deref; use core::slice::Iter; +use core::str::FromStr; use core::time::Duration; #[cfg(feature = "serde")] @@ -78,8 +79,12 @@ use crate::prelude::*; /// Re-export serialization traits #[cfg(fuzzing)] pub use crate::de::FromBase32; +#[cfg(not(fuzzing))] +use crate::de::FromBase32; #[cfg(fuzzing)] pub use crate::ser::Base32Iterable; +#[cfg(not(fuzzing))] +use crate::ser::Base32Iterable; /// Errors that indicate what is wrong with the invoice. They have some granularity for debug /// reasons, but should generally result in an "invalid BOLT11 invoice" message for the user. @@ -1086,9 +1091,6 @@ impl RawBolt11Invoice { /// Calculate the hash of the encoded `RawBolt11Invoice` which should be signed. pub fn signable_hash(&self) -> [u8; 32] { - #[cfg(not(fuzzing))] - use crate::ser::Base32Iterable; - Self::hash_from_parts(self.hrp.to_string().as_bytes(), self.data.fe_iter()) } @@ -1189,6 +1191,21 @@ impl RawBolt11Invoice { pub fn currency(&self) -> Currency { self.hrp.currency.clone() } + + /// Convert to HRP prefix and Fe32 encoded data part. 
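Together with `from_raw` just below, this enables the remote-signing flow the commit message describes. A sketch of the round trip (the `remote_sign` transport and both helper functions are hypothetical, not part of the patch):

```rust
use bech32::Fe32;
use bitcoin::secp256k1::ecdsa::RecoverableSignature;
use lightning_invoice::{Bolt11ParseError, RawBolt11Invoice, SignedRawBolt11Invoice};

// Node side: ship the unsigned parts to a remote signer, then attach the
// signature it returns. `remote_sign` stands in for whatever transport the
// deployment uses (RPC, HSM protocol, etc.).
fn sign_remotely(
	invoice: RawBolt11Invoice,
	remote_sign: impl FnOnce(String, Vec<Fe32>) -> RecoverableSignature,
) -> SignedRawBolt11Invoice {
	let (hrp, data) = invoice.to_raw();
	let sig = remote_sign(hrp, data);
	invoice.sign(|_| Ok::<_, ()>(sig)).expect("the signing closure is infallible")
}

// Signer side: rebuild the invoice from the wire form, inspect it, and
// produce the hash that actually gets signed.
fn hash_on_signer(hrp: &str, data: &[Fe32]) -> Result<[u8; 32], Bolt11ParseError> {
	let raw = RawBolt11Invoice::from_raw(hrp, data)?;
	// ... policy checks (amount, expiry, payee) would go here ...
	Ok(raw.signable_hash())
}
```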
+ /// Can be used to transmit unsigned invoices for remote signing. + pub fn to_raw(&self) -> (String, Vec) { + (self.hrp.to_string(), self.data.fe_iter().collect()) + } + + /// Convert from HRP prefix and Fe32 encoded data part. + /// Can be used to receive unsigned invoices for remote signing. + pub fn from_raw(hrp: &str, data: &[Fe32]) -> Result { + let raw_hrp: RawHrp = RawHrp::from_str(hrp)?; + let data_part = RawDataPart::from_base32(data)?; + + Ok(Self { hrp: raw_hrp, data: data_part }) + } } impl PositiveTimestamp { From 710598df99a31b95369c84a929d487d3884b73ee Mon Sep 17 00:00:00 2001 From: Alec Chen Date: Sun, 19 Jan 2025 23:57:13 +0000 Subject: [PATCH 029/105] Add cltv expiry to PendingHTLCRouting::Forward In a coming commit we'll expire HTLCs backwards even if we haven't yet claimed them on-chain based on their inbound edge being close to causing a channel force-closure. Here we track the incoming edge's CLTV expiry in the pending-routing state so that we can include it in the `HTLCSource` in the next commit. Co-authored-by: Matt Corallo --- lightning/src/ln/channelmanager.rs | 15 +++++++++++++-- lightning/src/ln/onion_payment.rs | 1 + 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index a5978013c3b..90c2ffd704f 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -164,6 +164,8 @@ pub enum PendingHTLCRouting { short_channel_id: u64, // This should be NonZero eventually when we bump MSRV /// Set if this HTLC is being forwarded within a blinded path. blinded: Option, + /// The absolute CLTV of the inbound HTLC + incoming_cltv_expiry: Option, }, /// The onion indicates that this is a payment for an invoice (supposedly) generated by us. /// @@ -264,6 +266,14 @@ impl PendingHTLCRouting { _ => None, } } + + fn incoming_cltv_expiry(&self) -> Option { + match self { + Self::Forward { incoming_cltv_expiry, .. } => *incoming_cltv_expiry, + Self::Receive { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry), + Self::ReceiveKeysend { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry), + } + } } /// Information about an incoming HTLC, including the [`PendingHTLCRouting`] describing where it @@ -5541,9 +5551,9 @@ where })?; let routing = match payment.forward_info.routing { - PendingHTLCRouting::Forward { onion_packet, blinded, .. } => { + PendingHTLCRouting::Forward { onion_packet, blinded, incoming_cltv_expiry, .. 
} => { PendingHTLCRouting::Forward { - onion_packet, blinded, short_channel_id: next_hop_scid + onion_packet, blinded, incoming_cltv_expiry, short_channel_id: next_hop_scid, } }, _ => unreachable!() // Only `PendingHTLCRouting::Forward`s are intercepted @@ -12378,6 +12388,7 @@ impl_writeable_tlv_based_enum!(PendingHTLCRouting, (0, onion_packet, required), (1, blinded, option), (2, short_channel_id, required), + (3, incoming_cltv_expiry, option), }, (1, Receive) => { (0, payment_data, required), diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs index 193cdd1582a..f9d4f371227 100644 --- a/lightning/src/ln/onion_payment.rs +++ b/lightning/src/ln/onion_payment.rs @@ -110,6 +110,7 @@ pub(super) fn create_fwd_pending_htlc_info( routing: PendingHTLCRouting::Forward { onion_packet: outgoing_packet, short_channel_id, + incoming_cltv_expiry: Some(msg.cltv_expiry), blinded: intro_node_blinding_point.or(msg.blinding_point) .map(|bp| BlindedForward { inbound_blinding_point: bp, From 9723b2eb18c4b8d4f504889c114a28bcef3d39d7 Mon Sep 17 00:00:00 2001 From: Alec Chen Date: Mon, 20 Jan 2025 00:09:00 +0000 Subject: [PATCH 030/105] Add cltv expiry to HTLCPreviousHopData In a coming commit we'll expire HTLCs backwards even if we haven't yet claimed them on-chain based on their inbound edge being close to causing a channel force-closure. Here we track and expose the incoming edge's CLTV expiry in the `HTLCSource`, giving `ChannelMonitor` access to it. Co-authored-by: Matt Corallo --- lightning/src/ln/channelmanager.rs | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 90c2ffd704f..b4003a7a50e 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -394,6 +394,9 @@ pub(crate) struct HTLCPreviousHopData { // channel with a preimage provided by the forward channel. outpoint: OutPoint, counterparty_node_id: Option, + /// Used to preserve our backwards channel by failing back in case an HTLC claim in the forward + /// channel remains unconfirmed for too long. + cltv_expiry: Option, } #[derive(PartialEq, Eq)] @@ -696,6 +699,15 @@ impl HTLCSource { true } } + + /// Returns the CLTV expiry of the inbound HTLC (i.e. the source referred to by this object), + /// if the source was a forwarded HTLC and the HTLC was first forwarded on LDK 0.1.1 or later. + pub(crate) fn inbound_htlc_expiry(&self) -> Option { + match self { + Self::PreviousHopData(HTLCPreviousHopData { cltv_expiry, .. }) => *cltv_expiry, + _ => None, + } + } } /// This enum is used to specify which error data to send to peers when failing back an HTLC @@ -5592,7 +5604,7 @@ where err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)) })?; - if let PendingHTLCRouting::Forward { short_channel_id, .. } = payment.forward_info.routing { + if let PendingHTLCRouting::Forward { short_channel_id, incoming_cltv_expiry, .. 
} = payment.forward_info.routing { let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: payment.prev_short_channel_id, user_channel_id: Some(payment.prev_user_channel_id), @@ -5603,6 +5615,7 @@ where incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret, phantom_shared_secret: None, blinded_failure: payment.forward_info.routing.blinded_failure(), + cltv_expiry: incoming_cltv_expiry, }); let failure_reason = HTLCFailReason::from_failure_code(0x4000 | 10); @@ -5777,6 +5790,7 @@ where outgoing_cltv_value, .. } }) => { + let cltv_expiry = routing.incoming_cltv_expiry(); macro_rules! failure_handler { ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => { let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id), Some(payment_hash)); @@ -5792,6 +5806,7 @@ where incoming_packet_shared_secret: incoming_shared_secret, phantom_shared_secret: $phantom_ss, blinded_failure: routing.blinded_failure(), + cltv_expiry, }); let reason = if $next_hop_unknown { @@ -5901,7 +5916,7 @@ where prev_user_channel_id, prev_counterparty_node_id, forward_info: PendingHTLCInfo { incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, routing: PendingHTLCRouting::Forward { - ref onion_packet, blinded, .. + ref onion_packet, blinded, incoming_cltv_expiry, .. }, skimmed_fee_msat, .. }, }) => { @@ -5916,6 +5931,7 @@ where // Phantom payments are only PendingHTLCRouting::Receive. phantom_shared_secret: None, blinded_failure: blinded.map(|b| b.failure), + cltv_expiry: incoming_cltv_expiry, }); let next_blinding_point = blinded.and_then(|b| { b.next_blinding_override.or_else(|| { @@ -6090,6 +6106,7 @@ where incoming_packet_shared_secret: incoming_shared_secret, phantom_shared_secret, blinded_failure, + cltv_expiry: Some(cltv_expiry), }, // We differentiate the received value from the sender intended value // if possible so that we don't prematurely mark MPP payments complete @@ -6123,6 +6140,7 @@ where incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret, phantom_shared_secret, blinded_failure, + cltv_expiry: Some(cltv_expiry), }), payment_hash, HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data), HTLCDestination::FailedPayment { payment_hash: $payment_hash }, @@ -8998,6 +9016,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ incoming_packet_shared_secret: forward_info.incoming_shared_secret, phantom_shared_secret: None, blinded_failure: forward_info.routing.blinded_failure(), + cltv_expiry: forward_info.routing.incoming_cltv_expiry(), }); failed_intercept_forwards.push((htlc_source, forward_info.payment_hash, @@ -11161,6 +11180,7 @@ where outpoint: htlc.prev_funding_outpoint, channel_id: htlc.prev_channel_id, blinded_failure: htlc.forward_info.routing.blinded_failure(), + cltv_expiry: htlc.forward_info.routing.incoming_cltv_expiry(), }); let requested_forward_scid /* intercept scid */ = match htlc.forward_info.routing { @@ -12504,6 +12524,7 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, { (2, outpoint, required), (3, blinded_failure, option), (4, htlc_id, required), + (5, cltv_expiry, option), (6, incoming_packet_shared_secret, required), (7, user_channel_id, option), // Note that by the time we get past the required read for type 2 above, outpoint will be From c90c394f7088a8166ec0f9823f9ad1287e9112a9 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 21 Jan 2025 22:00:51 +0000 Subject: [PATCH 031/105] Drop `Channel::historical_inbound_htlc_fulfills` This field was used to test that any HTLC failures didn't come in after an HTLC was fulfilled (indicating, somewhat dubiously, that there may be a bug causing us to fail when we shouldn't have). In the next commit, we'll be failing HTLCs based on on-chain HTLC expiry, but may ultimately receive the preimage thereafter. This would make the `historical_inbound_htlc_fulfills` checks potentially-brittle, so we just remove them as they have dubious value. --- lightning/src/ln/channel.rs | 86 ++++-------------------------- lightning/src/ln/channelmanager.rs | 1 - 2 files changed, 9 insertions(+), 78 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 5e3d72a6241..c15a4bee643 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -1453,15 +1453,6 @@ pub(super) struct ChannelContext where SP::Target: SignerProvider { /// [`msgs::RevokeAndACK`] message from the counterparty. sent_message_awaiting_response: Option, - #[cfg(any(test, fuzzing))] - // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the - // corresponding HTLC on the inbound path. If, then, the outbound path channel is - // disconnected and reconnected (before we've exchange commitment_signed and revoke_and_ack - // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This - // is fine, but as a sanity check in our failure to generate the second claim, we check here - // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC. 
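A note on the `(5, cltv_expiry, option)` entry added to `HTLCPreviousHopData`'s TLV table above: marking the new field `option` with an odd type number keeps the serialization compatible in both directions, since new readers tolerate old data that lacks the field and old readers skip unknown odd records in new data. A minimal sketch of the same pattern on a hypothetical struct (assuming the crate's serialization macros are in scope, as they are internally):

```rust
// Hypothetical struct showing the convention used for `cltv_expiry` above.
struct ExampleHopData {
	short_channel_id: u64,    // original field, always present
	cltv_expiry: Option<u32>, // added later; older serializations lack it
}

impl_writeable_tlv_based!(ExampleHopData, {
	(0, short_channel_id, required), // even type: readers must understand it
	(5, cltv_expiry, option),        // odd type: safe to omit or to skip
});
```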
- historical_inbound_htlc_fulfills: HashSet, - /// This channel's type, as negotiated during channel open channel_type: ChannelTypeFeatures, @@ -2210,9 +2201,6 @@ impl ChannelContext where SP::Target: SignerProvider { funding_tx_broadcast_safe_event_emitted: false, channel_ready_event_emitted: false, - #[cfg(any(test, fuzzing))] - historical_inbound_htlc_fulfills: new_hash_set(), - channel_type, channel_keys_id, @@ -2443,9 +2431,6 @@ impl ChannelContext where SP::Target: SignerProvider { funding_tx_broadcast_safe_event_emitted: false, channel_ready_event_emitted: false, - #[cfg(any(test, fuzzing))] - historical_inbound_htlc_fulfills: new_hash_set(), - channel_type, channel_keys_id, @@ -4472,10 +4457,6 @@ impl Channel where } } if pending_idx == core::usize::MAX { - #[cfg(any(test, fuzzing))] - // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and - // this is simply a duplicate claim, not previously failed and we lost funds. - debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); return UpdateFulfillFetch::DuplicateClaim {}; } @@ -4505,8 +4486,6 @@ impl Channel where if htlc_id_arg == htlc_id { // Make sure we don't leave latest_monitor_update_id incremented here: self.context.latest_monitor_update_id -= 1; - #[cfg(any(test, fuzzing))] - debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); return UpdateFulfillFetch::DuplicateClaim {}; } }, @@ -4528,12 +4507,8 @@ impl Channel where self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg, }); - #[cfg(any(test, fuzzing))] - self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg); return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None }; } - #[cfg(any(test, fuzzing))] - self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg); { let htlc = &mut self.context.pending_inbound_htlcs[pending_idx]; @@ -4598,14 +4573,8 @@ impl Channel where } } - /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill - /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot, - /// however, fail more than once as we wait for an upstream failure to be irrevocably committed - /// before we fail backwards. - /// - /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always - /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be - /// [`ChannelError::Ignore`]. + /// Returns `Err` (always with [`ChannelError::Ignore`]) if the HTLC could not be failed (e.g. + /// if it was already resolved). Otherwise returns `Ok`. pub fn queue_fail_htlc(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L) -> Result<(), ChannelError> where L::Target: Logger { self.fail_htlc(htlc_id_arg, err_packet, true, logger) @@ -4623,14 +4592,8 @@ impl Channel where .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?")) } - /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill - /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot, - /// however, fail more than once as we wait for an upstream failure to be irrevocably committed - /// before we fail backwards. - /// - /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always - /// return `Ok(_)` if preconditions are met. 
In any case, `Err`s will only be - /// [`ChannelError::Ignore`]. + /// Returns `Err` (always with [`ChannelError::Ignore`]) if the HTLC could not be failed (e.g. + /// if it was already resolved). Otherwise returns `Ok`. fn fail_htlc( &mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool, logger: &L @@ -4648,12 +4611,8 @@ impl Channel where if htlc.htlc_id == htlc_id_arg { match htlc.state { InboundHTLCState::Committed => {}, - InboundHTLCState::LocalRemoved(ref reason) => { - if let &InboundHTLCRemovalReason::Fulfill(_) = reason { - } else { - debug_assert!(false, "Tried to fail an HTLC that was already failed"); - } - return Ok(None); + InboundHTLCState::LocalRemoved(_) => { + return Err(ChannelError::Ignore(format!("HTLC {} was already resolved", htlc.htlc_id))); }, _ => { debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to"); @@ -4664,11 +4623,7 @@ impl Channel where } } if pending_idx == core::usize::MAX { - #[cfg(any(test, fuzzing))] - // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this - // is simply a duplicate fail, not previously failed and we failed-back too early. - debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); - return Ok(None); + return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc_id_arg))); } if !self.context.channel_state.can_generate_new_commitment() { @@ -4682,17 +4637,14 @@ impl Channel where match pending_update { &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => { if htlc_id_arg == htlc_id { - #[cfg(any(test, fuzzing))] - debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg)); - return Ok(None); + return Err(ChannelError::Ignore(format!("HTLC {} was already claimed!", htlc_id))); } }, &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } | &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => { if htlc_id_arg == htlc_id { - debug_assert!(false, "Tried to fail an HTLC that was already failed"); - return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned())); + return Err(ChannelError::Ignore(format!("HTLC {} was already pending failure", htlc_id))); } }, _ => {} @@ -9543,13 +9495,6 @@ impl Writeable for Channel where SP::Target: SignerProvider { self.context.channel_update_status.write(writer)?; - #[cfg(any(test, fuzzing))] - (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?; - #[cfg(any(test, fuzzing))] - for htlc in self.context.historical_inbound_htlc_fulfills.iter() { - htlc.write(writer)?; - } - // If the channel type is something other than only-static-remote-key, then we need to have // older clients fail to deserialize this channel at all. 
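A note on the `fail_htlc` rewrite above: duplicate resolutions now surface as `ChannelError::Ignore` instead of tripping debug assertions, so callers can treat them as loggable no-ops. A sketch of the expected handling (hypothetical call site inside the crate):

```rust
// An already-resolved HTLC now comes back as a recoverable
// `ChannelError::Ignore`, which is safe to log and drop.
match channel.queue_fail_htlc(htlc_id, err_packet, &logger) {
	Ok(()) => { /* failure queued; sent with the next commitment update */ },
	Err(ChannelError::Ignore(msg)) => {
		log_debug!(logger, "Skipping HTLC fail-back: {}", msg);
	},
	Err(e) => return Err(e), // not expected per the docs above, but don't swallow it
}
```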
If the type is // only-static-remote-key, we simply consider it "default" and don't write the channel type @@ -9883,16 +9828,6 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch let channel_update_status = Readable::read(reader)?; - #[cfg(any(test, fuzzing))] - let mut historical_inbound_htlc_fulfills = new_hash_set(); - #[cfg(any(test, fuzzing))] - { - let htlc_fulfills_len: u64 = Readable::read(reader)?; - for _ in 0..htlc_fulfills_len { - assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?)); - } - } - let pending_update_fee = if let Some(feerate) = pending_update_fee_value { Some((feerate, if channel_parameters.is_outbound_from_holder { FeeUpdateState::Outbound @@ -10233,9 +10168,6 @@ impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c Ch channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true), channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true), - #[cfg(any(test, fuzzing))] - historical_inbound_htlc_fulfills, - channel_type: channel_type.unwrap(), channel_keys_id, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index b4003a7a50e..6c8f3139c6a 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -6047,7 +6047,6 @@ where // fail-backs are best-effort, we probably already have one // pending, and if not that's OK, if not, the channel is on // the chain and sending the HTLC-Timeout is their problem. - continue; } } } From d5fed87bd9e8f31afbe89207830f55fcd1d662c3 Mon Sep 17 00:00:00 2001 From: Alec Chen Date: Tue, 21 Jan 2025 22:03:01 +0000 Subject: [PATCH 032/105] Fail HTLC backwards before upstream claims on-chain Fail inbound HTLCs if they expire within a certain number of blocks from the current height. If we haven't seen the preimage for an HTLC by the time the previous hop's timeout expires, we've lost that HTLC, so we might as well fail it back instead of having our counterparty force-close the channel. Co-authored-by: Matt Corallo --- lightning/src/chain/channelmonitor.rs | 74 ++++++++++++ lightning/src/ln/functional_tests.rs | 158 ++++++++++++++++++++++++-- 2 files changed, 224 insertions(+), 8 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 62207eeafbb..7cf3a320508 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -1023,6 +1023,12 @@ pub(crate) struct ChannelMonitorImpl { /// The first block height at which we had no remaining claimable balances. balances_empty_height: Option, + + /// In-memory only HTLC ids used to track upstream HTLCs that have been failed backwards due to + /// a downstream channel force-close remaining unconfirmed by the time the upstream timeout + /// expires. This is used to tell us we already generated an event to fail this HTLC back + /// during a previous block scan. + failed_back_htlc_ids: HashSet, } /// Transaction outputs to watch for on-chain spends. @@ -1445,6 +1451,8 @@ impl ChannelMonitor { counterparty_node_id: Some(counterparty_node_id), initial_counterparty_commitment_info: None, balances_empty_height: None, + + failed_back_htlc_ids: new_hash_set(), }) } @@ -4221,6 +4229,71 @@ impl ChannelMonitorImpl { } } + if self.lockdown_from_offchain || self.funding_spend_seen || self.holder_tx_signed { + // Fail back HTLCs on backwards channels if they expire within + // `LATENCY_GRACE_PERIOD_BLOCKS` blocks and the channel is closed (i.e. 
we're at a + // point where no further off-chain updates will be accepted). If we haven't seen the + // preimage for an HTLC by the time the previous hop's timeout expires, we've lost that + // HTLC, so we might as well fail it back instead of having our counterparty force-close + // the inbound channel. + let current_holder_htlcs = self.current_holder_commitment_tx.htlc_outputs.iter() + .map(|&(ref a, _, ref b)| (a, b.as_ref())); + + let current_counterparty_htlcs = if let Some(txid) = self.current_counterparty_commitment_txid { + if let Some(htlc_outputs) = self.counterparty_claimable_outpoints.get(&txid) { + Some(htlc_outputs.iter().map(|&(ref a, ref b)| (a, b.as_ref().map(|boxed| &**boxed)))) + } else { None } + } else { None }.into_iter().flatten(); + + let prev_counterparty_htlcs = if let Some(txid) = self.prev_counterparty_commitment_txid { + if let Some(htlc_outputs) = self.counterparty_claimable_outpoints.get(&txid) { + Some(htlc_outputs.iter().map(|&(ref a, ref b)| (a, b.as_ref().map(|boxed| &**boxed)))) + } else { None } + } else { None }.into_iter().flatten(); + + let htlcs = current_holder_htlcs + .chain(current_counterparty_htlcs) + .chain(prev_counterparty_htlcs); + + let height = self.best_block.height; + for (htlc, source_opt) in htlcs { + // Only check forwarded HTLCs' previous hops + let source = match source_opt { + Some(source) => source, + None => continue, + }; + let inbound_htlc_expiry = match source.inbound_htlc_expiry() { + Some(cltv_expiry) => cltv_expiry, + None => continue, + }; + let max_expiry_height = height.saturating_add(LATENCY_GRACE_PERIOD_BLOCKS); + if inbound_htlc_expiry > max_expiry_height { + continue; + } + let duplicate_event = self.pending_monitor_events.iter().any( + |update| if let &MonitorEvent::HTLCEvent(ref upd) = update { + upd.source == *source + } else { false }); + if duplicate_event { + continue; + } + if !self.failed_back_htlc_ids.insert(SentHTLCId::from_source(source)) { + continue; + } + if !duplicate_event { + log_error!(logger, "Failing back HTLC {} upstream to preserve the \ + channel as the forward HTLC hasn't resolved and our backward HTLC \ + expires soon at {}", log_bytes!(htlc.payment_hash.0), inbound_htlc_expiry); + self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate { + source: source.clone(), + payment_preimage: None, + payment_hash: htlc.payment_hash, + htlc_value_satoshis: Some(htlc.amount_msat / 1000), + })); + } + } + } + let conf_target = self.closure_conf_target(); self.onchain_tx_handler.update_claims_view_from_requests(claimable_outpoints, conf_height, self.best_block.height, broadcaster, conf_target, fee_estimator, logger); self.onchain_tx_handler.update_claims_view_from_matched_txn(&txn_matched, conf_height, conf_hash, self.best_block.height, broadcaster, conf_target, fee_estimator, logger); @@ -5066,6 +5139,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP counterparty_node_id, initial_counterparty_commitment_info, balances_empty_height, + failed_back_htlc_ids: new_hash_set(), }))) } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 9b7a6c83f18..b29ee99e077 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -2277,6 +2277,138 @@ fn channel_reserve_in_flight_removes() { claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3); } +enum PostFailBackAction { + TimeoutOnChain, + ClaimOnChain, + FailOffChain, + ClaimOffChain, +} + +#[test] +fn 
test_fail_back_before_backwards_timeout() { + do_test_fail_back_before_backwards_timeout(PostFailBackAction::TimeoutOnChain); + do_test_fail_back_before_backwards_timeout(PostFailBackAction::ClaimOnChain); + do_test_fail_back_before_backwards_timeout(PostFailBackAction::FailOffChain); + do_test_fail_back_before_backwards_timeout(PostFailBackAction::ClaimOffChain); +} + +fn do_test_fail_back_before_backwards_timeout(post_fail_back_action: PostFailBackAction) { + // Test that we fail an HTLC upstream if we are still waiting for confirmation downstream + // just before the upstream timeout expires + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + for node in nodes.iter() { + *node.fee_estimator.sat_per_kw.lock().unwrap() = 2000; + } + + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + create_announced_chan_between_nodes(&nodes, 0, 1); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); + + // Start every node on the same block height to make reasoning about timeouts easier + connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); + connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); + connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); + + let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3_000_000); + + // Force close the B<->C channel by timing out the HTLC + let timeout_blocks = TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1; + connect_blocks(&nodes[1], timeout_blocks); + let node_1_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT); + check_closed_event(&nodes[1], 1, ClosureReason::HTLCsTimedOut, false, &[node_c_id], 100_000); + check_closed_broadcast(&nodes[1], 1, true); + check_added_monitors(&nodes[1], 1); + + // After the A<->B HTLC gets within LATENCY_GRACE_PERIOD_BLOCKS we will fail the HTLC to avoid + // the channel force-closing. Note that we already connected `TEST_FINAL_CLTV + + // LATENCY_GRACE_PERIOD_BLOCKS` blocks above, so we subtract that from the HTLC expiry (which + // is `TEST_FINAL_CLTV` + `MIN_CLTV_EXPIRY_DELTA`). + let upstream_timeout_blocks = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS * 2; + connect_blocks(&nodes[1], upstream_timeout_blocks); + + // Connect blocks for nodes[0] to make sure they don't go on-chain + connect_blocks(&nodes[0], timeout_blocks + upstream_timeout_blocks); + + // Check that nodes[1] fails the HTLC upstream + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], + vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_2.2 + }]); + check_added_monitors!(nodes[1], 1); + let htlc_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); + let msgs::CommitmentUpdate { update_fail_htlcs, commitment_signed, .. 
} = htlc_updates; + + nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false); + expect_payment_failed_conditions(&nodes[0], payment_hash, false, + PaymentFailedConditions::new().blamed_chan_closed(true)); + + // Make sure we handle possible duplicate fails or extra messages after failing back + match post_fail_back_action { + PostFailBackAction::TimeoutOnChain => { + // Confirm nodes[1]'s claim with timeout, make sure we don't fail upstream again + mine_transaction(&nodes[1], &node_1_txn[0]); // Commitment + mine_transaction(&nodes[1], &node_1_txn[1]); // HTLC timeout + connect_blocks(&nodes[1], ANTI_REORG_DELAY); + // Expect handling another fail back event, but the HTLC is already gone + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], + vec![HTLCDestination::NextHopChannel { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_2.2 + }]); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + }, + PostFailBackAction::ClaimOnChain => { + nodes[2].node.claim_funds(payment_preimage); + expect_payment_claimed!(nodes[2], payment_hash, 3_000_000); + check_added_monitors!(nodes[2], 1); + get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); + + connect_blocks(&nodes[2], TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + 2); + let node_2_txn = test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::SUCCESS); + check_closed_broadcast!(nodes[2], true); + check_closed_event(&nodes[2], 1, ClosureReason::HTLCsTimedOut, false, &[node_b_id], 100_000); + check_added_monitors!(nodes[2], 1); + + mine_transaction(&nodes[1], &node_2_txn[0]); // Commitment + mine_transaction(&nodes[1], &node_2_txn[1]); // HTLC success + connect_blocks(&nodes[1], ANTI_REORG_DELAY); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + }, + PostFailBackAction::FailOffChain => { + nodes[2].node.fail_htlc_backwards(&payment_hash); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], + vec![HTLCDestination::FailedPayment { payment_hash }]); + check_added_monitors!(nodes[2], 1); + let commitment_update = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); + let update_fail = commitment_update.update_fail_htlcs[0].clone(); + + nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &update_fail); + let err_msg = get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id()); + assert_eq!(err_msg.channel_id, chan_2.2); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + }, + PostFailBackAction::ClaimOffChain => { + nodes[2].node.claim_funds(payment_preimage); + expect_payment_claimed!(nodes[2], payment_hash, 3_000_000); + check_added_monitors!(nodes[2], 1); + let commitment_update = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); + let update_fulfill = commitment_update.update_fulfill_htlcs[0].clone(); + + nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &update_fulfill); + let err_msg = get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id()); + assert_eq!(err_msg.channel_id, chan_2.2); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + }, + }; +} + #[test] fn channel_monitor_network_test() { // Simple test which builds a network of ChannelManagers, connects them to each other, and @@ -2381,7 +2513,7 @@ fn channel_monitor_network_test() { let node2_commitment_txid; { let node_txn = test_txn_broadcast(&nodes[2], &chan_3, 
None, HTLCType::NONE); - connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + MIN_CLTV_EXPIRY_DELTA as u32 + 1); + connect_blocks(&nodes[2], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS); test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT); node2_commitment_txid = node_txn[0].compute_txid(); @@ -3319,8 +3451,8 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { // Broadcast timeout transaction by B on received output from C's commitment tx on B's chain // Verify that B's ChannelManager is able to detect that HTLC is timeout by its own tx and react backward in consequence mine_transaction(&nodes[1], &commitment_tx[0]); - check_closed_event!(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false - , [nodes[2].node.get_our_node_id()], 100000); + check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, + &[nodes[2].node.get_our_node_id()], 100000); let htlc_expiry = get_monitor!(nodes[1], chan_2.2).get_claimable_balances().iter().filter_map(|bal| if let Balance::MaybeTimeoutClaimableHTLC { claimable_height, .. } = bal { Some(*claimable_height) @@ -9792,6 +9924,8 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); *nodes[0].connect_style.borrow_mut() = ConnectStyle::BestBlockFirstSkippingBlocks; + let node_c_id = nodes[2].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let (chan_announce, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 1, 2); let (_, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); @@ -9807,7 +9941,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t let conf_height = nodes[1].best_block_info().1; if !test_height_before_timelock { - connect_blocks(&nodes[1], 24 * 6); + connect_blocks(&nodes[1], TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS); } nodes[1].chain_monitor.chain_monitor.transactions_confirmed( &nodes[1].get_block_header(conf_height), &[(0, &node_txn[0])], conf_height); @@ -9826,10 +9960,6 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t &spending_txn[0] }; check_spends!(htlc_tx, node_txn[0]); - // We should also generate a SpendableOutputs event with the to_self output (as its - // timelock is up). - let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager); - assert_eq!(descriptor_spend_txn.len(), 1); // If we also discover that the HTLC-Timeout transaction was confirmed some time ago, we // should immediately fail-backwards the HTLC to the previous hop, without waiting for an @@ -9848,6 +9978,18 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true); expect_payment_failed_with_update!(nodes[0], payment_hash, false, chan_announce.contents.short_channel_id, true); + + // We should also generate a SpendableOutputs event with the to_self output (once the + // timelock is up). 
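+		// (This connects BREAKDOWN_TIMEOUT - (TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS) - 1
+		// further blocks, i.e. the remainder of the to_self output's BREAKDOWN_TIMEOUT delay
+		// after the TEST_FINAL_CLTV - LATENCY_GRACE_PERIOD_BLOCKS blocks connected above.)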
+		connect_blocks(&nodes[1], (BREAKDOWN_TIMEOUT as u32) - TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS - 1);
+		let descriptor_spend_txn = check_spendable_outputs!(nodes[1], node_cfgs[1].keys_manager);
+		assert_eq!(descriptor_spend_txn.len(), 1);
+
+		// When the HTLC times out on the A<->B edge, the B<->C channel will fail the HTLC back to
+		// avoid the A<->B channel closing (even though it already has). This will generate a
+		// spurious HTLCHandlingFailed event.
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1],
+			vec![HTLCDestination::NextHopChannel { node_id: Some(node_c_id), channel_id }]);
	}
}

From c65d12d30739f72f57a025d97a8c98997fb124ff Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Tue, 21 Jan 2025 21:53:01 +0000
Subject: [PATCH 033/105] Fail all `ChannelMonitorUpdate`s after
 `holder_tx_signed`

If we've signed the latest holder tx (i.e. we've force-closed and broadcasted
our state), there's not much reason to accept counterparty-transaction-updating
`ChannelMonitorUpdate`s; we should make sure the `ChannelManager` fails the
channel as soon as possible.

This standardizes the failure cases to also match those added in the previous
commit, which makes things a bit more readable.
---
 lightning/src/chain/channelmonitor.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 7cf3a320508..e1f47060fbe 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -3282,7 +3282,7 @@ impl ChannelMonitorImpl {
 			}
 		}

-		if ret.is_ok() && (self.funding_spend_seen || self.lockdown_from_offchain) && is_pre_close_update {
+		if ret.is_ok() && (self.funding_spend_seen || self.lockdown_from_offchain || self.holder_tx_signed) && is_pre_close_update {
 			log_error!(logger, "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent");
 			Err(())
 		} else { ret }

From 15a567d5754ca438d3966878fea7d4ed10a340ed Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Fri, 24 Jan 2025 09:44:40 +0100
Subject: [PATCH 034/105] Introduce `SpendableOutputDescriptor::outpoint` accessor

---
 lightning/src/sign/mod.rs   | 9 +++++++++
 lightning/src/util/sweep.rs | 8 +-------
 2 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs
index 2be0cb39f4f..7aa41531ce2 100644
--- a/lightning/src/sign/mod.rs
+++ b/lightning/src/sign/mod.rs
@@ -538,6 +538,15 @@ impl SpendableOutputDescriptor {
 		};
 		Ok((psbt, expected_max_weight))
 	}
+
+	/// Returns the outpoint of the spendable output.
+	pub fn outpoint(&self) -> OutPoint {
+		match self {
+			Self::StaticOutput { outpoint, .. } => *outpoint,
+			Self::StaticPaymentOutput(descriptor) => descriptor.outpoint,
+			Self::DelayedPaymentOutput(descriptor) => descriptor.outpoint,
+		}
+	}
 }

 /// The parameters required to derive a channel signer via [`SignerProvider`].
diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs
index b61306194df..78acef9d727 100644
--- a/lightning/src/util/sweep.rs
+++ b/lightning/src/util/sweep.rs
@@ -71,13 +71,7 @@ impl TrackedSpendableOutput {

 	/// Returns whether the output is spent in the given transaction.
 	pub fn is_spent_in(&self, tx: &Transaction) -> bool {
-		let prev_outpoint = match &self.descriptor {
-			SpendableOutputDescriptor::StaticOutput { outpoint, ..
} => *outpoint,
-			SpendableOutputDescriptor::DelayedPaymentOutput(output) => output.outpoint,
-			SpendableOutputDescriptor::StaticPaymentOutput(output) => output.outpoint,
-		}
-		.into_bitcoin_outpoint();
-
+		let prev_outpoint = self.descriptor.outpoint().into_bitcoin_outpoint();
 		tx.input.iter().any(|input| input.previous_output == prev_outpoint)
 	}
 }

From 6712b4e24ed6df03fb91b16b51c6a08d9543422e Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Fri, 24 Jan 2025 10:57:28 +0100
Subject: [PATCH 035/105] Prefactor: Make monitor archival delay a `pub const`

.. previously we just used the 4032 magic number; here we put it in a
`pub const` that is reusable elsewhere.
---
 lightning/src/chain/channelmonitor.rs | 16 ++++++++++------
 lightning/src/ln/monitor_tests.rs     | 18 +++++++++---------
 2 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index e1f47060fbe..8788fbb894d 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -256,6 +256,10 @@ pub(crate) const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
 // solved by a previous claim tx. What we want to avoid is reorg evicting our claim tx and us not
 // keep bumping another claim tx to solve the outpoint.
 pub const ANTI_REORG_DELAY: u32 = 6;
+/// Number of blocks we wait before assuming a [`ChannelMonitor`] to be fully resolved and
+/// considering it safe to archive.
+// 4032 blocks are roughly four weeks
+pub const ARCHIVAL_DELAY_BLOCKS: u32 = 4032;
 /// Number of blocks before confirmation at which we fail back an un-relayed HTLC or at which we
 /// refuse to accept a new HTLC.
 ///
@@ -2023,10 +2027,11 @@ impl ChannelMonitor {
 	///
 	/// This function returns a tuple of two booleans, the first indicating whether the monitor is
 	/// fully resolved, and the second whether the monitor needs persistence to ensure it is
-	/// reliably marked as resolved within 4032 blocks.
+	/// reliably marked as resolved within [`ARCHIVAL_DELAY_BLOCKS`] blocks.
 	///
-	/// The first boolean is true only if [`Self::get_claimable_balances`] has been empty for at least
-	/// 4032 blocks as an additional protection against any bugs resulting in spuriously empty balance sets.
+	/// The first boolean is true only if [`Self::get_claimable_balances`] has been empty for at
+	/// least [`ARCHIVAL_DELAY_BLOCKS`] blocks as an additional protection against any bugs
+	/// resulting in spuriously empty balance sets.
 	pub fn check_and_update_full_resolution_status(&self, logger: &L) -> (bool, bool) {
 		let mut is_all_funds_claimed = self.get_claimable_balances().is_empty();
 		let current_height = self.current_best_block().height;
@@ -2042,11 +2047,10 @@ impl ChannelMonitor {
 		// once processed, implies the preimage exists in the corresponding inbound channel.
 		let preimages_not_needed_elsewhere = inner.pending_monitor_events.is_empty();

-		const BLOCKS_THRESHOLD: u32 = 4032; // ~four weeks
 		match (inner.balances_empty_height, is_all_funds_claimed, preimages_not_needed_elsewhere) {
 			(Some(balances_empty_height), true, true) => {
 				// Claimed all funds, check if reached the blocks threshold.
-				(current_height >= balances_empty_height + BLOCKS_THRESHOLD, false)
+				(current_height >= balances_empty_height + ARCHIVAL_DELAY_BLOCKS, false)
 			},
 			(Some(_), false, _)|(Some(_), _, false) => {
 				// previously assumed we claimed all funds, but we have new funds to claim or
@@ -2066,7 +2070,7 @@ impl ChannelMonitor {
 			// None. It is set to the current block height.
log_debug!(logger, "ChannelMonitor funded at {} is now fully resolved. It will become archivable in {} blocks", - inner.get_funding_txo().0, BLOCKS_THRESHOLD); + inner.get_funding_txo().0, ARCHIVAL_DELAY_BLOCKS); inner.balances_empty_height = Some(current_height); (false, true) }, diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 9556e988b4e..e2c76643348 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -10,7 +10,7 @@ //! Further functional tests which test blockchain reorganizations. use crate::sign::{ecdsa::EcdsaChannelSigner, OutputSpender, SpendableOutputDescriptor}; -use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS, Balance, BalanceSource, ChannelMonitorUpdateStep}; +use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ARCHIVAL_DELAY_BLOCKS,LATENCY_GRACE_PERIOD_BLOCKS, Balance, BalanceSource, ChannelMonitorUpdateStep}; use crate::chain::transaction::OutPoint; use crate::chain::chaininterface::{ConfirmationTarget, LowerBoundedFeeEstimator, compute_feerate_sat_per_1000_weight}; use crate::events::bump_transaction::{BumpTransactionEvent, WalletSource}; @@ -246,19 +246,19 @@ fn archive_fully_resolved_monitors() { // At this point, both nodes have no more `Balance`s, but nodes[0]'s `ChannelMonitor` still // hasn't had the `MonitorEvent` that contains the preimage claimed by the `ChannelManager`. - // Thus, calling `archive_fully_resolved_channel_monitors` and waiting 4032 blocks will not - // result in the `ChannelMonitor` being archived. + // Thus, calling `archive_fully_resolved_channel_monitors` and waiting `ARCHIVAL_DELAY_BLOCKS` + // blocks will not result in the `ChannelMonitor` being archived. nodes[0].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors(); assert_eq!(nodes[0].chain_monitor.chain_monitor.list_monitors().len(), 1); - connect_blocks(&nodes[0], 4032); + connect_blocks(&nodes[0], ARCHIVAL_DELAY_BLOCKS); nodes[0].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors(); assert_eq!(nodes[0].chain_monitor.chain_monitor.list_monitors().len(), 1); - // ...however, nodes[1]'s `ChannelMonitor` is ready to be archived, and will be in exactly 4032 - // blocks. + // ...however, nodes[1]'s `ChannelMonitor` is ready to be archived, and will be in exactly + // `ARCHIVAL_DELAY_BLOCKS` blocks. nodes[1].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors(); assert_eq!(nodes[1].chain_monitor.chain_monitor.list_monitors().len(), 1); - connect_blocks(&nodes[1], 4031); + connect_blocks(&nodes[1], ARCHIVAL_DELAY_BLOCKS - 1); nodes[1].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors(); assert_eq!(nodes[1].chain_monitor.chain_monitor.list_monitors().len(), 1); connect_blocks(&nodes[1], 1); @@ -266,11 +266,11 @@ fn archive_fully_resolved_monitors() { assert_eq!(nodes[1].chain_monitor.chain_monitor.list_monitors().len(), 0); // Finally, we process the pending `MonitorEvent` from nodes[0], allowing the `ChannelMonitor` - // to be archived 4032 blocks later. + // to be archived `ARCHIVAL_DELAY_BLOCKS` blocks later. 
expect_payment_sent(&nodes[0], payment_preimage, None, true, false);
 	nodes[0].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors();
 	assert_eq!(nodes[0].chain_monitor.chain_monitor.list_monitors().len(), 1);
-	connect_blocks(&nodes[0], 4031);
+	connect_blocks(&nodes[0], ARCHIVAL_DELAY_BLOCKS - 1);
 	nodes[0].chain_monitor.chain_monitor.archive_fully_resolved_channel_monitors();
 	assert_eq!(nodes[0].chain_monitor.chain_monitor.list_monitors().len(), 1);
 	connect_blocks(&nodes[0], 1);

From 8c49359ef5a067e6f9f9d31de09d2a7d5cc0f80b Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Fri, 24 Jan 2025 11:14:04 +0100
Subject: [PATCH 036/105] `OutputSweeper`: Delay pruning until monitors have
 likely been archived

Previously, we would prune tracked descriptors once we see a spend hit
`ANTI_REORG_DELAY = 6` confirmations. However, this could lead to a
scenario where lingering `ChannelMonitor`s waiting to be archived would
still regenerate and replay `Event::SpendableOutput`s, i.e., we would
re-add the same (now unspendable due to actually having already been
spent) outputs again after having initially pruned them.

Here, we therefore keep the tracked descriptors around for longer, in
particular at least `ARCHIVAL_DELAY_BLOCKS + ANTI_REORG_DELAY = 4038`
confirmations, at which point we assume the lingering monitors have
likely been archived, and it's 'safe' for us to also forget about the
descriptors.
---
 lightning-background-processor/src/lib.rs |  6 +++---
 lightning/src/util/sweep.rs               | 15 ++++++++++++---
 2 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs
index af7c7ffb003..adcae564f56 100644
--- a/lightning-background-processor/src/lib.rs
+++ b/lightning-background-processor/src/lib.rs
@@ -1099,7 +1099,7 @@ mod tests {
 		SCORER_PERSISTENCE_SECONDARY_NAMESPACE,
 	};
 	use lightning::util::ser::Writeable;
-	use lightning::util::sweep::{OutputSpendStatus, OutputSweeper};
+	use lightning::util::sweep::{OutputSpendStatus, OutputSweeper, PRUNE_DELAY_BLOCKS};
 	use lightning::util::test_utils;
 	use lightning::{get_event, get_event_msg};
 	use lightning_persister::fs_store::FilesystemStore;
@@ -2282,8 +2282,8 @@ mod tests {
 		}

 		// Check we stop tracking the spendable outputs when one of the txs reaches
-		// ANTI_REORG_DELAY confirmations.
-		confirm_transaction_depth(&mut nodes[0], &sweep_tx_0, ANTI_REORG_DELAY);
+		// PRUNE_DELAY_BLOCKS confirmations.
+		confirm_transaction_depth(&mut nodes[0], &sweep_tx_0, PRUNE_DELAY_BLOCKS);
 		assert_eq!(nodes[0].sweeper.tracked_spendable_outputs().len(), 0);

 		if !std::thread::panicking() {
diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs
index 78acef9d727..0022e5286d2 100644
--- a/lightning/src/util/sweep.rs
+++ b/lightning/src/util/sweep.rs
@@ -9,7 +9,7 @@
 //! sweeping them.

 use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
-use crate::chain::channelmonitor::ANTI_REORG_DELAY;
+use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ARCHIVAL_DELAY_BLOCKS};
 use crate::chain::{self, BestBlock, Confirm, Filter, Listen, WatchedOutput};
 use crate::io;
 use crate::ln::msgs::DecodeError;
@@ -32,6 +32,9 @@ use bitcoin::{BlockHash, Transaction, Txid};

 use core::ops::Deref;

+/// The number of blocks we wait before we prune the tracked spendable outputs.
+pub const PRUNE_DELAY_BLOCKS: u32 = ARCHIVAL_DELAY_BLOCKS + ANTI_REORG_DELAY;
+
 /// The state of a spendable output currently tracked by an [`OutputSweeper`].
#[derive(Clone, Debug, PartialEq, Eq)]
 pub struct TrackedSpendableOutput {
@@ -101,7 +104,11 @@ pub enum OutputSpendStatus {
 		latest_spending_tx: Transaction,
 	},
 	/// A transaction spending the output has been confirmed on-chain but will be tracked until it
-	/// reaches [`ANTI_REORG_DELAY`] confirmations.
+	/// reaches at least [`PRUNE_DELAY_BLOCKS`] confirmations to ensure [`Event::SpendableOutputs`]
+	/// stemming from lingering [`ChannelMonitor`]s can safely be replayed.
+	///
+	/// [`Event::SpendableOutputs`]: crate::events::Event::SpendableOutputs
+	/// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor
 	PendingThresholdConfirmations {
 		/// The hash of the chain tip when we first broadcast a transaction spending this output.
 		first_broadcast_hash: BlockHash,
@@ -524,7 +531,9 @@ where
 		// Prune all outputs that have sufficient depth by now.
 		sweeper_state.outputs.retain(|o| {
 			if let Some(confirmation_height) = o.status.confirmation_height() {
-				if cur_height >= confirmation_height + ANTI_REORG_DELAY - 1 {
+				// We wait at least `PRUNE_DELAY_BLOCKS` as before that
+				// `Event::SpendableOutputs` from lingering monitors might get replayed.
+				if cur_height >= confirmation_height + PRUNE_DELAY_BLOCKS - 1 {
 					log_debug!(self.logger,
 						"Pruning swept output as sufficiently confirmed via spend in transaction {:?}. Pruned descriptor: {:?}",
 						o.status.latest_spending_tx().map(|t| t.compute_txid()), o.descriptor

From 941ed6d494d63f7c242b9019f41130a5b675f8d3 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Tue, 28 Jan 2025 16:54:53 +0000
Subject: [PATCH 037/105] Add CHANGELOG entry for 0.1.1

---
 CHANGELOG.md                                 | 36 ++++++++++++++++++++
 pending_changelog/3531-buggy-router-leak.txt |  4 ---
 2 files changed, 36 insertions(+), 4 deletions(-)
 delete mode 100644 pending_changelog/3531-buggy-router-leak.txt

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a35f38d05a6..be12be58d1e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,39 @@
+# 0.1.1 - Jan 28, 2025 - "Onchain Matters"
+
+## API Updates
+ * A `ChannelManager::send_payment_with_route` was (re-)added, with semantics
+   similar to `ChannelManager::send_payment` (rather than like the pre-0.1
+   `send_payment_with_route`, #3534).
+ * `RawBolt11Invoice::{to,from}_raw` were added (#3549).
+
+## Bug Fixes
+ * HTLCs which were forwarded where the inbound edge times out within the next
+   three blocks will have the inbound HTLC failed backwards irrespective of the
+   status of the outbound HTLC. This avoids the peer force-closing the channel
+   (and claiming the inbound edge HTLC on-chain) even if we have not yet managed
+   to claim the outbound edge on chain (#3556).
+ * On restart, replay of `Event::SpendableOutput`s could have caused
+   `OutputSweeper` to generate double-spending transactions, making it unable to
+   claim any delayed claims. This was resolved by retaining old claims for more
+   than four weeks after they are claimed on-chain to detect replays (#3559).
+ * Fixed the additional feerate we will pay each time we RBF on-chain claims to
+   match the Bitcoin Core policy (1 sat/vB) instead of 16 sats/vB (#3457).
+ * Fixed a case where a custom `Router` which returns an invalid `Route`,
+   provided to `ChannelManager`, can result in an outbound payment remaining
+   pending forever despite no HTLCs being pending (#3531).
+
+## Security
+0.1.1 fixes a denial-of-service vulnerability allowing channel counterparties to
+cause force-closure of unrelated channels.
+ * If a malicious channel counterparty force-closes a channel, broadcasting a
+   revoked commitment transaction while the channel at closure time included
+   multiple non-dust forwarded outbound HTLCs with identical payment hashes and
+   amounts, failure to fail the HTLCs backwards could cause the channels on
+   which we received the corresponding inbound HTLCs to be force-closed. Note
+   that we'll receive, at a minimum, the malicious counterparty's reserve value
+   when they broadcast the stale commitment (#3556). Thanks to Matt Morehouse for
+   reporting this issue.
+
 # 0.1 - Jan 15, 2025 - "Human Readable Version Numbers"

 The LDK 0.1 release represents an important milestone for the LDK project. While
diff --git a/pending_changelog/3531-buggy-router-leak.txt b/pending_changelog/3531-buggy-router-leak.txt
deleted file mode 100644
index 72714aa8a8b..00000000000
--- a/pending_changelog/3531-buggy-router-leak.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-## Bug Fixes
-
-* Fixed a rare case where a custom router returning a buggy route could result in holding onto a
-  pending payment forever and in some cases failing to generate a PaymentFailed event (#3531).

From 97c2dcd33de979a54b64317db54a35f99e249785 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Tue, 28 Jan 2025 16:57:19 +0000
Subject: [PATCH 038/105] Bump `lightning` to 0.1.1, `lightning-invoice` to
 0.33.1

---
 lightning-invoice/Cargo.toml | 2 +-
 lightning/Cargo.toml         | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/lightning-invoice/Cargo.toml b/lightning-invoice/Cargo.toml
index ff8d88c5f63..f4629c1ad5c 100644
--- a/lightning-invoice/Cargo.toml
+++ b/lightning-invoice/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 name = "lightning-invoice"
 description = "Data structures to parse and serialize BOLT11 lightning invoices"
-version = "0.33.0"
+version = "0.33.1"
 authors = ["Sebastian Geisler "]
 documentation = "https://docs.rs/lightning-invoice/"
 license = "MIT OR Apache-2.0"
diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml
index 3e46c996147..6417d231f9e 100644
--- a/lightning/Cargo.toml
+++ b/lightning/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "lightning"
-version = "0.1.0"
+version = "0.1.1"
 authors = ["Matt Corallo"]
 license = "MIT OR Apache-2.0"
 repository = "https://github.com/lightningdevkit/rust-lightning/"

From 2c2decf39fe2de9390d6de358060e0307dee7488 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Wed, 19 Feb 2025 15:03:15 +0000
Subject: [PATCH 039/105] Stop checking `ChannelLiquidity`'s in-memory size

We generally expect `ChannelLiquidity` to be exactly three cache lines to
ensure the first bytes we need are all on one cache line. This improves
performance very marginally on some machines, but the assertions that this
is true do not work on some Android 32-bit machines due to differing
`Duration` sizes.

Here we simply remove the assertions to fix the build on platforms where
the struct size isn't exactly on cache lines. This may marginally harm
performance but it shouldn't be that critical.

Fixes #3415
---
 lightning/src/routing/scoring.rs | 10 ----------
 1 file changed, 10 deletions(-)

diff --git a/lightning/src/routing/scoring.rs b/lightning/src/routing/scoring.rs
index cec2416422b..0755a21e1ae 100644
--- a/lightning/src/routing/scoring.rs
+++ b/lightning/src/routing/scoring.rs
@@ -821,16 +821,6 @@ struct ChannelLiquidity {
 	offset_history_last_updated: Duration,
 }

-// Check that the liquidity HashMap's entries sit on round cache lines.
-//
-// Specifically, the first cache line will have the key, the liquidity offsets, and the total
-// points tracked in the historical tracker.
-//
-// The next two cache lines will have the historical points, which we only access last during
-// scoring, followed by the last_updated `Duration`s (which we do not need during scoring).
-const _LIQUIDITY_MAP_SIZING_CHECK: usize = 192 - ::core::mem::size_of::<(u64, ChannelLiquidity)>();
-const _LIQUIDITY_MAP_SIZING_CHECK_2: usize = ::core::mem::size_of::<(u64, ChannelLiquidity)>() - 192;
-
 /// A snapshot of [`ChannelLiquidity`] in one direction assuming a certain channel capacity.
 struct DirectedChannelLiquidity, HT: Deref, T: Deref> {
 	min_liquidity_offset_msat: L,

From e8a426cf42c99000c17ab7c11e68a1879b9bf902 Mon Sep 17 00:00:00 2001
From: Jeffrey Czyz
Date: Tue, 28 Jan 2025 15:00:27 -0600
Subject: [PATCH 040/105] Use fixed-size Vec allocations for BOLT12 messages

Instead of using elaborate calculations to determine the exact number of
bytes needed for a BOLT12 message, allocate a fixed-size amount. This
reduces the code complexity and potentially reduces heap fragmentation in
the normal case.
---
 lightning/src/offers/invoice.rs         | 53 ++++++-------------------
 lightning/src/offers/invoice_request.rs | 41 +++++--------------
 lightning/src/offers/merkle.rs          |  8 +---
 lightning/src/offers/offer.rs           |  3 +-
 lightning/src/offers/refund.rs          |  3 +-
 lightning/src/offers/static_invoice.rs  | 33 +++-------------
 6 files changed, 34 insertions(+), 107 deletions(-)

diff --git a/lightning/src/offers/invoice.rs b/lightning/src/offers/invoice.rs
index 75095e058e7..25a722f73ad 100644
--- a/lightning/src/offers/invoice.rs
+++ b/lightning/src/offers/invoice.rs
@@ -122,7 +122,7 @@ use crate::offers::invoice_macros::{invoice_accessors_common, invoice_builder_me
 #[cfg(test)]
 use crate::offers::invoice_macros::invoice_builder_methods_test_common;
 use crate::offers::invoice_request::{EXPERIMENTAL_INVOICE_REQUEST_TYPES, ExperimentalInvoiceRequestTlvStream, ExperimentalInvoiceRequestTlvStreamRef, INVOICE_REQUEST_PAYER_ID_TYPE, INVOICE_REQUEST_TYPES, IV_BYTES as INVOICE_REQUEST_IV_BYTES, InvoiceRequest, InvoiceRequestContents, InvoiceRequestTlvStream, InvoiceRequestTlvStreamRef};
-use crate::offers::merkle::{SignError, SignFn, SignatureTlvStream, SignatureTlvStreamRef, TaggedHash, TlvStream, self, SIGNATURE_TLV_RECORD_SIZE};
+use crate::offers::merkle::{SignError, SignFn, SignatureTlvStream, SignatureTlvStreamRef, TaggedHash, TlvStream, self};
 use crate::offers::nonce::Nonce;
 use crate::offers::offer::{Amount, EXPERIMENTAL_OFFER_TYPES, ExperimentalOfferTlvStream, ExperimentalOfferTlvStreamRef, OFFER_TYPES, OfferTlvStream, OfferTlvStreamRef, Quantity};
 use crate::offers::parse::{Bolt12ParseError, Bolt12SemanticError, ParsedMessage};
@@ -520,19 +520,8 @@ impl UnsignedBolt12Invoice {
 		let (_, _, _, invoice_tlv_stream, _, _, experimental_invoice_tlv_stream) =
 			contents.as_tlv_stream();

-		// Allocate enough space for the invoice, which will include:
-		// - all TLV records from `invreq_bytes` except signatures,
-		// - all invoice-specific TLV records, and
-		// - a signature TLV record once the invoice is signed.
-		//
-		// This assumes both the invoice request and the invoice will each only have one signature
-		// using SIGNATURE_TYPES.start as the TLV record. Thus, it is accounted for by invreq_bytes.
- let mut bytes = Vec::with_capacity( - invreq_bytes.len() - + invoice_tlv_stream.serialized_length() - + if contents.is_for_offer() { 0 } else { SIGNATURE_TLV_RECORD_SIZE } - + experimental_invoice_tlv_stream.serialized_length(), - ); + const INVOICE_ALLOCATION_SIZE: usize = 1024; + let mut bytes = Vec::with_capacity(INVOICE_ALLOCATION_SIZE); // Use the invoice_request bytes instead of the invoice_request TLV stream as the latter may // have contained unknown TLV records, which are not stored in `InvoiceRequestContents` or @@ -545,23 +534,16 @@ impl UnsignedBolt12Invoice { invoice_tlv_stream.write(&mut bytes).unwrap(); - let mut experimental_tlv_stream = TlvStream::new(remaining_bytes) - .range(EXPERIMENTAL_TYPES) - .peekable(); - let mut experimental_bytes = Vec::with_capacity( - remaining_bytes.len() - - experimental_tlv_stream - .peek() - .map_or(remaining_bytes.len(), |first_record| first_record.start) - + experimental_invoice_tlv_stream.serialized_length(), - ); + const EXPERIMENTAL_TLV_ALLOCATION_SIZE: usize = 0; + let mut experimental_bytes = Vec::with_capacity(EXPERIMENTAL_TLV_ALLOCATION_SIZE); + let experimental_tlv_stream = TlvStream::new(remaining_bytes) + .range(EXPERIMENTAL_TYPES); for record in experimental_tlv_stream { record.write(&mut experimental_bytes).unwrap(); } experimental_invoice_tlv_stream.write(&mut experimental_bytes).unwrap(); - debug_assert_eq!(experimental_bytes.len(), experimental_bytes.capacity()); let tlv_stream = TlvStream::new(&bytes).chain(TlvStream::new(&experimental_bytes)); let tagged_hash = TaggedHash::from_tlv_stream(SIGNATURE_TAG, tlv_stream); @@ -592,14 +574,6 @@ macro_rules! unsigned_invoice_sign_method { ($self: ident, $self_type: ty $(, $s signature_tlv_stream.write(&mut $self.bytes).unwrap(); // Append the experimental bytes after the signature. - debug_assert_eq!( - // The two-byte overallocation results from SIGNATURE_TLV_RECORD_SIZE accommodating TLV - // records with types >= 253. - $self.bytes.len() - + $self.experimental_bytes.len() - + if $self.contents.is_for_offer() { 0 } else { 2 }, - $self.bytes.capacity(), - ); $self.bytes.extend_from_slice(&$self.experimental_bytes); Ok(Bolt12Invoice { @@ -965,13 +939,6 @@ impl Hash for Bolt12Invoice { } impl InvoiceContents { - fn is_for_offer(&self) -> bool { - match self { - InvoiceContents::ForOffer { .. } => true, - InvoiceContents::ForRefund { .. } => false, - } - } - /// Whether the original offer or refund has expired. #[cfg(feature = "std")] fn is_offer_or_refund_expired(&self) -> bool { @@ -1362,7 +1329,11 @@ pub(super) const EXPERIMENTAL_INVOICE_TYPES: core::ops::RangeFrom = 3_000_0 #[cfg(not(test))] tlv_stream!( - ExperimentalInvoiceTlvStream, ExperimentalInvoiceTlvStreamRef, EXPERIMENTAL_INVOICE_TYPES, {} + ExperimentalInvoiceTlvStream, ExperimentalInvoiceTlvStreamRef, EXPERIMENTAL_INVOICE_TYPES, { + // When adding experimental TLVs, update EXPERIMENTAL_TLV_ALLOCATION_SIZE accordingly in + // both UnsignedBolt12Invoice:new and UnsignedStaticInvoice::new to avoid unnecessary + // allocations. 
+ } ); #[cfg(test)] diff --git a/lightning/src/offers/invoice_request.rs b/lightning/src/offers/invoice_request.rs index 957884f69d0..b481e976d82 100644 --- a/lightning/src/offers/invoice_request.rs +++ b/lightning/src/offers/invoice_request.rs @@ -77,7 +77,7 @@ use crate::ln::channelmanager::PaymentId; use crate::types::features::InvoiceRequestFeatures; use crate::ln::inbound_payment::{ExpandedKey, IV_LEN}; use crate::ln::msgs::DecodeError; -use crate::offers::merkle::{SignError, SignFn, SignatureTlvStream, SignatureTlvStreamRef, TaggedHash, TlvStream, self, SIGNATURE_TLV_RECORD_SIZE}; +use crate::offers::merkle::{SignError, SignFn, SignatureTlvStream, SignatureTlvStreamRef, TaggedHash, TlvStream, self}; use crate::offers::nonce::Nonce; use crate::offers::offer::{Amount, EXPERIMENTAL_OFFER_TYPES, ExperimentalOfferTlvStream, ExperimentalOfferTlvStreamRef, OFFER_TYPES, Offer, OfferContents, OfferId, OfferTlvStream, OfferTlvStreamRef}; use crate::offers::parse::{Bolt12ParseError, ParsedMessage, Bolt12SemanticError}; @@ -473,17 +473,8 @@ impl UnsignedInvoiceRequest { _experimental_offer_tlv_stream, experimental_invoice_request_tlv_stream, ) = contents.as_tlv_stream(); - // Allocate enough space for the invoice_request, which will include: - // - all TLV records from `offer.bytes`, - // - all invoice_request-specific TLV records, and - // - a signature TLV record once the invoice_request is signed. - let mut bytes = Vec::with_capacity( - offer.bytes.len() - + payer_tlv_stream.serialized_length() - + invoice_request_tlv_stream.serialized_length() - + SIGNATURE_TLV_RECORD_SIZE - + experimental_invoice_request_tlv_stream.serialized_length(), - ); + const INVOICE_REQUEST_ALLOCATION_SIZE: usize = 512; + let mut bytes = Vec::with_capacity(INVOICE_REQUEST_ALLOCATION_SIZE); payer_tlv_stream.write(&mut bytes).unwrap(); @@ -495,23 +486,16 @@ impl UnsignedInvoiceRequest { invoice_request_tlv_stream.write(&mut bytes).unwrap(); - let mut experimental_tlv_stream = TlvStream::new(remaining_bytes) - .range(EXPERIMENTAL_OFFER_TYPES) - .peekable(); - let mut experimental_bytes = Vec::with_capacity( - remaining_bytes.len() - - experimental_tlv_stream - .peek() - .map_or(remaining_bytes.len(), |first_record| first_record.start) - + experimental_invoice_request_tlv_stream.serialized_length(), - ); + const EXPERIMENTAL_TLV_ALLOCATION_SIZE: usize = 0; + let mut experimental_bytes = Vec::with_capacity(EXPERIMENTAL_TLV_ALLOCATION_SIZE); + let experimental_tlv_stream = TlvStream::new(remaining_bytes) + .range(EXPERIMENTAL_OFFER_TYPES); for record in experimental_tlv_stream { record.write(&mut experimental_bytes).unwrap(); } experimental_invoice_request_tlv_stream.write(&mut experimental_bytes).unwrap(); - debug_assert_eq!(experimental_bytes.len(), experimental_bytes.capacity()); let tlv_stream = TlvStream::new(&bytes).chain(TlvStream::new(&experimental_bytes)); let tagged_hash = TaggedHash::from_tlv_stream(SIGNATURE_TAG, tlv_stream); @@ -544,12 +528,6 @@ macro_rules! unsigned_invoice_request_sign_method { ( signature_tlv_stream.write(&mut $self.bytes).unwrap(); // Append the experimental bytes after the signature. - debug_assert_eq!( - // The two-byte overallocation results from SIGNATURE_TLV_RECORD_SIZE accommodating TLV - // records with types >= 253. 
- $self.bytes.len() + $self.experimental_bytes.len() + 2, - $self.bytes.capacity(), - ); $self.bytes.extend_from_slice(&$self.experimental_bytes); Ok(InvoiceRequest { @@ -1127,7 +1105,10 @@ pub(super) const EXPERIMENTAL_INVOICE_REQUEST_TYPES: core::ops::Range = #[cfg(not(test))] tlv_stream!( ExperimentalInvoiceRequestTlvStream, ExperimentalInvoiceRequestTlvStreamRef, - EXPERIMENTAL_INVOICE_REQUEST_TYPES, {} + EXPERIMENTAL_INVOICE_REQUEST_TYPES, { + // When adding experimental TLVs, update EXPERIMENTAL_TLV_ALLOCATION_SIZE accordingly in + // UnsignedInvoiceRequest::new to avoid unnecessary allocations. + } ); #[cfg(test)] diff --git a/lightning/src/offers/merkle.rs b/lightning/src/offers/merkle.rs index 8c3eaaed24d..db4f4a41094 100644 --- a/lightning/src/offers/merkle.rs +++ b/lightning/src/offers/merkle.rs @@ -11,7 +11,6 @@ use bitcoin::hashes::{Hash, HashEngine, sha256}; use bitcoin::secp256k1::{Message, PublicKey, Secp256k1, self}; -use bitcoin::secp256k1::constants::SCHNORR_SIGNATURE_SIZE; use bitcoin::secp256k1::schnorr::Signature; use crate::io; use crate::util::ser::{BigSize, Readable, Writeable, Writer}; @@ -26,10 +25,6 @@ tlv_stream!(SignatureTlvStream, SignatureTlvStreamRef<'a>, SIGNATURE_TYPES, { (240, signature: Signature), }); -/// Size of a TLV record in `SIGNATURE_TYPES` when the type is 1000. TLV types are encoded using -/// BigSize, so a TLV record with type 240 will use two less bytes. -pub(super) const SIGNATURE_TLV_RECORD_SIZE: usize = 3 + 1 + SCHNORR_SIGNATURE_SIZE; - /// A hash for use in a specific context by tweaking with a context-dependent tag as per [BIP 340] /// and computed over the merkle root of a TLV stream to sign as defined in [BOLT 12]. /// @@ -253,7 +248,6 @@ pub(super) struct TlvRecord<'a> { type_bytes: &'a [u8], // The entire TLV record. pub(super) record_bytes: &'a [u8], - pub(super) start: usize, pub(super) end: usize, } @@ -278,7 +272,7 @@ impl<'a> Iterator for TlvStream<'a> { self.data.set_position(end); Some(TlvRecord { - r#type, type_bytes, record_bytes, start: start as usize, end: end as usize, + r#type, type_bytes, record_bytes, end: end as usize, }) } else { None diff --git a/lightning/src/offers/offer.rs b/lightning/src/offers/offer.rs index 613f9accd47..0cd1cece6b7 100644 --- a/lightning/src/offers/offer.rs +++ b/lightning/src/offers/offer.rs @@ -438,7 +438,8 @@ macro_rules! offer_builder_methods { ( } } - let mut bytes = Vec::new(); + const OFFER_ALLOCATION_SIZE: usize = 512; + let mut bytes = Vec::with_capacity(OFFER_ALLOCATION_SIZE); $self.offer.write(&mut bytes).unwrap(); let id = OfferId::from_valid_offer_tlv_stream(&bytes); diff --git a/lightning/src/offers/refund.rs b/lightning/src/offers/refund.rs index a68d0eb658e..e562fb3f901 100644 --- a/lightning/src/offers/refund.rs +++ b/lightning/src/offers/refund.rs @@ -338,7 +338,8 @@ macro_rules! 
refund_builder_methods { ( $self.refund.payer.0 = metadata; } - let mut bytes = Vec::new(); + const REFUND_ALLOCATION_SIZE: usize = 512; + let mut bytes = Vec::with_capacity(REFUND_ALLOCATION_SIZE); $self.refund.write(&mut bytes).unwrap(); Ok(Refund { diff --git a/lightning/src/offers/static_invoice.rs b/lightning/src/offers/static_invoice.rs index 411ba3ff272..75b7dc6f9ca 100644 --- a/lightning/src/offers/static_invoice.rs +++ b/lightning/src/offers/static_invoice.rs @@ -25,7 +25,6 @@ use crate::offers::invoice_macros::{invoice_accessors_common, invoice_builder_me use crate::offers::invoice_request::InvoiceRequest; use crate::offers::merkle::{ self, SignError, SignFn, SignatureTlvStream, SignatureTlvStreamRef, TaggedHash, TlvStream, - SIGNATURE_TLV_RECORD_SIZE, }; use crate::offers::nonce::Nonce; use crate::offers::offer::{ @@ -288,16 +287,8 @@ impl UnsignedStaticInvoice { fn new(offer_bytes: &Vec, contents: InvoiceContents) -> Self { let (_, invoice_tlv_stream, _, experimental_invoice_tlv_stream) = contents.as_tlv_stream(); - // Allocate enough space for the invoice, which will include: - // - all TLV records from `offer_bytes`, - // - all invoice-specific TLV records, and - // - a signature TLV record once the invoice is signed. - let mut bytes = Vec::with_capacity( - offer_bytes.len() - + invoice_tlv_stream.serialized_length() - + SIGNATURE_TLV_RECORD_SIZE - + experimental_invoice_tlv_stream.serialized_length(), - ); + const INVOICE_ALLOCATION_SIZE: usize = 1024; + let mut bytes = Vec::with_capacity(INVOICE_ALLOCATION_SIZE); // Use the offer bytes instead of the offer TLV stream as the latter may have contained // unknown TLV records, which are not stored in `InvoiceContents`. @@ -309,22 +300,16 @@ impl UnsignedStaticInvoice { invoice_tlv_stream.write(&mut bytes).unwrap(); - let mut experimental_tlv_stream = - TlvStream::new(remaining_bytes).range(EXPERIMENTAL_OFFER_TYPES).peekable(); - let mut experimental_bytes = Vec::with_capacity( - remaining_bytes.len() - - experimental_tlv_stream - .peek() - .map_or(remaining_bytes.len(), |first_record| first_record.start) - + experimental_invoice_tlv_stream.serialized_length(), - ); + const EXPERIMENTAL_TLV_ALLOCATION_SIZE: usize = 0; + let mut experimental_bytes = Vec::with_capacity(EXPERIMENTAL_TLV_ALLOCATION_SIZE); + let experimental_tlv_stream = + TlvStream::new(remaining_bytes).range(EXPERIMENTAL_OFFER_TYPES); for record in experimental_tlv_stream { record.write(&mut experimental_bytes).unwrap(); } experimental_invoice_tlv_stream.write(&mut experimental_bytes).unwrap(); - debug_assert_eq!(experimental_bytes.len(), experimental_bytes.capacity()); let tlv_stream = TlvStream::new(&bytes).chain(TlvStream::new(&experimental_bytes)); let tagged_hash = TaggedHash::from_tlv_stream(SIGNATURE_TAG, tlv_stream); @@ -344,12 +329,6 @@ impl UnsignedStaticInvoice { signature_tlv_stream.write(&mut self.bytes).unwrap(); // Append the experimental bytes after the signature. - debug_assert_eq!( - // The two-byte overallocation results from SIGNATURE_TLV_RECORD_SIZE accommodating TLV - // records with types >= 253. 
- self.bytes.len() + self.experimental_bytes.len() + 2, - self.bytes.capacity(), - ); self.bytes.extend_from_slice(&self.experimental_bytes); Ok(StaticInvoice { bytes: self.bytes, contents: self.contents, signature }) From f80d82e835987e045928955d2c83baba86c445e9 Mon Sep 17 00:00:00 2001 From: Jeffrey Czyz Date: Thu, 6 Feb 2025 14:01:18 -0600 Subject: [PATCH 041/105] Drop use of reserve_exact in BOLT12 tests Now that the previous commit removed assertions on Vec capacities for BOLT12 messages, the use of reserve_exact in tests is no longer needed. --- lightning/src/offers/invoice.rs | 12 ------------ lightning/src/offers/invoice_request.rs | 20 -------------------- lightning/src/offers/static_invoice.rs | 12 ------------ 3 files changed, 44 deletions(-) diff --git a/lightning/src/offers/invoice.rs b/lightning/src/offers/invoice.rs index 25a722f73ad..9d2401e2ca0 100644 --- a/lightning/src/offers/invoice.rs +++ b/lightning/src/offers/invoice.rs @@ -2851,9 +2851,6 @@ mod tests { BigSize(32).write(&mut unknown_bytes).unwrap(); [42u8; 32].write(&mut unknown_bytes).unwrap(); - unsigned_invoice.bytes.reserve_exact( - unsigned_invoice.bytes.capacity() - unsigned_invoice.bytes.len() + unknown_bytes.len(), - ); unsigned_invoice.bytes.extend_from_slice(&unknown_bytes); unsigned_invoice.tagged_hash = TaggedHash::from_valid_tlv_stream_bytes(SIGNATURE_TAG, &unsigned_invoice.bytes); @@ -2888,9 +2885,6 @@ mod tests { BigSize(32).write(&mut unknown_bytes).unwrap(); [42u8; 32].write(&mut unknown_bytes).unwrap(); - unsigned_invoice.bytes.reserve_exact( - unsigned_invoice.bytes.capacity() - unsigned_invoice.bytes.len() + unknown_bytes.len(), - ); unsigned_invoice.bytes.extend_from_slice(&unknown_bytes); unsigned_invoice.tagged_hash = TaggedHash::from_valid_tlv_stream_bytes(SIGNATURE_TAG, &unsigned_invoice.bytes); @@ -2953,9 +2947,6 @@ mod tests { BigSize(32).write(&mut unknown_bytes).unwrap(); [42u8; 32].write(&mut unknown_bytes).unwrap(); - unsigned_invoice.bytes.reserve_exact( - unsigned_invoice.bytes.capacity() - unsigned_invoice.bytes.len() + unknown_bytes.len(), - ); unsigned_invoice.experimental_bytes.extend_from_slice(&unknown_bytes); let tlv_stream = TlvStream::new(&unsigned_invoice.bytes) @@ -2992,9 +2983,6 @@ mod tests { BigSize(32).write(&mut unknown_bytes).unwrap(); [42u8; 32].write(&mut unknown_bytes).unwrap(); - unsigned_invoice.bytes.reserve_exact( - unsigned_invoice.bytes.capacity() - unsigned_invoice.bytes.len() + unknown_bytes.len(), - ); unsigned_invoice.experimental_bytes.extend_from_slice(&unknown_bytes); let tlv_stream = TlvStream::new(&unsigned_invoice.bytes) diff --git a/lightning/src/offers/invoice_request.rs b/lightning/src/offers/invoice_request.rs index b481e976d82..8d7d25cf2b5 100644 --- a/lightning/src/offers/invoice_request.rs +++ b/lightning/src/offers/invoice_request.rs @@ -2403,11 +2403,6 @@ mod tests { BigSize(32).write(&mut unknown_bytes).unwrap(); [42u8; 32].write(&mut unknown_bytes).unwrap(); - unsigned_invoice_request.bytes.reserve_exact( - unsigned_invoice_request.bytes.capacity() - - unsigned_invoice_request.bytes.len() - + unknown_bytes.len(), - ); unsigned_invoice_request.bytes.extend_from_slice(&unknown_bytes); unsigned_invoice_request.tagged_hash = TaggedHash::from_valid_tlv_stream_bytes(SIGNATURE_TAG, &unsigned_invoice_request.bytes); @@ -2441,11 +2436,6 @@ mod tests { BigSize(32).write(&mut unknown_bytes).unwrap(); [42u8; 32].write(&mut unknown_bytes).unwrap(); - unsigned_invoice_request.bytes.reserve_exact( - unsigned_invoice_request.bytes.capacity() - - 
unsigned_invoice_request.bytes.len() - + unknown_bytes.len(), - ); unsigned_invoice_request.bytes.extend_from_slice(&unknown_bytes); unsigned_invoice_request.tagged_hash = TaggedHash::from_valid_tlv_stream_bytes(SIGNATURE_TAG, &unsigned_invoice_request.bytes); @@ -2489,11 +2479,6 @@ mod tests { BigSize(32).write(&mut unknown_bytes).unwrap(); [42u8; 32].write(&mut unknown_bytes).unwrap(); - unsigned_invoice_request.bytes.reserve_exact( - unsigned_invoice_request.bytes.capacity() - - unsigned_invoice_request.bytes.len() - + unknown_bytes.len(), - ); unsigned_invoice_request.experimental_bytes.extend_from_slice(&unknown_bytes); let tlv_stream = TlvStream::new(&unsigned_invoice_request.bytes) @@ -2530,11 +2515,6 @@ mod tests { BigSize(32).write(&mut unknown_bytes).unwrap(); [42u8; 32].write(&mut unknown_bytes).unwrap(); - unsigned_invoice_request.bytes.reserve_exact( - unsigned_invoice_request.bytes.capacity() - - unsigned_invoice_request.bytes.len() - + unknown_bytes.len(), - ); unsigned_invoice_request.experimental_bytes.extend_from_slice(&unknown_bytes); let tlv_stream = TlvStream::new(&unsigned_invoice_request.bytes) diff --git a/lightning/src/offers/static_invoice.rs b/lightning/src/offers/static_invoice.rs index 75b7dc6f9ca..4360582a14c 100644 --- a/lightning/src/offers/static_invoice.rs +++ b/lightning/src/offers/static_invoice.rs @@ -1371,9 +1371,6 @@ mod tests { BigSize(32).write(&mut unknown_bytes).unwrap(); [42u8; 32].write(&mut unknown_bytes).unwrap(); - unsigned_invoice.bytes.reserve_exact( - unsigned_invoice.bytes.capacity() - unsigned_invoice.bytes.len() + unknown_bytes.len(), - ); unsigned_invoice.bytes.extend_from_slice(&unknown_bytes); unsigned_invoice.tagged_hash = TaggedHash::from_valid_tlv_stream_bytes(SIGNATURE_TAG, &unsigned_invoice.bytes); @@ -1413,9 +1410,6 @@ mod tests { BigSize(32).write(&mut unknown_bytes).unwrap(); [42u8; 32].write(&mut unknown_bytes).unwrap(); - unsigned_invoice.bytes.reserve_exact( - unsigned_invoice.bytes.capacity() - unsigned_invoice.bytes.len() + unknown_bytes.len(), - ); unsigned_invoice.bytes.extend_from_slice(&unknown_bytes); unsigned_invoice.tagged_hash = TaggedHash::from_valid_tlv_stream_bytes(SIGNATURE_TAG, &unsigned_invoice.bytes); @@ -1490,9 +1484,6 @@ mod tests { BigSize(32).write(&mut unknown_bytes).unwrap(); [42u8; 32].write(&mut unknown_bytes).unwrap(); - unsigned_invoice.bytes.reserve_exact( - unsigned_invoice.bytes.capacity() - unsigned_invoice.bytes.len() + unknown_bytes.len(), - ); unsigned_invoice.experimental_bytes.extend_from_slice(&unknown_bytes); let tlv_stream = TlvStream::new(&unsigned_invoice.bytes) @@ -1534,9 +1525,6 @@ mod tests { BigSize(32).write(&mut unknown_bytes).unwrap(); [42u8; 32].write(&mut unknown_bytes).unwrap(); - unsigned_invoice.bytes.reserve_exact( - unsigned_invoice.bytes.capacity() - unsigned_invoice.bytes.len() + unknown_bytes.len(), - ); unsigned_invoice.experimental_bytes.extend_from_slice(&unknown_bytes); let tlv_stream = TlvStream::new(&unsigned_invoice.bytes) From 24731d400131426213ad4c3758ee1b00043abd91 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 3 Feb 2025 09:46:20 +0100 Subject: [PATCH 042/105] fix historical liquidity bucket decay The formula for applying half lives was incorrect. Test coverage added. 
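To make the fix concrete, here is the arithmetic for `half_lives = 2.0` and a
bucket value of 32, matching the new test below (an illustration only, not
part of the diff):

    // Old: the divisor grows as 2048^n rather than 2^n, over-decaying
    // whenever more than one half-life has passed.
    let divisor = powf64(2048.0, half_lives) as u64;        // n = 2 -> 4_194_304
    let decayed = (32u64 * 1024 / divisor) as u16;          // 32 -> 0, not 32 / 4 = 8

    // New: a fixed-point factor of 1024 * 0.5^n.
    let factor = (1024.0 * powf64(0.5, half_lives)) as u64; // n = 2 -> 256
    let decayed = (32u64 * factor / 1024) as u16;           // 32 -> 8, as expected

Note that for exactly one half-life the old formula happened to be correct
(1024 / 2048 = 1/2), which may be why the error went unnoticed.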
Relatively straightforward merge conflicts (code added in
311a083f673cebe5af76b584b02071909234d9a0, which was not included here,
neighbored the newly added code) fixed in:
 * lightning/src/routing/scoring.rs

---
 lightning/src/routing/scoring.rs | 46 ++++++++++++++++++++++++++------
 1 file changed, 38 insertions(+), 8 deletions(-)

diff --git a/lightning/src/routing/scoring.rs b/lightning/src/routing/scoring.rs
index 0755a21e1ae..e6d172b29ee 100644
--- a/lightning/src/routing/scoring.rs
+++ b/lightning/src/routing/scoring.rs
@@ -1795,13 +1795,21 @@ mod bucketed_history {
 			self.buckets[bucket] = self.buckets[bucket].saturating_add(BUCKET_FIXED_POINT_ONE);
 			}
 		}
+
+		/// Applies decay at the given half-life to all buckets.
+		fn decay(&mut self, half_lives: f64) {
+			let factor = (1024.0 * powf64(0.5, half_lives)) as u64;
+			for bucket in self.buckets.iter_mut() {
+				*bucket = ((*bucket as u64) * factor / 1024) as u16;
+			}
+		}
 	}

 	impl_writeable_tlv_based!(HistoricalBucketRangeTracker, { (0, buckets, required) });
 	impl_writeable_tlv_based!(LegacyHistoricalBucketRangeTracker, { (0, buckets, required) });

 	#[derive(Clone, Copy)]
-	#[repr(C)] // Force the fields in memory to be in the order we specify.
+	#[repr(C)]// Force the fields in memory to be in the order we specify.
 	pub(super) struct HistoricalLiquidityTracker {
 		// This struct sits inside a `(u64, ChannelLiquidity)` in memory, and we first read the
 		// liquidity offsets in `ChannelLiquidity` when calculating the non-historical score. This
@@ -1849,13 +1857,8 @@ mod bucketed_history {
 	}

 	pub(super) fn decay_buckets(&mut self, half_lives: f64) {
-		let divisor = powf64(2048.0, half_lives) as u64;
-		for bucket in self.min_liquidity_offset_history.buckets.iter_mut() {
-			*bucket = ((*bucket as u64) * 1024 / divisor) as u16;
-		}
-		for bucket in self.max_liquidity_offset_history.buckets.iter_mut() {
-			*bucket = ((*bucket as u64) * 1024 / divisor) as u16;
-		}
+		self.min_liquidity_offset_history.decay(half_lives);
+		self.max_liquidity_offset_history.decay(half_lives);
 		self.recalculate_valid_point_count();
 	}

@@ -2049,6 +2052,33 @@ mod bucketed_history {
 			Some((cumulative_success_prob * (1024.0 * 1024.0 * 1024.0)) as u64)
 		}
 	}
+
+	#[cfg(test)]
+	mod tests {
+		use super::HistoricalBucketRangeTracker;
+
+		#[test]
+		fn historical_liquidity_bucket_decay() {
+			let mut bucket = HistoricalBucketRangeTracker::new();
+			bucket.track_datapoint(100, 1000);
+			assert_eq!(
+				bucket.buckets,
+				[
+					0u16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+					0, 0, 0, 0, 0, 0, 0
+				]
+			);
+
+			bucket.decay(2.0);
+			assert_eq!(
+				bucket.buckets,
+				[
+					0u16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+					0, 0, 0, 0, 0, 0, 0
+				]
+			);
+		}
+	}
 }

 use bucketed_history::{LegacyHistoricalBucketRangeTracker, HistoricalBucketRangeTracker, DirectedHistoricalLiquidityTracker, HistoricalLiquidityTracker};

From 58a7b7979ecd6f8cd52716f6d46990a4171fd1c0 Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Mon, 27 Jan 2025 18:04:38 +0000
Subject: [PATCH 043/105] Drop `counterparty_spendable_height` accessor

`counterparty_spendable_height` is not used outside of `package.rs` so
there's not much reason to have an accessor for it. Also, in the next
commit an issue with setting the correct value for revoked counterparty
HTLC outputs is fixed, and the upgrade path causes the value to be 0 in
some cases, making using the value in too many places somewhat fraught.
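For context, the now-inlined field drives the aggregation decision roughly as
follows (a simplified sketch of the existing logic in `package.rs`, not new
code):

    // A package counts as pinnable once the counterparty could spend its
    // output "soon", i.e. within COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE.
    let self_pinnable = self_cluster == AggregationCluster::Pinnable ||
        self.counterparty_spendable_height <= cur_height + COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE;
    // Packages are only merged when both are pinnable or both are unpinnable,
    // so a pinnable output can never delay the claim of an unpinnable one.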
--- lightning/src/chain/package.rs | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index 55214006d4c..a205ad65949 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -834,17 +834,17 @@ impl PackageTemplate { // Now check that we only merge packages if they are both unpinnable or both // pinnable. let self_pinnable = self_cluster == AggregationCluster::Pinnable || - self.counterparty_spendable_height() <= cur_height + COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE; + self.counterparty_spendable_height <= cur_height + COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE; let other_pinnable = other_cluster == AggregationCluster::Pinnable || - other.counterparty_spendable_height() <= cur_height + COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE; + other.counterparty_spendable_height <= cur_height + COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE; if self_pinnable && other_pinnable { return true; } let self_unpinnable = self_cluster == AggregationCluster::Unpinnable && - self.counterparty_spendable_height() > cur_height + COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE; + self.counterparty_spendable_height > cur_height + COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE; let other_unpinnable = other_cluster == AggregationCluster::Unpinnable && - other.counterparty_spendable_height() > cur_height + COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE; + other.counterparty_spendable_height > cur_height + COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE; if self_unpinnable && other_unpinnable { return true; } @@ -855,13 +855,6 @@ impl PackageTemplate { pub(crate) fn is_malleable(&self) -> bool { matches!(self.malleability, PackageMalleability::Malleable(..)) } - /// The height at which our counterparty may be able to spend this output. - /// - /// This is an important limit for aggregation as after this height our counterparty may be - /// able to pin transactions spending this output in the mempool. - pub(crate) fn counterparty_spendable_height(&self) -> u32 { - self.counterparty_spendable_height - } pub(crate) fn previous_feerate(&self) -> u64 { self.feerate_previous } From 2e80fbd7e825ebfb40f0d60dab536b5d5df37629 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 27 Jan 2025 18:07:05 +0000 Subject: [PATCH 044/105] Set correct `counterparty_spendable_height` on c.p. revoked HTLCs If the counterparty broadcasts a revoked transaction with offered HTLCs, the output is not immediately pinnable as the counterparty cannot claim the HTLC until the CLTV expires and they use an HTLC-Timeout path. Here we fix the `counterparty_spendable_height` value we set on counterparty revoked HTLC claims to match reality. Note that because we still consider these outputs `Pinnable` the value is not used. In the next commit we'll start making them `Unpinnable` which will actually change behavior. Note that when upgrading we have to wipe the `counterparty_spendable_height` value for non-offered HTLCs as otherwise we'd consider them `Unpinnable` when they are, in fact, `Pinnable`. 
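Summarizing the two cases on a revoked counterparty commitment (a restatement
of the change below with the reasoning inline, not additional logic):

    let counterparty_spendable_height = if htlc.offered {
        // The counterparty offered this HTLC; they can only get it back via an
        // HTLC-Timeout transaction once the CLTV has expired.
        htlc.cltv_expiry
    } else {
        // We offered this HTLC; the counterparty can claim it immediately with
        // the preimage, so it is spendable (and thus pinnable) right away.
        height
    };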
--- lightning/src/chain/channelmonitor.rs | 7 ++++++- lightning/src/chain/package.rs | 22 ++++++++++++++++++---- 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 8788fbb894d..22aa84488ce 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -3564,11 +3564,16 @@ impl ChannelMonitorImpl { return (claimable_outpoints, to_counterparty_output_info); } let revk_htlc_outp = RevokedHTLCOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, htlc.amount_msat / 1000, htlc.clone(), &self.onchain_tx_handler.channel_transaction_parameters.channel_type_features); + let counterparty_spendable_height = if htlc.offered { + htlc.cltv_expiry + } else { + height + }; let justice_package = PackageTemplate::build_package( commitment_txid, transaction_output_index, PackageSolvingData::RevokedHTLCOutput(revk_htlc_outp), - htlc.cltv_expiry, + counterparty_spendable_height, ); claimable_outpoints.push(justice_package); } diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index a205ad65949..8ce22fc4764 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -771,10 +771,12 @@ pub struct PackageTemplate { /// Block height at which our counterparty can potentially claim this output as well (assuming /// they have the keys or information required to do so). /// - /// This is used primarily by external consumers to decide when an output becomes "pinnable" - /// because the counterparty can potentially spend it. It is also used internally by - /// [`Self::get_height_timer`] to identify when an output must be claimed by, depending on the - /// type of output. + /// This is used primarily to decide when an output becomes "pinnable" because the counterparty + /// can potentially spend it. It is also used internally by [`Self::get_height_timer`] to + /// identify when an output must be claimed by, depending on the type of output. + /// + /// Note that for revoked counterparty HTLC outputs the value may be zero in some cases where + /// we upgraded from LDK 0.1 or prior. counterparty_spendable_height: u32, // Cache of package feerate committed at previous (re)broadcast. If bumping resources // (either claimed output value or external utxo), it will keep increasing until holder @@ -1218,6 +1220,18 @@ impl Readable for PackageTemplate { (4, _height_original, option), // Written with a dummy value since 0.1 (6, height_timer, option), }); + for (_, input) in &inputs { + if let PackageSolvingData::RevokedHTLCOutput(RevokedHTLCOutput { htlc, .. }) = input { + // LDK versions through 0.1 set the wrong counterparty_spendable_height for + // non-offered revoked HTLCs (ie HTLCs we sent to our counterparty which they can + // claim with a preimage immediately). Here we detect this and reset the value to + // zero, as the value is unused except for merging decisions which doesn't care + // about any values below the current height. 
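+				// (The detection below keys off those versions having always written the
+				// HTLC's `cltv_expiry` here, so equality is taken to indicate the buggy
+				// value; a false positive merely resets a value that is ignored once it
+				// is below the current height anyway.)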
+ if !htlc.offered && htlc.cltv_expiry == counterparty_spendable_height { + counterparty_spendable_height = 0; + } + } + } Ok(PackageTemplate { inputs, malleability, From 174f42ed9ac0e8b8d68a872d1ab7e9a65197c400 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 27 Jan 2025 18:07:11 +0000 Subject: [PATCH 045/105] Mark counterparty revoked offered HTLCs as `Unpinnable` If the counterparty broadcasts a revoked transaction with offered HTLCs, the output is not immediately pinnable as the counterparty cannot claim the HTLC until the CLTV expires and they use an HTLC-Timeout path. Here we properly set these packages as `Unpinnable`, changing some transaction generation during tests. --- lightning/src/chain/package.rs | 9 +++- lightning/src/ln/functional_tests.rs | 79 ++++++++++++++++------------ lightning/src/ln/monitor_tests.rs | 10 ++-- 3 files changed, 60 insertions(+), 38 deletions(-) diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index 8ce22fc4764..bd6912c21f8 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -699,8 +699,13 @@ impl PackageSolvingData { match self { PackageSolvingData::RevokedOutput(RevokedOutput { .. }) => PackageMalleability::Malleable(AggregationCluster::Unpinnable), - PackageSolvingData::RevokedHTLCOutput(..) => - PackageMalleability::Malleable(AggregationCluster::Pinnable), + PackageSolvingData::RevokedHTLCOutput(RevokedHTLCOutput { htlc, .. }) => { + if htlc.offered { + PackageMalleability::Malleable(AggregationCluster::Unpinnable) + } else { + PackageMalleability::Malleable(AggregationCluster::Pinnable) + } + }, PackageSolvingData::CounterpartyOfferedHTLCOutput(..) => PackageMalleability::Malleable(AggregationCluster::Unpinnable), PackageSolvingData::CounterpartyReceivedHTLCOutput(..) => diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index b29ee99e077..bdb1621771f 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -15,7 +15,7 @@ use crate::chain; use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch}; use crate::chain::chaininterface::LowerBoundedFeeEstimator; use crate::chain::channelmonitor; -use crate::chain::channelmonitor::{Balance, ChannelMonitorUpdateStep, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY}; +use crate::chain::channelmonitor::{Balance, ChannelMonitorUpdateStep, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE}; use crate::chain::transaction::OutPoint; use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, OutputSpender, SignerProvider}; use crate::events::bump_transaction::WalletSource; @@ -2645,14 +2645,12 @@ fn test_justice_tx_htlc_timeout() { mine_transaction(&nodes[1], &revoked_local_txn[0]); { let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); - // The unpinnable, revoked to_self output, and the pinnable, revoked htlc output will - // be claimed in separate transactions. - assert_eq!(node_txn.len(), 2); - for tx in node_txn.iter() { - assert_eq!(tx.input.len(), 1); - check_spends!(tx, revoked_local_txn[0]); - } - assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output); + // The revoked HTLC output is not pinnable for another `TEST_FINAL_CLTV` blocks, and is + // thus claimed in the same transaction with the revoked to_self output. 
+ assert_eq!(node_txn.len(), 1); + assert_eq!(node_txn[0].input.len(), 2); + check_spends!(node_txn[0], revoked_local_txn[0]); + assert_ne!(node_txn[0].input[0].previous_output, node_txn[0].input[1].previous_output); node_txn.clear(); } check_added_monitors!(nodes[1], 1); @@ -2872,28 +2870,26 @@ fn claim_htlc_outputs() { assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); - assert_eq!(node_txn.len(), 2); // Two penalty transactions: - assert_eq!(node_txn[0].input.len(), 1); // Claims the unpinnable, revoked output. - assert_eq!(node_txn[1].input.len(), 2); // Claims both pinnable, revoked HTLC outputs separately. - check_spends!(node_txn[0], revoked_local_txn[0]); - check_spends!(node_txn[1], revoked_local_txn[0]); - assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output); - assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[1].previous_output); - assert_ne!(node_txn[1].input[0].previous_output, node_txn[1].input[1].previous_output); + assert_eq!(node_txn.len(), 2); // ChannelMonitor: penalty txn + + // The ChannelMonitor should claim the accepted HTLC output separately from the offered + // HTLC and to_self outputs. + let accepted_claim = node_txn.iter().filter(|tx| tx.input.len() == 1).next().unwrap(); + let offered_to_self_claim = node_txn.iter().filter(|tx| tx.input.len() == 2).next().unwrap(); + check_spends!(accepted_claim, revoked_local_txn[0]); + check_spends!(offered_to_self_claim, revoked_local_txn[0]); + assert_eq!(accepted_claim.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); let mut witness_lens = BTreeSet::new(); - witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len()); - witness_lens.insert(node_txn[1].input[0].witness.last().unwrap().len()); - witness_lens.insert(node_txn[1].input[1].witness.last().unwrap().len()); - assert_eq!(witness_lens.len(), 3); + witness_lens.insert(offered_to_self_claim.input[0].witness.last().unwrap().len()); + witness_lens.insert(offered_to_self_claim.input[1].witness.last().unwrap().len()); + assert_eq!(witness_lens.len(), 2); assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local - assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC - assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC + assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); - // Finally, mine the penalty transactions and check that we get an HTLC failure after + // Finally, mine the penalty transaction and check that we get an HTLC failure after // ANTI_REORG_DELAY confirmations. 
- mine_transaction(&nodes[1], &node_txn[0]); - mine_transaction(&nodes[1], &node_txn[1]); + mine_transaction(&nodes[1], accepted_claim); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); expect_payment_failed!(nodes[1], payment_hash_2, false); } @@ -5056,8 +5052,7 @@ fn test_static_spendable_outputs_timeout_tx() { check_spends!(spend_txn[2], node_txn[0], commitment_tx[0]); // All outputs } -#[test] -fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() { +fn do_test_static_spendable_outputs_justice_tx_revoked_commitment_tx(split_tx: bool) { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); @@ -5073,20 +5068,28 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() { claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); + if split_tx { + connect_blocks(&nodes[1], TEST_FINAL_CLTV - COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE + 1); + } + mine_transaction(&nodes[1], &revoked_local_txn[0]); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000); - // The unpinnable, revoked to_self output and the pinnable, revoked HTLC output will be claimed - // in separate transactions. + // If the HTLC expires in more than COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE blocks, we'll + // claim both the revoked and HTLC outputs in one transaction, otherwise we'll split them as we + // consider the HTLC output as pinnable and want to claim pinnable and unpinnable outputs + // separately. let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); - assert_eq!(node_txn.len(), 2); + assert_eq!(node_txn.len(), if split_tx { 2 } else { 1 }); for tx in node_txn.iter() { - assert_eq!(tx.input.len(), 1); + assert_eq!(tx.input.len(), if split_tx { 1 } else { 2 }); check_spends!(tx, revoked_local_txn[0]); } - assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output); + if split_tx { + assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output); + } mine_transaction(&nodes[1], &node_txn[0]); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); @@ -5096,6 +5099,12 @@ fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() { check_spends!(spend_txn[0], node_txn[0]); } +#[test] +fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() { + do_test_static_spendable_outputs_justice_tx_revoked_commitment_tx(true); + do_test_static_spendable_outputs_justice_tx_revoked_commitment_tx(false); +} + #[test] fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { let mut chanmon_cfgs = create_chanmon_cfgs(2); @@ -5128,6 +5137,10 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { check_spends!(revoked_htlc_txn[0], revoked_local_txn[0]); assert_ne!(revoked_htlc_txn[0].lock_time, LockTime::ZERO); // HTLC-Timeout + // In order to connect `revoked_htlc_txn[0]` we must first advance the chain by + // `TEST_FINAL_CLTV` blocks as otherwise the transaction is consensus-invalid due to its + // locktime. 
+ connect_blocks(&nodes[1], TEST_FINAL_CLTV); // B will generate justice tx from A's revoked commitment/HTLC tx connect_block(&nodes[1], &create_dummy_block(nodes[1].best_block_hash(), 42, vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()])); check_closed_broadcast!(nodes[1], true); diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index e2c76643348..7a20a79159a 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -10,7 +10,7 @@ //! Further functional tests which test blockchain reorganizations. use crate::sign::{ecdsa::EcdsaChannelSigner, OutputSpender, SpendableOutputDescriptor}; -use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ARCHIVAL_DELAY_BLOCKS,LATENCY_GRACE_PERIOD_BLOCKS, Balance, BalanceSource, ChannelMonitorUpdateStep}; +use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ARCHIVAL_DELAY_BLOCKS,LATENCY_GRACE_PERIOD_BLOCKS, COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE, Balance, BalanceSource, ChannelMonitorUpdateStep}; use crate::chain::transaction::OutPoint; use crate::chain::chaininterface::{ConfirmationTarget, LowerBoundedFeeEstimator, compute_feerate_sat_per_1000_weight}; use crate::events::bump_transaction::{BumpTransactionEvent, WalletSource}; @@ -1734,6 +1734,12 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { assert_eq!(revoked_htlc_success.lock_time, LockTime::ZERO); assert_ne!(revoked_htlc_timeout.lock_time, LockTime::ZERO); + // First connect blocks until the HTLC expires with + // `COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE` blocks, making us consider all the HTLCs + // pinnable claims, which the remainder of the test assumes. + connect_blocks(&nodes[0], TEST_FINAL_CLTV - COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(&nodes[0], + [HTLCDestination::FailedPayment { payment_hash: failed_payment_hash }]); // A will generate justice tx from B's revoked commitment/HTLC tx mine_transaction(&nodes[0], &revoked_local_txn[0]); check_closed_broadcast!(nodes[0], true); @@ -1846,8 +1852,6 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); connect_blocks(&nodes[0], revoked_htlc_timeout.lock_time.to_consensus_u32() - nodes[0].best_block_info().1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(&nodes[0], - [HTLCDestination::FailedPayment { payment_hash: failed_payment_hash }]); // As time goes on A may split its revocation claim transaction into multiple. let as_fewer_input_rbf = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); for tx in as_fewer_input_rbf.iter() { From b5b64296a8d55da00e78724ab46d759f8416b45c Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 30 Jan 2025 20:45:34 +0000 Subject: [PATCH 046/105] Call `peer_disconnected` after a handler refuses a connection If one message handler refuses a connection by returning an `Err` from `peer_connected`, other handlers which already got the `peer_connected` will not see the corresponding `peer_disconnected`, leaving them in a potentially-inconsistent state. Here we ensure we call the `peer_disconnected` handler for all handlers which received a `peer_connected` event (except the one which refused the connection). 
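To illustrate the invariant this enforces, a minimal sketch of the unwind logic (the trait and function below are illustrative stand-ins, not LDK's actual handler traits or `PeerManager` internals):

    use bitcoin::secp256k1::PublicKey;

    /// Illustrative stand-in for LDK's per-message-type handler traits.
    trait Handler {
        fn peer_connected(&self, their_node_id: PublicKey, inbound: bool) -> Result<(), ()>;
        fn peer_disconnected(&self, their_node_id: PublicKey);
    }

    fn connect_peer(handlers: &[&dyn Handler], node_id: PublicKey, inbound: bool) -> Result<(), ()> {
        for (idx, handler) in handlers.iter().enumerate() {
            if handler.peer_connected(node_id, inbound).is_err() {
                // Every handler before `idx` accepted the peer, so each must
                // see a matching `peer_disconnected`. The refusing handler
                // never successfully connected and is skipped.
                for accepted in &handlers[..idx] {
                    accepted.peer_disconnected(node_id);
                }
                return Err(());
            }
        }
        Ok(())
    }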
--- lightning/src/ln/msgs.rs | 4 ++++ lightning/src/ln/peer_handler.rs | 5 +++++ 2 files changed, 9 insertions(+) diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index 659ec65f6cf..0c400fc36e0 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -1578,6 +1578,8 @@ pub trait ChannelMessageHandler : MessageSendEventsProvider { /// May return an `Err(())` if the features the peer supports are not sufficient to communicate /// with us. Implementors should be somewhat conservative about doing so, however, as other /// message handlers may still wish to communicate with this peer. + /// + /// [`Self::peer_disconnected`] will not be called if `Err(())` is returned. fn peer_connected(&self, their_node_id: PublicKey, msg: &Init, inbound: bool) -> Result<(), ()>; /// Handle an incoming `channel_reestablish` message from the given peer. fn handle_channel_reestablish(&self, their_node_id: PublicKey, msg: &ChannelReestablish); @@ -1707,6 +1709,8 @@ pub trait OnionMessageHandler { /// May return an `Err(())` if the features the peer supports are not sufficient to communicate /// with us. Implementors should be somewhat conservative about doing so, however, as other /// message handlers may still wish to communicate with this peer. + /// + /// [`Self::peer_disconnected`] will not be called if `Err(())` is returned. fn peer_connected(&self, their_node_id: PublicKey, init: &Init, inbound: bool) -> Result<(), ()>; /// Indicates a connection to the peer failed/an existing connection was lost. Allows handlers to diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index dbce9ca0498..9b649408423 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -88,6 +88,8 @@ pub trait CustomMessageHandler: wire::CustomMessageReader { /// May return an `Err(())` if the features the peer supports are not sufficient to communicate /// with us. Implementors should be somewhat conservative about doing so, however, as other /// message handlers may still wish to communicate with this peer. + /// + /// [`Self::peer_disconnected`] will not be called if `Err(())` is returned. fn peer_connected(&self, their_node_id: PublicKey, msg: &Init, inbound: bool) -> Result<(), ()>; /// Gets the node feature flags which this handler itself supports. All available handlers are @@ -1717,10 +1719,13 @@ impl

From: Matt Corallo Date: Fri, 31 Jan 2025 17:33:26 +0000 Subject: [PATCH 047/105] Avoid parsing `PublicKey`s when applying an unsigned chan update `PublicKey` parsing is relatively expensive as we have to check if the point is actually on the curve. To avoid it, our `NetworkGraph` uses `NodeId`s which don't have the validity requirement. Sadly, we were always parsing the broadcasting node's `PublicKey` from the `node_id` in the network graph whenever we saw an update for that channel, whether or not we had a corresponding signature. Here we fix this, only parsing the public key (and hashing the message) if we're going to check a signature.
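In isolation, the pattern looks roughly like this (a sketch; `maybe_parse_pubkey` is an illustrative helper, not a function this patch adds):

    use bitcoin::secp256k1::PublicKey;

    // Only pay for curve-point validation when a signature will actually be
    // checked; unsigned updates (e.g. those applied from a rapid gossip sync
    // snapshot) skip the parse entirely.
    fn maybe_parse_pubkey(node_id: &[u8], have_sig: bool) -> Result<Option<PublicKey>, ()> {
        if !have_sig {
            return Ok(None);
        }
        PublicKey::from_slice(node_id).map(Some).map_err(|_| ())
    }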
--- lightning-background-processor/src/lib.rs | 4 ++-- lightning/src/routing/gossip.rs | 27 ++++++++++++++++++----- 2 files changed, 23 insertions(+), 8 deletions(-) diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index adcae564f56..ef889d4e80f 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -2379,8 +2379,8 @@ mod tests { 42, 53, features, - $nodes[0].node.get_our_node_id(), - $nodes[1].node.get_our_node_id(), + $nodes[0].node.get_our_node_id().into(), + $nodes[1].node.get_our_node_id().into(), ) .expect("Failed to update channel from partial announcement"); let original_graph_description = $nodes[0].network_graph.to_string(); diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index c552005d9ca..e3a1e9d0878 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -2537,7 +2537,7 @@ where } }; - let node_pubkey; + let mut node_pubkey = None; { let channels = self.channels.read().unwrap(); match channels.get(&msg.short_channel_id) { @@ -2556,16 +2556,31 @@ where } else { channel.node_one.as_slice() }; - node_pubkey = PublicKey::from_slice(node_id).map_err(|_| LightningError { - err: "Couldn't parse source node pubkey".to_owned(), - action: ErrorAction::IgnoreAndLog(Level::Debug), - })?; + if sig.is_some() { + // PublicKey parsing isn't entirely trivial as it requires that we check + // that the provided point is on the curve. Thus, if we don't have a + // signature to verify, we want to skip the parsing step entirely. + // This represents a substantial speedup in applying RGS snapshots. + node_pubkey = + Some(PublicKey::from_slice(node_id).map_err(|_| LightningError { + err: "Couldn't parse source node pubkey".to_owned(), + action: ErrorAction::IgnoreAndLog(Level::Debug), + })?); + } }, } } - let msg_hash = hash_to_message!(&message_sha256d_hash(&msg)[..]); if let Some(sig) = sig { + let msg_hash = hash_to_message!(&message_sha256d_hash(&msg)[..]); + let node_pubkey = if let Some(pubkey) = node_pubkey { + pubkey + } else { + debug_assert!(false, "node_pubkey should have been decoded above"); + let err = "node_pubkey wasn't decoded but we need it to check a sig".to_owned(); + let action = ErrorAction::IgnoreAndLog(Level::Error); + return Err(LightningError { err, action }); + }; secp_verify_sig!(self.secp_ctx, &msg_hash, &sig, &node_pubkey, "channel_update"); } From 63fa918a21235e324188aedc6476f761990df76c Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 31 Jan 2025 17:42:51 +0000 Subject: [PATCH 048/105] Properly pre-allocate `NetworkGraph` channel/node maps When we build a new `NetworkGraph` from empty, we're generally doing an initial startup and will be syncing the graph very soon. Using an initially-empty `IndexedMap` for the `channels` and `nodes` results in quite some memory churn, with the initial RGS application benchmark showing 15% of its time in pagefault handling alone (i.e. allocating new memory from the OS, let alone the 23% of time in `memmove`). Further, when deserializing a `NetworkGraph`, we'd swapped the expected node and channel count constants, leaving the node map too small and causing map doubling as we read entries from disk. Finally, when deserializing, allocating only exactly the amount of map entries we need is likely to lead to at least one doubling, so we're better off just over-estimating the number of nodes and channels and allocating what we want. 
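As a rough sketch of the trade-off (std `HashMap` standing in for LDK's `IndexedMap`, and the padding fraction is illustrative):

    use std::collections::HashMap;

    // Growing past the reserved capacity forces a rehash-and-copy of every
    // entry, roughly doubling the allocation, so padding the serialized entry
    // count by a modest fraction up front is far cheaper than one doubling of
    // a map with tens of thousands of entries.
    fn preallocated_map(serialized_count: usize) -> HashMap<u64, ()> {
        HashMap::with_capacity(serialized_count + serialized_count / 4)
    }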
Here we just always allocate `channels` and `nodes` based on constants, leading to a 20%-ish speedup in the initial RGS application benchmark. --- lightning/src/routing/gossip.rs | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index e3a1e9d0878..fc097d5f915 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -1660,8 +1660,7 @@ where let chain_hash: ChainHash = Readable::read(reader)?; let channels_count: u64 = Readable::read(reader)?; - // In Nov, 2023 there were about 15,000 nodes; we cap allocations to 1.5x that. - let mut channels = IndexedMap::with_capacity(cmp::min(channels_count as usize, 22500)); + let mut channels = IndexedMap::with_capacity(CHAN_COUNT_ESTIMATE); for _ in 0..channels_count { let chan_id: u64 = Readable::read(reader)?; let chan_info: ChannelInfo = Readable::read(reader)?; @@ -1673,8 +1672,7 @@ where if nodes_count > u32::max_value() as u64 / 2 { return Err(DecodeError::InvalidValue); } - // In Nov, 2023 there were about 69K channels; we cap allocations to 1.5x that. - let mut nodes = IndexedMap::with_capacity(cmp::min(nodes_count as usize, 103500)); + let mut nodes = IndexedMap::with_capacity(NODE_COUNT_ESTIMATE); for i in 0..nodes_count { let node_id = Readable::read(reader)?; let mut node_info: NodeInfo = Readable::read(reader)?; @@ -1750,6 +1748,15 @@ where } } +// In Jan, 2025 there were about 49K channels. +// We over-allocate by a bit because 20% more is better than the double we get if we're slightly +// too low +const CHAN_COUNT_ESTIMATE: usize = 60_000; +// In Jan, 2025 there were about 15K nodes +// We over-allocate by a bit because 33% more is better than the double we get if we're slightly +// too low +const NODE_COUNT_ESTIMATE: usize = 20_000; + impl<L: Deref> NetworkGraph<L> where L::Target: Logger, { @@ -1760,8 +1767,8 @@ where secp_ctx: Secp256k1::verification_only(), chain_hash: ChainHash::using_genesis_block(network), logger, - channels: RwLock::new(IndexedMap::new()), - nodes: RwLock::new(IndexedMap::new()), + channels: RwLock::new(IndexedMap::with_capacity(CHAN_COUNT_ESTIMATE)), + nodes: RwLock::new(IndexedMap::with_capacity(NODE_COUNT_ESTIMATE)), next_node_counter: AtomicUsize::new(0), removed_node_counters: Mutex::new(Vec::new()), last_rapid_gossip_sync_timestamp: Mutex::new(None), From bed125a0f1b34dd479eba9a835c18287f49f4207 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 3 Feb 2025 20:41:21 +0000 Subject: [PATCH 049/105] Ignore fees on first-hop channels from the public network graph If we have a first-hop channel from a first-hop hint, we'll ignore the fees on it as we won't charge ourselves fees. However, if we have a first-hop channel from the network graph, we should do the same. We do so here, also teeing up a coming commit which will remove much of the custom codepath for first-hop hints and start using this common codepath as well.
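Conceptually, the rule is just this (the `RoutingFees` fields match LDK's type, but the helper is an illustrative sketch):

    struct RoutingFees {
        base_msat: u32,
        proportional_millionths: u32,
    }

    // Advertised fees only matter when someone else forwards the HTLC; over
    // our own channel we keep the fee ourselves, so the router prices it at
    // zero regardless of whether the hop came from a hint or the graph.
    fn effective_fees(source_is_payer: bool, advertised: RoutingFees) -> RoutingFees {
        if source_is_payer {
            RoutingFees { base_msat: 0, proportional_millionths: 0 }
        } else {
            advertised
        }
    }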
--- lightning/src/routing/router.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 04f55837267..c92224bd658 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -2514,8 +2514,15 @@ where L::Target: Logger { let curr_min = cmp::max( $next_hops_path_htlc_minimum_msat, htlc_minimum_msat ); - let candidate_fees = $candidate.fees(); let src_node_counter = $candidate.src_node_counter(); + let mut candidate_fees = $candidate.fees(); + if src_node_counter == payer_node_counter { + // We do not charge ourselves a fee to use our own channels. + candidate_fees = RoutingFees { + proportional_millionths: 0, + base_msat: 0, + }; + } let path_htlc_minimum_msat = compute_fees_saturating(curr_min, candidate_fees) .saturating_add(curr_min); From fd8eab9241209c607817c26ded2176610532ea3f Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 3 Feb 2025 21:19:54 +0000 Subject: [PATCH 050/105] Clean up `long_mpp_route_test` and `mpp_cheaper_route_test` These tests are a bit annoying to deal with and ultimately work on almost the same graph subset, so it makes sense to combine their graph layout logic and then call it twice. We do that here, combining them and also cleaning up the possible paths as there actually are paths that the router could select which don't meet the tests requirements. --- lightning/src/routing/router.rs | 287 ++++++-------------------------- 1 file changed, 55 insertions(+), 232 deletions(-) diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index c92224bd658..806f494d1c5 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -5740,187 +5740,33 @@ mod tests { } #[test] - fn long_mpp_route_test() { - let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph(); - let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx); - let scorer = ln_test_utils::TestScorer::new(); - let random_seed_bytes = [42; 32]; - let config = UserConfig::default(); - let payment_params = PaymentParameters::from_node_id(nodes[3], 42) - .with_bolt11_features(channelmanager::provided_bolt11_invoice_features(&config)) - .unwrap(); - - // We need a route consisting of 3 paths: - // From our node to node3 via {node0, node2}, {node7, node2, node4} and {node7, node2}. - // Note that these paths overlap (channels 5, 12, 13). - // We will route 300 sats. - // Each path will have 100 sats capacity, those channels which - // are used twice will have 200 sats capacity. - - // Disable other potential paths. - update_channel(&gossip_sync, &secp_ctx, &our_privkey, UnsignedChannelUpdate { - chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 2, - timestamp: 2, - message_flags: 1, // Only must_be_one - channel_flags: 2, - cltv_expiry_delta: 0, - htlc_minimum_msat: 0, - htlc_maximum_msat: 100_000, - fee_base_msat: 0, - fee_proportional_millionths: 0, - excess_data: Vec::new() - }); - update_channel(&gossip_sync, &secp_ctx, &privkeys[2], UnsignedChannelUpdate { - chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 7, - timestamp: 2, - message_flags: 1, // Only must_be_one - channel_flags: 2, - cltv_expiry_delta: 0, - htlc_minimum_msat: 0, - htlc_maximum_msat: 100_000, - fee_base_msat: 0, - fee_proportional_millionths: 0, - excess_data: Vec::new() - }); - - // Path via {node0, node2} is channels {1, 3, 5}. 
- update_channel(&gossip_sync, &secp_ctx, &our_privkey, UnsignedChannelUpdate { - chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 1, - timestamp: 2, - message_flags: 1, // Only must_be_one - channel_flags: 0, - cltv_expiry_delta: 0, - htlc_minimum_msat: 0, - htlc_maximum_msat: 100_000, - fee_base_msat: 0, - fee_proportional_millionths: 0, - excess_data: Vec::new() - }); - update_channel(&gossip_sync, &secp_ctx, &privkeys[0], UnsignedChannelUpdate { - chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 3, - timestamp: 2, - message_flags: 1, // Only must_be_one - channel_flags: 0, - cltv_expiry_delta: 0, - htlc_minimum_msat: 0, - htlc_maximum_msat: 100_000, - fee_base_msat: 0, - fee_proportional_millionths: 0, - excess_data: Vec::new() - }); - - // Capacity of 200 sats because this channel will be used by 3rd path as well. - add_channel(&gossip_sync, &secp_ctx, &privkeys[2], &privkeys[3], ChannelFeatures::from_le_bytes(id_to_feature_flags(5)), 5); - update_channel(&gossip_sync, &secp_ctx, &privkeys[2], UnsignedChannelUpdate { - chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 5, - timestamp: 2, - message_flags: 1, // Only must_be_one - channel_flags: 0, - cltv_expiry_delta: 0, - htlc_minimum_msat: 0, - htlc_maximum_msat: 200_000, - fee_base_msat: 0, - fee_proportional_millionths: 0, - excess_data: Vec::new() - }); - update_channel(&gossip_sync, &secp_ctx, &privkeys[3], UnsignedChannelUpdate { - chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 5, - timestamp: 2, - message_flags: 1, // Only must_be_one - channel_flags: 3, // disable direction 1 - cltv_expiry_delta: 0, - htlc_minimum_msat: 0, - htlc_maximum_msat: 200_000, - fee_base_msat: 0, - fee_proportional_millionths: 0, - excess_data: Vec::new() - }); - - // Path via {node7, node2, node4} is channels {12, 13, 6, 11}. - // Add 100 sats to the capacities of {12, 13}, because these channels - // are also used for 3rd path. 100 sats for the rest. Total capacity: 100 sats. 
- update_channel(&gossip_sync, &secp_ctx, &our_privkey, UnsignedChannelUpdate { - chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 12, - timestamp: 2, - message_flags: 1, // Only must_be_one - channel_flags: 0, - cltv_expiry_delta: 0, - htlc_minimum_msat: 0, - htlc_maximum_msat: 200_000, - fee_base_msat: 0, - fee_proportional_millionths: 0, - excess_data: Vec::new() - }); - update_channel(&gossip_sync, &secp_ctx, &privkeys[7], UnsignedChannelUpdate { - chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 13, - timestamp: 2, - message_flags: 1, // Only must_be_one - channel_flags: 0, - cltv_expiry_delta: 0, - htlc_minimum_msat: 0, - htlc_maximum_msat: 200_000, - fee_base_msat: 0, - fee_proportional_millionths: 0, - excess_data: Vec::new() - }); - - update_channel(&gossip_sync, &secp_ctx, &privkeys[2], UnsignedChannelUpdate { - chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 6, - timestamp: 2, - message_flags: 1, // Only must_be_one - channel_flags: 0, - cltv_expiry_delta: 0, - htlc_minimum_msat: 0, - htlc_maximum_msat: 100_000, - fee_base_msat: 0, - fee_proportional_millionths: 0, - excess_data: Vec::new() - }); - update_channel(&gossip_sync, &secp_ctx, &privkeys[4], UnsignedChannelUpdate { - chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 11, - timestamp: 2, - message_flags: 1, // Only must_be_one - channel_flags: 0, - cltv_expiry_delta: 0, - htlc_minimum_msat: 0, - htlc_maximum_msat: 100_000, - fee_base_msat: 0, - fee_proportional_millionths: 0, - excess_data: Vec::new() - }); - - // Path via {node7, node2} is channels {12, 13, 5}. - // We already limited them to 200 sats (they are used twice for 100 sats). - // Nothing to do here. - + fn mpp_tests() { + let secp_ctx = Secp256k1::new(); + let (_, _, _, nodes) = get_nodes(&secp_ctx); { - // Attempt to route more than available results in a failure. - let route_params = RouteParameters::from_payment_params_and_value( - payment_params.clone(), 350_000); - if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route( - &our_id, &route_params, &network_graph.read_only(), None, Arc::clone(&logger), - &scorer, &Default::default(), &random_seed_bytes) { - assert_eq!(err, "Failed to find a sufficient route to the given destination"); - } else { panic!(); } - } + // Check that if we have two cheaper paths and a more expensive (fewer hops) path, we + // choose the two cheaper paths: + let route = do_mpp_route_tests(180_000).unwrap(); + assert_eq!(route.paths.len(), 2); + let mut total_value_transferred_msat = 0; + let mut total_paid_msat = 0; + for path in &route.paths { + assert_eq!(path.hops.last().unwrap().pubkey, nodes[3]); + total_value_transferred_msat += path.final_value_msat(); + for hop in &path.hops { + total_paid_msat += hop.fee_msat; + } + } + // If we paid fee, this would be higher. + assert_eq!(total_value_transferred_msat, 180_000); + let total_fees_paid = total_paid_msat - total_value_transferred_msat; + assert_eq!(total_fees_paid, 0); + } { - // Now, attempt to route 300 sats (exact amount we can route). - // Our algorithm should provide us with these 3 paths, 100 sats each. 
- let route_params = RouteParameters::from_payment_params_and_value( - payment_params, 300_000); - let route = get_route(&our_id, &route_params, &network_graph.read_only(), None, - Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap(); + // Check that if we use the same channels but need to send more than we could fit in + // the cheaper paths we select all three paths: + let route = do_mpp_route_tests(300_000).unwrap(); assert_eq!(route.paths.len(), 3); let mut total_amount_paid_msat = 0; @@ -5930,11 +5776,11 @@ mod tests { } assert_eq!(total_amount_paid_msat, 300_000); } - + // Check that trying to pay more than our available liquidity fails. + assert!(do_mpp_route_tests(300_001).is_err()); } - #[test] - fn mpp_cheaper_route_test() { + fn do_mpp_route_tests(amt: u64) -> Result<Route, LightningError> { let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph(); let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx); let scorer = ln_test_utils::TestScorer::new(); @@ -5944,21 +5790,17 @@ mod tests { .with_bolt11_features(channelmanager::provided_bolt11_invoice_features(&config)) .unwrap(); - // This test checks that if we have two cheaper paths and one more expensive path, - // so that liquidity-wise any 2 of 3 combination is sufficient, - // two cheaper paths will be taken. - // These paths have equal available liquidity. - - // We need a combination of 3 paths: - // From our node to node3 via {node0, node2}, {node7, node2, node4} and {node7, node2}. - // Note that these paths overlap (channels 5, 12, 13). - // Each path will have 100 sats capacity, those channels which - // are used twice will have 200 sats capacity. + // Build a setup where we have three potential paths from us to node3: + // {node0, node2, node4} (channels 1, 3, 6, 11), fee 0 msat, + // {node7, node2, node4} (channels 12, 13, 6, 11), fee 0 msat, and + // {node1} (channel 2, then a new channel 16), fee 1000 msat. + // Note that these paths overlap on channels 6 and 11. + // Each channel will have 100 sats capacity except for 6 and 11, which have 200. // Disable other potential paths. - update_channel(&gossip_sync, &secp_ctx, &our_privkey, UnsignedChannelUpdate { + update_channel(&gossip_sync, &secp_ctx, &privkeys[2], UnsignedChannelUpdate { chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 2, + short_channel_id: 7, timestamp: 2, message_flags: 1, // Only must_be_one channel_flags: 2, cltv_expiry_delta: 0, htlc_minimum_msat: 0, htlc_maximum_msat: 100_000, fee_base_msat: 0, fee_proportional_millionths: 0, excess_data: Vec::new() }); - update_channel(&gossip_sync, &secp_ctx, &privkeys[2], UnsignedChannelUpdate { + update_channel(&gossip_sync, &secp_ctx, &privkeys[1], UnsignedChannelUpdate { chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 7, + short_channel_id: 4, timestamp: 2, message_flags: 1, // Only must_be_one channel_flags: 2, cltv_expiry_delta: 0, htlc_minimum_msat: 0, htlc_maximum_msat: 100_000, fee_base_msat: 0, fee_proportional_millionths: 0, excess_data: Vec::new() }); @@ -6011,31 +5853,30 @@ mod tests { excess_data: Vec::new() }); - // Capacity of 200 sats because this channel will be used by 3rd path as well.
- add_channel(&gossip_sync, &secp_ctx, &privkeys[2], &privkeys[3], ChannelFeatures::from_le_bytes(id_to_feature_flags(5)), 5); - update_channel(&gossip_sync, &secp_ctx, &privkeys[2], UnsignedChannelUpdate { + add_channel(&gossip_sync, &secp_ctx, &privkeys[1], &privkeys[3], ChannelFeatures::from_le_bytes(id_to_feature_flags(16)), 16); + update_channel(&gossip_sync, &secp_ctx, &privkeys[1], UnsignedChannelUpdate { chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 5, + short_channel_id: 16, timestamp: 2, message_flags: 1, // Only must_be_one channel_flags: 0, cltv_expiry_delta: 0, htlc_minimum_msat: 0, - htlc_maximum_msat: 200_000, - fee_base_msat: 0, + htlc_maximum_msat: 100_000, + fee_base_msat: 1_000, fee_proportional_millionths: 0, excess_data: Vec::new() }); update_channel(&gossip_sync, &secp_ctx, &privkeys[3], UnsignedChannelUpdate { chain_hash: ChainHash::using_genesis_block(Network::Testnet), - short_channel_id: 5, + short_channel_id: 16, timestamp: 2, message_flags: 1, // Only must_be_one channel_flags: 3, // disable direction 1 cltv_expiry_delta: 0, htlc_minimum_msat: 0, - htlc_maximum_msat: 200_000, - fee_base_msat: 0, + htlc_maximum_msat: 100_000, + fee_base_msat: 1_000, fee_proportional_millionths: 0, excess_data: Vec::new() }); @@ -6051,7 +5892,7 @@ mod tests { channel_flags: 0, cltv_expiry_delta: 0, htlc_minimum_msat: 0, - htlc_maximum_msat: 200_000, + htlc_maximum_msat: 100_000, fee_base_msat: 0, fee_proportional_millionths: 0, excess_data: Vec::new() @@ -6064,7 +5905,7 @@ mod tests { channel_flags: 0, cltv_expiry_delta: 0, htlc_minimum_msat: 0, - htlc_maximum_msat: 200_000, + htlc_maximum_msat: 100_000, fee_base_msat: 0, fee_proportional_millionths: 0, excess_data: Vec::new() @@ -6078,8 +5919,8 @@ mod tests { channel_flags: 0, cltv_expiry_delta: 0, htlc_minimum_msat: 0, - htlc_maximum_msat: 100_000, - fee_base_msat: 1_000, + htlc_maximum_msat: 200_000, + fee_base_msat: 0, fee_proportional_millionths: 0, excess_data: Vec::new() }); @@ -6091,7 +5932,7 @@ mod tests { channel_flags: 0, cltv_expiry_delta: 0, htlc_minimum_msat: 0, - htlc_maximum_msat: 100_000, + htlc_maximum_msat: 200_000, fee_base_msat: 0, fee_proportional_millionths: 0, excess_data: Vec::new() @@ -6101,29 +5942,11 @@ mod tests { // We already limited them to 200 sats (they are used twice for 100 sats). // Nothing to do here. - { - // Now, attempt to route 180 sats. - // Our algorithm should provide us with these 2 paths. - let route_params = RouteParameters::from_payment_params_and_value( - payment_params, 180_000); - let route = get_route(&our_id, &route_params, &network_graph.read_only(), None, - Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap(); - assert_eq!(route.paths.len(), 2); - - let mut total_value_transferred_msat = 0; - let mut total_paid_msat = 0; - for path in &route.paths { - assert_eq!(path.hops.last().unwrap().pubkey, nodes[3]); - total_value_transferred_msat += path.final_value_msat(); - for hop in &path.hops { - total_paid_msat += hop.fee_msat; - } - } - // If we paid fee, this would be higher. 
- assert_eq!(total_value_transferred_msat, 180_000); - let total_fees_paid = total_paid_msat - total_value_transferred_msat; - assert_eq!(total_fees_paid, 0); - } + let route_params = RouteParameters::from_payment_params_and_value( + payment_params, amt); + let res = get_route(&our_id, &route_params, &network_graph.read_only(), None, + Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes); + res } #[test] From e46489dcdb05b89a9f6d1af4c38eb72898bf7bbb Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 3 Feb 2025 21:30:41 +0000 Subject: [PATCH 051/105] Track `node_counter` in `RouteGraphNode` In a coming commit we'll start calling `add_entries_to_cheapest_to_target_node` without always having a public-graph node entry in order to process last- and first-hops via a common codepath. In order to do so, we always need the `node_counter` for the node, however, and thus we track them in `RouteGraphNode` and pass them through to `add_entries_to_cheapest_to_target_node` here. We also take this opportunity to swap the node preference logic to look at the counters, which is slightly less computational work, though it does require some unrelated test changes. --- lightning/src/ln/reload_tests.rs | 1 + lightning/src/routing/router.rs | 15 ++++++++++----- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 28465a09660..eaeb3e7bac4 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -963,6 +963,7 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { // Ensure that the remaining channel is fully operation and not blocked (and that after a // cycle of commitment updates the payment preimage is ultimately pruned). + nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); send_payment(&nodes[0], &[&nodes[2], &nodes[3]], 100_000); assert!(!get_monitor!(nodes[3], chan_id_not_persisted).get_stored_preimages().contains_key(&payment_hash)); } diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 806f494d1c5..189f2ba498c 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -1164,6 +1164,7 @@ impl_writeable_tlv_based!(RouteHintHop, { #[repr(align(64))] // Force the size to 64 bytes struct RouteGraphNode { node_id: NodeId, + node_counter: u32, score: u64, // The maximum value a yet-to-be-constructed payment path might flow through this node. // This value is upper-bounded by us by: @@ -1178,7 +1179,7 @@ struct RouteGraphNode { impl cmp::Ord for RouteGraphNode { fn cmp(&self, other: &RouteGraphNode) -> cmp::Ordering { - other.score.cmp(&self.score).then_with(|| other.node_id.cmp(&self.node_id)) + other.score.cmp(&self.score).then_with(|| other.node_counter.cmp(&self.node_counter)) } } @@ -2625,6 +2626,7 @@ where L::Target: Logger { if !old_entry.was_processed && new_cost < old_cost { let new_graph_node = RouteGraphNode { node_id: src_node_id, + node_counter: src_node_counter, score: cmp::max(total_fee_msat, path_htlc_minimum_msat).saturating_add(path_penalty_msat), total_cltv_delta: hop_total_cltv_delta, value_contribution_msat, @@ -2703,7 +2705,7 @@ where L::Target: Logger { // meaning how much will be paid in fees after this node (to the best of our knowledge). // This data can later be helpful to optimize routing (pay lower fees). macro_rules! 
add_entries_to_cheapest_to_target_node { - ( $node: expr, $node_id: expr, $next_hops_value_contribution: expr, + ( $node: expr, $node_counter: expr, $node_id: expr, $next_hops_value_contribution: expr, $next_hops_cltv_delta: expr, $next_hops_path_length: expr ) => { let fee_to_target_msat; let next_hops_path_htlc_minimum_msat; @@ -2843,7 +2845,9 @@ where L::Target: Logger { // If not, targets.pop() will not even let us enter the loop in step 2. None => {}, Some(node) => { - add_entries_to_cheapest_to_target_node!(node, payee, path_value_msat, 0, 0); + add_entries_to_cheapest_to_target_node!( + node, node.node_counter, payee, path_value_msat, 0, 0 + ); }, }); @@ -3071,7 +3075,7 @@ where L::Target: Logger { // Both these cases (and other cases except reaching recommended_value_msat) mean that // paths_collection will be stopped because found_new_path==false. // This is not necessarily a routing failure. - 'path_construction: while let Some(RouteGraphNode { node_id, total_cltv_delta, mut value_contribution_msat, path_length_to_node, .. }) = targets.pop() { + 'path_construction: while let Some(RouteGraphNode { node_id, node_counter, total_cltv_delta, mut value_contribution_msat, path_length_to_node, .. }) = targets.pop() { // Since we're going payee-to-payer, hitting our node as a target means we should stop // traversing the graph and arrange the path out of what we found. @@ -3209,7 +3213,8 @@ where L::Target: Logger { match network_nodes.get(&node_id) { None => {}, Some(node) => { - add_entries_to_cheapest_to_target_node!(node, node_id, + add_entries_to_cheapest_to_target_node!( + node, node_counter, node_id, value_contribution_msat, total_cltv_delta, path_length_to_node); }, From b39aef6254bed2ce4e19f7c7e7b1d6e9c76bf5a5 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 3 Feb 2025 21:40:04 +0000 Subject: [PATCH 052/105] Prefer higher-value, shorter equal-cost paths when routing This likely only impacts very rare edge cases, but if we have two equal-cost paths, we should likely prefer ones which contribute more value (avoiding cases where we use paths which are amount-limited but equal fee to higher-amount paths) and then paths with fewer hops (which may complete faster). It does make test behavior more robust against router changes, which comes in handy over the coming commits. --- lightning/src/routing/router.rs | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 189f2ba498c..0e39110641f 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -1179,7 +1179,10 @@ struct RouteGraphNode { impl cmp::Ord for RouteGraphNode { fn cmp(&self, other: &RouteGraphNode) -> cmp::Ordering { - other.score.cmp(&self.score).then_with(|| other.node_counter.cmp(&self.node_counter)) + other.score.cmp(&self.score) + .then_with(|| self.value_contribution_msat.cmp(&other.value_contribution_msat)) + .then_with(|| other.path_length_to_node.cmp(&self.path_length_to_node)) + .then_with(|| other.node_counter.cmp(&self.node_counter)) } } @@ -1809,11 +1812,7 @@ struct PathBuildingHop<'a> { /// The value will be actually deducted from the counterparty balance on the previous link. 
hop_use_fee_msat: u64, - #[cfg(all(not(ldk_bench), any(test, fuzzing)))] - // In tests, we apply further sanity checks on cases where we skip nodes we already processed - // to ensure it is specifically in cases where the fee has gone down because of a decrease in - // value_contribution_msat, which requires tracking it here. See comments below where it is - // used for more info. + /// The quantity of funds we're willing to route over this channel value_contribution_msat: u64, } @@ -1834,9 +1833,7 @@ impl<'a> core::fmt::Debug for PathBuildingHop<'a> { .field("total_fee_msat - (next_hops_fee_msat + hop_use_fee_msat)", &(&self.total_fee_msat.saturating_sub(self.next_hops_fee_msat).saturating_sub(self.hop_use_fee_msat))) .field("path_penalty_msat", &self.path_penalty_msat) .field("path_htlc_minimum_msat", &self.path_htlc_minimum_msat) - .field("cltv_expiry_delta", &self.candidate.cltv_expiry_delta()); - #[cfg(all(not(ldk_bench), any(test, fuzzing)))] - let debug_struct = debug_struct + .field("cltv_expiry_delta", &self.candidate.cltv_expiry_delta()) .field("value_contribution_msat", &self.value_contribution_msat); debug_struct.finish() } @@ -2546,7 +2543,6 @@ where L::Target: Logger { path_penalty_msat: u64::max_value(), was_processed: false, is_first_hop_target: false, - #[cfg(all(not(ldk_bench), any(test, fuzzing)))] value_contribution_msat, }); dist_entry.as_mut().unwrap() @@ -2622,8 +2618,11 @@ where L::Target: Logger { .saturating_add(old_entry.path_penalty_msat); let new_cost = cmp::max(total_fee_msat, path_htlc_minimum_msat) .saturating_add(path_penalty_msat); + let should_replace = + new_cost < old_cost + || (new_cost == old_cost && old_entry.value_contribution_msat < value_contribution_msat); - if !old_entry.was_processed && new_cost < old_cost { + if !old_entry.was_processed && should_replace { let new_graph_node = RouteGraphNode { node_id: src_node_id, node_counter: src_node_counter, @@ -2640,10 +2639,7 @@ where L::Target: Logger { old_entry.fee_msat = 0; // This value will be later filled with hop_use_fee_msat of the following channel old_entry.path_htlc_minimum_msat = path_htlc_minimum_msat; old_entry.path_penalty_msat = path_penalty_msat; - #[cfg(all(not(ldk_bench), any(test, fuzzing)))] - { - old_entry.value_contribution_msat = value_contribution_msat; - } + old_entry.value_contribution_msat = value_contribution_msat; hop_contribution_amt_msat = Some(value_contribution_msat); } else if old_entry.was_processed && new_cost < old_cost { #[cfg(all(not(ldk_bench), any(test, fuzzing)))] @@ -2814,7 +2810,6 @@ where L::Target: Logger { path_penalty_msat: u64::max_value(), was_processed: false, is_first_hop_target: true, - #[cfg(all(not(ldk_bench), any(test, fuzzing)))] value_contribution_msat: 0, }); } From 1976fb83109c0da8c48dc0fcdd8408cb13540a68 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 3 Feb 2025 22:21:18 +0000 Subject: [PATCH 053/105] Move last-hop route handling to the common "normal" hop codepath When we handle the unblinded last-hop route hints from an invoice, we had a good bit of code dedicated to handling fee propagation through the (potentially) multiple last-hops and connecting them to potentially directly-connected first-hops. This was a good bit of code that was almost never used, and it turns out was also buggy - we could process a route hint with multiple hops, committing to one path through nodes A, B, to C, then process another route hint (or public channel) which changes our best path from B to C, making the A entry invalid. 
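A toy model of that staleness hazard (plain maps rather than LDK's actual `dist` table, purely illustrative):

    use std::collections::HashMap;

    fn main() {
        // Costs-to-payee recorded while walking a multi-hop route hint.
        let mut cost_to_payee: HashMap<&str, u64> = HashMap::new();
        cost_to_payee.insert("B", 100); // via the hinted B -> C hop
        cost_to_payee.insert("A", 110); // derived from B's entry at that moment

        // A cheaper public B -> C channel is processed later...
        cost_to_payee.insert("B", 40);
        // ...but A's entry still embeds the old B -> C cost and is never
        // revisited, so the route built through A is priced wrong.
        assert_eq!(cost_to_payee["A"], 110);
    }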
Here we remove the whole maze, utilizing the normal hop-processing logic in `add_entries_to_cheapest_to_target_node` for last-hops as well. It requires tracking which nodes connect to last-hop hints similar to the way we do with `is_first_hop_target` in `PathBuildingHop`, storing the `CandidateRouteHop`s in a new map, and always calling `add_entries_to_cheapest_to_target_node` on the payee node, whether its public or not. --- lightning/src/routing/router.rs | 381 +++++++++++++------------------- 1 file changed, 150 insertions(+), 231 deletions(-) diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 0e39110641f..005f3a19ef0 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -1792,6 +1792,8 @@ struct PathBuildingHop<'a> { /// updated after being initialized - it is set at the start of a route-finding pass and only /// read thereafter. is_first_hop_target: bool, + /// Identical to the above, but for handling unblinded last-hops rather than first-hops. + is_last_hop_target: bool, /// Used to compare channels when choosing the for routing. /// Includes paying for the use of a hop and the following hops, as well as /// an estimated cost of reaching this hop. @@ -1827,6 +1829,7 @@ impl<'a> core::fmt::Debug for PathBuildingHop<'a> { .field("target_node_id", &self.candidate.target()) .field("short_channel_id", &self.candidate.short_channel_id()) .field("is_first_hop_target", &self.is_first_hop_target) + .field("is_last_hop_target", &self.is_last_hop_target) .field("total_fee_msat", &self.total_fee_msat) .field("next_hops_fee_msat", &self.next_hops_fee_msat) .field("hop_use_fee_msat", &self.hop_use_fee_msat) @@ -2264,8 +2267,10 @@ where L::Target: Logger { // Step (1). Prepare first and last hop targets. // - // First cache all our direct channels so that we can insert them in the heap at startup. - // Then process any blinded routes, resolving their introduction node and caching it. + // For unblinded first- and last-hop channels, cache them in maps so that we can detect them as + // we walk the graph and incorporate them into our candidate set. + // For blinded last-hop paths, look up their introduction point and cache the node counters + // identifying them. 
let mut first_hop_targets: HashMap<_, (Vec<&ChannelDetails>, u32)> = hash_map_with_capacity(if first_hops.is_some() { first_hops.as_ref().unwrap().len() } else { 0 }); if let Some(hops) = first_hops { @@ -2297,6 +2302,48 @@ where L::Target: Logger { &payment_params, &node_counters, network_graph, &logger, our_node_id, &first_hop_targets, )?; + let mut last_hop_candidates = + hash_map_with_capacity(payment_params.payee.unblinded_route_hints().len()); + for route in payment_params.payee.unblinded_route_hints().iter() + .filter(|route| !route.0.is_empty()) + { + let hop_iter = route.0.iter().rev(); + let prev_hop_iter = core::iter::once(&maybe_dummy_payee_pk).chain( + route.0.iter().skip(1).rev().map(|hop| &hop.src_node_id)); + + for (hop, prev_hop_id) in hop_iter.zip(prev_hop_iter) { + let (target, private_target_node_counter) = + node_counters.private_node_counter_from_pubkey(&prev_hop_id) + .expect("node_counter_from_pubkey is called on all unblinded_route_hints keys above, so is always Some here"); + let (_src_id, private_source_node_counter) = + node_counters.private_node_counter_from_pubkey(&hop.src_node_id) + .expect("node_counter_from_pubkey is called on all unblinded_route_hints keys above, so is always Some here"); + + if let Some((first_channels, _)) = first_hop_targets.get(target) { + if first_channels.iter().any(|d| d.outbound_scid_alias == Some(hop.short_channel_id)) { + log_trace!(logger, "Ignoring route hint with SCID {} (and any previous) due to it being a direct channel of ours.", + hop.short_channel_id); + break; + } + } + + let candidate = network_channels + .get(&hop.short_channel_id) + .and_then(|channel| channel.as_directed_to(target)) + .map(|(info, _)| CandidateRouteHop::PublicHop(PublicHopCandidate { + info, + short_channel_id: hop.short_channel_id, + })) + .unwrap_or_else(|| CandidateRouteHop::PrivateHop(PrivateHopCandidate { + hint: hop, target_node_id: target, + source_node_counter: *private_source_node_counter, + target_node_counter: *private_target_node_counter, + })); + + last_hop_candidates.entry(private_target_node_counter).or_insert_with(Vec::new).push(candidate); + } + } + // The main heap containing all candidate next-hops sorted by their score (max(fee, // htlc_minimum)). Ideally this would be a heap which allowed cheap score reduction instead of // adding duplicate entries when we find a better path to a given node. @@ -2543,6 +2590,7 @@ where L::Target: Logger { path_penalty_msat: u64::max_value(), was_processed: false, is_first_hop_target: false, + is_last_hop_target: false, value_contribution_msat, }); dist_entry.as_mut().unwrap() @@ -2706,14 +2754,15 @@ where L::Target: Logger { let fee_to_target_msat; let next_hops_path_htlc_minimum_msat; let next_hops_path_penalty_msat; - let is_first_hop_target; - let skip_node = if let Some(elem) = &mut dist[$node.node_counter as usize] { + let (is_first_hop_target, is_last_hop_target); + let skip_node = if let Some(elem) = &mut dist[$node_counter as usize] { let was_processed = elem.was_processed; elem.was_processed = true; fee_to_target_msat = elem.total_fee_msat; next_hops_path_htlc_minimum_msat = elem.path_htlc_minimum_msat; next_hops_path_penalty_msat = elem.path_penalty_msat; is_first_hop_target = elem.is_first_hop_target; + is_last_hop_target = elem.is_last_hop_target; was_processed } else { // Entries are added to dist in add_entry!() when there is a channel from a node. 
@@ -2725,17 +2774,28 @@ where L::Target: Logger { next_hops_path_htlc_minimum_msat = 0; next_hops_path_penalty_msat = 0; is_first_hop_target = false; + is_last_hop_target = false; false }; if !skip_node { + if is_last_hop_target { + if let Some(candidates) = last_hop_candidates.get(&$node_counter) { + for candidate in candidates { + add_entry!(candidate, fee_to_target_msat, + $next_hops_value_contribution, + next_hops_path_htlc_minimum_msat, next_hops_path_penalty_msat, + $next_hops_cltv_delta, $next_hops_path_length); + } + } + } if is_first_hop_target { if let Some((first_channels, peer_node_counter)) = first_hop_targets.get(&$node_id) { for details in first_channels { - debug_assert_eq!(*peer_node_counter, $node.node_counter); + debug_assert_eq!(*peer_node_counter, $node_counter); let candidate = CandidateRouteHop::FirstHop(FirstHopCandidate { details, payer_node_id: &our_node_id, payer_node_counter, - target_node_counter: $node.node_counter, + target_node_counter: $node_counter, }); add_entry!(&candidate, fee_to_target_msat, $next_hops_value_contribution, @@ -2745,29 +2805,31 @@ where L::Target: Logger { } } - let features = if let Some(node_info) = $node.announcement_info.as_ref() { - &node_info.features() - } else { - &default_node_features - }; + if let Some(node) = $node { + let features = if let Some(node_info) = node.announcement_info.as_ref() { + &node_info.features() + } else { + &default_node_features + }; - if !features.requires_unknown_bits() { - for chan_id in $node.channels.iter() { - let chan = network_channels.get(chan_id).unwrap(); - if !chan.features.requires_unknown_bits() { - if let Some((directed_channel, source)) = chan.as_directed_to(&$node_id) { - if first_hops.is_none() || *source != our_node_id { - if directed_channel.direction().enabled { - let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate { - info: directed_channel, - short_channel_id: *chan_id, - }); - add_entry!(&candidate, - fee_to_target_msat, - $next_hops_value_contribution, - next_hops_path_htlc_minimum_msat, - next_hops_path_penalty_msat, - $next_hops_cltv_delta, $next_hops_path_length); + if !features.requires_unknown_bits() { + for chan_id in node.channels.iter() { + let chan = network_channels.get(chan_id).unwrap(); + if !chan.features.requires_unknown_bits() { + if let Some((directed_channel, source)) = chan.as_directed_to(&$node_id) { + if first_hops.is_none() || *source != our_node_id { + if directed_channel.direction().enabled { + let candidate = CandidateRouteHop::PublicHop(PublicHopCandidate { + info: directed_channel, + short_channel_id: *chan_id, + }); + add_entry!(&candidate, + fee_to_target_msat, + $next_hops_value_contribution, + next_hops_path_htlc_minimum_msat, + next_hops_path_penalty_msat, + $next_hops_cltv_delta, $next_hops_path_length); + } } } } @@ -2788,13 +2850,23 @@ where L::Target: Logger { for e in dist.iter_mut() { *e = None; } + + // Step (2). + // Add entries for first-hop and last-hop channel hints to `dist` and add the payee node as + // the best entry via `add_entry`. + // For first- and last-hop hints we need only add dummy entries in `dist` with the relevant + // flags set. As we walk the graph in `add_entries_to_cheapest_to_target_node` we'll check + // those flags and add the channels described by the hints. + // We then either add the payee using `add_entries_to_cheapest_to_target_node` or add the + // blinded paths to the payee using `add_entry`, filling `targets` and setting us up for + // our graph walk. 
for (_, (chans, peer_node_counter)) in first_hop_targets.iter() { // In order to avoid looking up whether each node is a first-hop target, we store a // dummy entry in dist for each first-hop target, allowing us to do this lookup for // free since we're already looking at the `was_processed` flag. // - // Note that all the fields (except `is_first_hop_target`) will be overwritten whenever - // we find a path to the target, so are left as dummies here. + // Note that all the fields (except `is_{first,last}_hop_target`) will be overwritten + // whenever we find a path to the target, so are left as dummies here. dist[*peer_node_counter as usize] = Some(PathBuildingHop { candidate: CandidateRouteHop::FirstHop(FirstHopCandidate { details: &chans[0], @@ -2810,46 +2882,56 @@ where L::Target: Logger { path_penalty_msat: u64::max_value(), was_processed: false, is_first_hop_target: true, + is_last_hop_target: false, value_contribution_msat: 0, }); } - hit_minimum_limit = false; - - // If first hop is a private channel and the only way to reach the payee, this is the only - // place where it could be added. - payee_node_id_opt.map(|payee| first_hop_targets.get(&payee).map(|(first_channels, peer_node_counter)| { - debug_assert_eq!(*peer_node_counter, payee_node_counter); - for details in first_channels { - let candidate = CandidateRouteHop::FirstHop(FirstHopCandidate { - details, payer_node_id: &our_node_id, payer_node_counter, - target_node_counter: payee_node_counter, + for (target_node_counter, candidates) in last_hop_candidates.iter() { + // In order to avoid looking up whether each node is a last-hop target, we store a + // dummy entry in dist for each last-hop target, allowing us to do this lookup for + // free since we're already looking at the `was_processed` flag. + // + // Note that all the fields (except `is_{first,last}_hop_target`) will be overwritten + // whenever we find a path to the target, so are left as dummies here. + debug_assert!(!candidates.is_empty()); + if candidates.is_empty() { continue } + let entry = &mut dist[**target_node_counter as usize]; + if let Some(hop) = entry { + hop.is_last_hop_target = true; + } else { + *entry = Some(PathBuildingHop { + candidate: candidates[0].clone(), + fee_msat: 0, + next_hops_fee_msat: u64::max_value(), + hop_use_fee_msat: u64::max_value(), + total_fee_msat: u64::max_value(), + path_htlc_minimum_msat: u64::max_value(), + path_penalty_msat: u64::max_value(), + was_processed: false, + is_first_hop_target: false, + is_last_hop_target: true, + value_contribution_msat: 0, }); - let added = add_entry!(&candidate, 0, path_value_msat, - 0, 0u64, 0, 0).is_some(); - log_trace!(logger, "{} direct route to payee via {}", - if added { "Added" } else { "Skipped" }, LoggedCandidateHop(&candidate)); } - })); + } + hit_minimum_limit = false; - // Add the payee as a target, so that the payee-to-payer - // search algorithm knows what to start with. - payee_node_id_opt.map(|payee| match network_nodes.get(&payee) { - // The payee is not in our network graph, so nothing to add here. - // There is still a chance of reaching them via last_hops though, - // so don't yet fail the payment here. - // If not, targets.pop() will not even let us enter the loop in step 2. 
- None => {}, - Some(node) => { - add_entries_to_cheapest_to_target_node!( - node, node.node_counter, payee, path_value_msat, 0, 0 - ); - }, - }); + if let Some(payee) = payee_node_id_opt { + if let Some(entry) = &mut dist[payee_node_counter as usize] { + // If we built a dummy entry above we need to reset the values to represent 0 fee + // from the target "to the target". + entry.next_hops_fee_msat = 0; + entry.hop_use_fee_msat = 0; + entry.total_fee_msat = 0; + entry.path_htlc_minimum_msat = 0; + entry.path_penalty_msat = 0; + entry.value_contribution_msat = path_value_msat; + } + add_entries_to_cheapest_to_target_node!( + network_nodes.get(&payee), payee_node_counter, payee, path_value_msat, 0, 0 + ); + } - // Step (2). - // If a caller provided us with last hops, add them to routing targets. Since this happens - // earlier than general path finding, they will be somewhat prioritized, although currently - // it matters only if the fees are exactly the same. debug_assert_eq!( payment_params.payee.blinded_route_hints().len(), introduction_node_id_cache.len(), @@ -2895,165 +2977,6 @@ where L::Target: Logger { } } } - for route in payment_params.payee.unblinded_route_hints().iter() - .filter(|route| !route.0.is_empty()) - { - let first_hop_src_id = NodeId::from_pubkey(&route.0.first().unwrap().src_node_id); - let first_hop_src_is_reachable = - // Only add the hops in this route to our candidate set if either we are part of - // the first hop, we have a direct channel to the first hop, or the first hop is in - // the regular network graph. - our_node_id == first_hop_src_id || - first_hop_targets.get(&first_hop_src_id).is_some() || - network_nodes.get(&first_hop_src_id).is_some(); - if first_hop_src_is_reachable { - // We start building the path from reverse, i.e., from payee - // to the first RouteHintHop in the path. 
- let hop_iter = route.0.iter().rev(); - let prev_hop_iter = core::iter::once(&maybe_dummy_payee_pk).chain( - route.0.iter().skip(1).rev().map(|hop| &hop.src_node_id)); - let mut hop_used = true; - let mut aggregate_next_hops_fee_msat: u64 = 0; - let mut aggregate_next_hops_path_htlc_minimum_msat: u64 = 0; - let mut aggregate_next_hops_path_penalty_msat: u64 = 0; - let mut aggregate_next_hops_cltv_delta: u32 = 0; - let mut aggregate_next_hops_path_length: u8 = 0; - let mut aggregate_path_contribution_msat = path_value_msat; - - for (idx, (hop, prev_hop_id)) in hop_iter.zip(prev_hop_iter).enumerate() { - let (target, private_target_node_counter) = - node_counters.private_node_counter_from_pubkey(&prev_hop_id) - .expect("node_counter_from_pubkey is called on all unblinded_route_hints keys during setup, so is always Some here"); - let (_src_id, private_source_node_counter) = - node_counters.private_node_counter_from_pubkey(&hop.src_node_id) - .expect("node_counter_from_pubkey is called on all unblinded_route_hints keys during setup, so is always Some here"); - - if let Some((first_channels, _)) = first_hop_targets.get(target) { - if first_channels.iter().any(|d| d.outbound_scid_alias == Some(hop.short_channel_id)) { - log_trace!(logger, "Ignoring route hint with SCID {} (and any previous) due to it being a direct channel of ours.", - hop.short_channel_id); - break; - } - } - - let candidate = network_channels - .get(&hop.short_channel_id) - .and_then(|channel| channel.as_directed_to(target)) - .map(|(info, _)| CandidateRouteHop::PublicHop(PublicHopCandidate { - info, - short_channel_id: hop.short_channel_id, - })) - .unwrap_or_else(|| CandidateRouteHop::PrivateHop(PrivateHopCandidate { - hint: hop, target_node_id: target, - source_node_counter: *private_source_node_counter, - target_node_counter: *private_target_node_counter, - })); - - if let Some(hop_used_msat) = add_entry!(&candidate, - aggregate_next_hops_fee_msat, aggregate_path_contribution_msat, - aggregate_next_hops_path_htlc_minimum_msat, aggregate_next_hops_path_penalty_msat, - aggregate_next_hops_cltv_delta, aggregate_next_hops_path_length) - { - aggregate_path_contribution_msat = hop_used_msat; - } else { - // If this hop was not used then there is no use checking the preceding - // hops in the RouteHint. We can break by just searching for a direct - // channel between last checked hop and first_hop_targets. 
- hop_used = false; - } - - let used_liquidity_msat = used_liquidities - .get(&candidate.id()).copied() - .unwrap_or(0); - let channel_usage = ChannelUsage { - amount_msat: final_value_msat + aggregate_next_hops_fee_msat, - inflight_htlc_msat: used_liquidity_msat, - effective_capacity: candidate.effective_capacity(), - }; - let channel_penalty_msat = scorer.channel_penalty_msat( - &candidate, channel_usage, score_params - ); - aggregate_next_hops_path_penalty_msat = aggregate_next_hops_path_penalty_msat - .saturating_add(channel_penalty_msat); - - aggregate_next_hops_cltv_delta = aggregate_next_hops_cltv_delta - .saturating_add(hop.cltv_expiry_delta as u32); - - aggregate_next_hops_path_length = aggregate_next_hops_path_length - .saturating_add(1); - - // Searching for a direct channel between last checked hop and first_hop_targets - if let Some((first_channels, peer_node_counter)) = first_hop_targets.get_mut(target) { - sort_first_hop_channels( - first_channels, &used_liquidities, recommended_value_msat, our_node_pubkey - ); - for details in first_channels { - let first_hop_candidate = CandidateRouteHop::FirstHop(FirstHopCandidate { - details, payer_node_id: &our_node_id, payer_node_counter, - target_node_counter: *peer_node_counter, - }); - add_entry!(&first_hop_candidate, - aggregate_next_hops_fee_msat, aggregate_path_contribution_msat, - aggregate_next_hops_path_htlc_minimum_msat, aggregate_next_hops_path_penalty_msat, - aggregate_next_hops_cltv_delta, aggregate_next_hops_path_length); - } - } - - if !hop_used { - break; - } - - // In the next values of the iterator, the aggregate fees already reflects - // the sum of value sent from payer (final_value_msat) and routing fees - // for the last node in the RouteHint. We need to just add the fees to - // route through the current node so that the preceding node (next iteration) - // can use it. - let hops_fee = compute_fees(aggregate_next_hops_fee_msat + final_value_msat, hop.fees) - .map_or(None, |inc| inc.checked_add(aggregate_next_hops_fee_msat)); - aggregate_next_hops_fee_msat = if let Some(val) = hops_fee { val } else { break; }; - - // The next channel will need to relay this channel's min_htlc *plus* the fees taken by - // this route hint's source node to forward said min over this channel. - aggregate_next_hops_path_htlc_minimum_msat = { - let curr_htlc_min = cmp::max( - candidate.htlc_minimum_msat(), aggregate_next_hops_path_htlc_minimum_msat - ); - let curr_htlc_min_fee = if let Some(val) = compute_fees(curr_htlc_min, hop.fees) { val } else { break }; - if let Some(min) = curr_htlc_min.checked_add(curr_htlc_min_fee) { min } else { break } - }; - - if idx == route.0.len() - 1 { - // The last hop in this iterator is the first hop in - // overall RouteHint. - // If this hop connects to a node with which we have a direct channel, - // ignore the network graph and, if the last hop was added, add our - // direct channel to the candidate set. - // - // Note that we *must* check if the last hop was added as `add_entry` - // always assumes that the third argument is a node to which we have a - // path. 
- if let Some((first_channels, peer_node_counter)) = first_hop_targets.get_mut(&NodeId::from_pubkey(&hop.src_node_id)) { - sort_first_hop_channels( - first_channels, &used_liquidities, recommended_value_msat, our_node_pubkey - ); - for details in first_channels { - let first_hop_candidate = CandidateRouteHop::FirstHop(FirstHopCandidate { - details, payer_node_id: &our_node_id, payer_node_counter, - target_node_counter: *peer_node_counter, - }); - add_entry!(&first_hop_candidate, - aggregate_next_hops_fee_msat, - aggregate_path_contribution_msat, - aggregate_next_hops_path_htlc_minimum_msat, - aggregate_next_hops_path_penalty_msat, - aggregate_next_hops_cltv_delta, - aggregate_next_hops_path_length); - } - } - } - } - } - } log_trace!(logger, "Starting main path collection loop with {} nodes pre-filled from first/last hops.", targets.len()); @@ -3205,15 +3128,11 @@ where L::Target: Logger { // Otherwise, since the current target node is not us, // keep "unrolling" the payment graph from payee to payer by // finding a way to reach the current target from the payer side. - match network_nodes.get(&node_id) { - None => {}, - Some(node) => { - add_entries_to_cheapest_to_target_node!( - node, node_counter, node_id, - value_contribution_msat, - total_cltv_delta, path_length_to_node); - }, - } + add_entries_to_cheapest_to_target_node!( - network_nodes.get(&node_id), node_counter, node_id, + value_contribution_msat, + total_cltv_delta, path_length_to_node + ); } if !allow_mpp { From 1beefb1aeb290bcfc6e27155667bbbe0d9b0cd91 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 2 Feb 2025 23:52:49 +0000 Subject: [PATCH 054/105] Add direct hops to intros after all blinded paths in pathfinding When we do pathfinding with blinded paths, we start each pathfinding iteration by inserting all the blinded paths into our nodes map as last-hops to the destination. As we do that, we check if any of the introduction points happen to be nodes we have direct channels with, as we want to use the local info for such channels and support finding a path even if that channel is not publicly announced. However, as we iterate the blinded paths, we may find a second blinded path from the same introduction point which we prefer over the first. If this happens, we would already have added info from us over the local channel to that intro point and end up with calculations for the first hop to a blinded path that we no longer prefer. This is ultimately fixed here in two ways: (a) we process the first-hop channels to blinded path introduction points in a separate loop after we've processed all blinded paths, ensuring we only ever consider a channel to the blinded path we will ultimately prefer. (b) In the next commit, we add a new tracking bool in `PathBuildingHop` called `best_path_from_hop_selected`, which we set when we process a channel backwards from a node, indicating that we've committed to the best path to the node, and which we check when we add a new path to a node. This would have resulted in a much earlier debug-assertion in fuzzing or several tests.
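The shape of fix (a) can be sketched with a minimal, hypothetical example (generic types, not the actual routing structures in the diff below): defer any side effect that depends on the winning candidate until every candidate has been seen.

    use std::collections::HashMap;

    // Phase 1: scan all candidates, keeping only the best (lowest-cost)
    // one per introduction node. No side effects yet.
    fn best_per_intro(candidates: &[(u32, u64)]) -> HashMap<u32, u64> {
        let mut best: HashMap<u32, u64> = HashMap::new();
        for &(intro_node, cost) in candidates {
            best.entry(intro_node)
                .and_modify(|c| *c = (*c).min(cost))
                .or_insert(cost);
        }
        best
    }

    fn main() {
        // Two blinded paths share introduction node 7; the second is cheaper.
        let candidates = [(7, 100), (9, 50), (7, 80)];
        // Phase 2: side effects (here just printing; in the router, scanning
        // first-hop channels) only ever see the preferred candidate.
        for (intro_node, cost) in best_per_intro(&candidates) {
            println!("intro node {intro_node}: preferred path cost {cost}");
        }
    }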
--- lightning/src/routing/router.rs | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 005f3a19ef0..2de85384fd2 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -2937,6 +2937,7 @@ where L::Target: Logger { introduction_node_id_cache.len(), "introduction_node_id_cache was built by iterating the blinded_route_hints, so they should be the same len" ); + let mut blind_intros_added = hash_map_with_capacity(payment_params.payee.blinded_route_hints().len()); for (hint_idx, hint) in payment_params.payee.blinded_route_hints().iter().enumerate() { // Only add the hops in this route to our candidate set if either // we have a direct channel to the first hop or the first hop is @@ -2951,12 +2952,21 @@ where L::Target: Logger { } else { CandidateRouteHop::Blinded(BlindedPathCandidate { source_node_counter, source_node_id, hint, hint_idx }) }; - let mut path_contribution_msat = path_value_msat; if let Some(hop_used_msat) = add_entry!(&candidate, - 0, path_contribution_msat, 0, 0_u64, 0, 0) + 0, path_value_msat, 0, 0_u64, 0, 0) { - path_contribution_msat = hop_used_msat; + blind_intros_added.insert(source_node_id, (hop_used_msat, candidate)); } else { continue } + } + // If we added a blinded path from an introduction node to the destination, where the + // introduction node is one of our direct peers, we need to scan our `first_channels` + // to detect this. However, doing so immediately after calling `add_entry`, above, could + // result in incorrect behavior if we, in a later loop iteration, update the fee from the + // same introduction point to the destination (due to a different blinded path with the + // same introduction point having a lower score). + // Thus, we track the nodes that we added paths from in `blind_intros_added` and scan for + // introduction points we have a channel with after processing all blinded paths. + for (source_node_id, (path_contribution_msat, candidate)) in blind_intros_added { if let Some((first_channels, peer_node_counter)) = first_hop_targets.get_mut(source_node_id) { sort_first_hop_channels( first_channels, &used_liquidities, recommended_value_msat, our_node_pubkey From ae4279d8d4903056aeedd6c38e67dccec870a8a6 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 3 Feb 2025 22:06:27 +0000 Subject: [PATCH 055/105] Add `PathBuildingHop::best_path_from_hop_selected` When we process a path backwards from a node during pathfinding, we implicitly commit to the path up to that node. Any changes to the preferred path up to that node will make the newly processed path's state invalid. In the previous few commits we fixed cases for this in last-hop paths (both blinded and unblinded). Here we add assertions to enforce this, tracked in a new bool in `PathBuildingHop`. --- lightning/src/routing/router.rs | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 2de85384fd2..5e3c4cc9241 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -1784,6 +1784,12 @@ struct PathBuildingHop<'a> { /// decrease as well. Thus, we have to explicitly track which nodes have been processed and /// avoid processing them again. was_processed: bool, + /// If we've already processed a channel backwards from a target node, we shouldn't update our + /// selected best path from that node to the destination. 
This should never happen, but with + /// multiple codepaths processing channels we've had issues here in the past, so in debug-mode + /// we track it and assert on it when processing a node. + #[cfg(all(not(ldk_bench), any(test, fuzzing)))] + best_path_from_hop_selected: bool, /// When processing a node as the next best-score candidate, we want to quickly check if it is /// a direct counterparty of ours, using our local channel information immediately if we can. /// @@ -2427,6 +2433,19 @@ where L::Target: Logger { // We "return" whether we updated the path at the end, and how much we can route via // this channel, via this: let mut hop_contribution_amt_msat = None; + + #[cfg(all(not(ldk_bench), any(test, fuzzing)))] + if let Some(counter) = $candidate.target_node_counter() { + // Once we are adding paths backwards from a given target, we've selected the best + // path from that target to the destination and it should no longer change. We thus + // set the best-path selected flag and check that it doesn't change below. + if let Some(node) = &mut dist[counter as usize] { + node.best_path_from_hop_selected = true; + } else if counter != payee_node_counter { + panic!("No dist entry for target node counter {}", counter); + } + } + // Channels to self should not be used. This is more of belt-and-suspenders, because in // practice these cases should be caught earlier: // - for regular channels at channel announcement (TODO) @@ -2591,6 +2610,8 @@ where L::Target: Logger { was_processed: false, is_first_hop_target: false, is_last_hop_target: false, + #[cfg(all(not(ldk_bench), any(test, fuzzing)))] + best_path_from_hop_selected: false, value_contribution_msat, }); dist_entry.as_mut().unwrap() @@ -2671,6 +2692,11 @@ where L::Target: Logger { || (new_cost == old_cost && old_entry.value_contribution_msat < value_contribution_msat); if !old_entry.was_processed && should_replace { + #[cfg(all(not(ldk_bench), any(test, fuzzing)))] + { + assert!(!old_entry.best_path_from_hop_selected); + } + let new_graph_node = RouteGraphNode { node_id: src_node_id, node_counter: src_node_counter, @@ -2884,6 +2910,8 @@ where L::Target: Logger { is_first_hop_target: true, is_last_hop_target: false, value_contribution_msat: 0, + #[cfg(all(not(ldk_bench), any(test, fuzzing)))] + best_path_from_hop_selected: false, }); } for (target_node_counter, candidates) in last_hop_candidates.iter() { @@ -2911,6 +2939,8 @@ where L::Target: Logger { is_first_hop_target: false, is_last_hop_target: true, value_contribution_msat: 0, + #[cfg(all(not(ldk_bench), any(test, fuzzing)))] + best_path_from_hop_selected: false, }); } } From 1788f80a0758e7913a3915f6c4562ae8f832c3ea Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 10 Feb 2025 20:22:28 +0000 Subject: [PATCH 056/105] Replace a few router `expect`s with `debug_assert` + `Err`-returns The router is a somewhat complicated beast, and though the last few commits removed some code from it, a complicated beast it remains. Thus, having `expect`s in it is somewhat risky, so we take this opportunity to replace some of them with `debug_assert!(false)`s and an `Err`-return. 
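As a minimal, hypothetical sketch of the pattern (not the actual router code, which follows in the diff below): the lookup still panics under test and fuzzing via `debug_assert!`, but degrades to a recoverable error in release builds.

    use std::collections::HashMap;

    #[derive(Debug)]
    struct LookupError(&'static str);

    fn must_get(map: &HashMap<u32, u64>, key: u32) -> Result<u64, LookupError> {
        map.get(&key).copied().ok_or_else(|| {
            // Loudly flag the broken invariant in debug/test builds...
            debug_assert!(false, "key {} should always be present", key);
            // ...but return an error the caller can handle in release builds.
            LookupError("missing entry that should always be present")
        })
    }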
--- lightning/src/routing/router.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 5e3c4cc9241..8425b5b55c3 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -2320,10 +2320,16 @@ where L::Target: Logger { for (hop, prev_hop_id) in hop_iter.zip(prev_hop_iter) { let (target, private_target_node_counter) = node_counters.private_node_counter_from_pubkey(&prev_hop_id) - .expect("node_counter_from_pubkey is called on all unblinded_route_hints keys above, so is always Some here"); + .ok_or_else(|| { + debug_assert!(false); + LightningError { err: "We should always have private target node counters available".to_owned(), action: ErrorAction::IgnoreError } + })?; let (_src_id, private_source_node_counter) = node_counters.private_node_counter_from_pubkey(&hop.src_node_id) - .expect("node_counter_from_pubkey is called on all unblinded_route_hints keys above, so is always Some here"); + .ok_or_else(|| { + debug_assert!(false); + LightningError { err: "We should always have private source node counters available".to_owned(), action: ErrorAction::IgnoreError } + })?; if let Some((first_channels, _)) = first_hop_targets.get(target) { if first_channels.iter().any(|d| d.outbound_scid_alias == Some(hop.short_channel_id)) { From bd56ddf9db7a1a37de085098968ac28e4d9de148 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 10 Feb 2025 20:26:24 +0000 Subject: [PATCH 057/105] More completely ignore route hints which are for our own channels When we see a channel come into the router as a route-hint, but it's for a direct channel of ours, we'd like to ignore the route-hint as we have more information in the first-hop channel info. We do this by matching SCIDs, but previously only considered outbound SCID aliases. Here we change to consider both outbound SCID aliases and the full channel SCID, which some nodes may use in their invoices. --- lightning/src/routing/router.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 8425b5b55c3..079b83563c3 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -2332,7 +2332,9 @@ where L::Target: Logger { })?; if let Some((first_channels, _)) = first_hop_targets.get(target) { - if first_channels.iter().any(|d| d.outbound_scid_alias == Some(hop.short_channel_id)) { + let matches_an_scid = |d: &&ChannelDetails| + d.outbound_scid_alias == Some(hop.short_channel_id) || d.short_channel_id == Some(hop.short_channel_id); + if first_channels.iter().any(matches_an_scid) { log_trace!(logger, "Ignoring route hint with SCID {} (and any previous) due to it being a direct channel of ours.", hop.short_channel_id); break; From fa8d59934b8b8b1d1b0c23ffb3cb9a83f7338161 Mon Sep 17 00:00:00 2001 From: Elias Rohrer Date: Fri, 24 Jan 2025 13:14:06 +0100 Subject: [PATCH 058/105] Add SemVer compatibility checks to CI We recently introduced release branches that need to remain backwards compatible. However, even small changes to item visibility during backporting fixes might introduce SemVer violations (see https://doc.rust-lang.org/cargo/reference/semver.html#change-categories for a list of changes that would be considered major/minor).
To make sure we don't accidentally introduce such changes in the backports, we here add a new `semver-checks` CI job that utilizes `cargo-semver-checks` (https://github.com/obi1kenobi/cargo-semver-checks), and have it run on any push or pull requests towards anything else but `main`/`master` (i.e., all feature branches to come). --- .github/workflows/semver.yml | 57 ++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 .github/workflows/semver.yml diff --git a/.github/workflows/semver.yml b/.github/workflows/semver.yml new file mode 100644 index 00000000000..73723fe34c3 --- /dev/null +++ b/.github/workflows/semver.yml @@ -0,0 +1,57 @@ +name: SemVer checks +on: + push: + branches-ignore: + - master + pull_request: + branches-ignore: + - master + +jobs: + semver-checks: + runs-on: ubuntu-latest + steps: + - name: Checkout source code + uses: actions/checkout@v4 + - name: Install Rust stable toolchain + run: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain stable + rustup override set stable + - name: Check SemVer with default features + uses: obi1kenobi/cargo-semver-checks-action@v2 + with: + feature-group: default-features + - name: Check SemVer *without* default features + uses: obi1kenobi/cargo-semver-checks-action@v2 + with: + feature-group: only-explicit-features + - name: Check lightning-background-processor SemVer + uses: obi1kenobi/cargo-semver-checks-action@v2 + with: + package: lightning-background-processor + feature-group: only-explicit-features + features: futures + - name: Check lightning-block-sync SemVer + uses: obi1kenobi/cargo-semver-checks-action@v2 + with: + package: lightning-block-sync + feature-group: only-explicit-features + features: rpc-client,rest-client + - name: Check lightning-transaction-sync electrum SemVer + uses: obi1kenobi/cargo-semver-checks-action@v2 + with: + manifest-path: lightning-transaction-sync/Cargo.toml + feature-group: only-explicit-features + features: electrum + - name: Check lightning-transaction-sync esplora-blocking SemVer + uses: obi1kenobi/cargo-semver-checks-action@v2 + with: + manifest-path: lightning-transaction-sync/Cargo.toml + feature-group: only-explicit-features + features: esplora-blocking + - name: Check lightning-transaction-sync esplora-async SemVer + uses: obi1kenobi/cargo-semver-checks-action@v2 + with: + manifest-path: lightning-transaction-sync/Cargo.toml + feature-group: only-explicit-features + features: esplora-async From 9d2449a5305866e09092c4c27d4ff2e086f3b5de Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 23 Feb 2025 02:22:55 +0000 Subject: [PATCH 059/105] Fix overflow in historical scoring model point count summation In adb0afc523f9fea44cd42e02a1022510a9c83a52 we started raising bucket weights to the power four in the historical model. This improved our model's accuracy greatly, but resulted in a much larger `total_valid_points_tracked`. In the same commit we converted `total_valid_points_tracked` to a float, but retained the 64-bit integer math to build it out of integer bucket values. Sadly, 64 bits are not enough to sum 1024 bucket pairs of 16-bit integers multiplied together and then squared (we need 16*4 + 10 = 74 bits to avoid overflow). Thus, here we replace the summation with 128-bit integers. 
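To see the bound concretely, here is a sketch of the arithmetic (following the commit's 1024-pair bound, not the scorer code itself): a single term is at most ((2^16 - 1)^2)^2, which still fits in a `u64`, but 2^10 such terms need about 74 bits.

    fn main() {
        // Largest possible term: a 16-bit bucket pair multiplied (< 2^32),
        // then squared (just below 2^64) -- a single term still fits in u64.
        let pair = 0xffffu64 * 0xffffu64;
        let term = pair * pair;

        // But summing 1024 such terms needs ~74 bits, overflowing u64...
        let total = (term as u128) * 1024;
        assert!(total > u64::MAX as u128);

        // ...so the summation must accumulate in u128.
        let mut sum = 0u128;
        for _ in 0..1024 {
            sum += term as u128;
        }
        assert_eq!(sum, total);
    }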
Fairly straightforward merge conflicts (code added in 311a083f673cebe5af76b584b02071909234d9a0, which is not included here, neighbored the new code, plus references to new methods) fixed in: * lightning/src/routing/scoring.rs --- lightning/src/routing/scoring.rs | 35 ++++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/lightning/src/routing/scoring.rs b/lightning/src/routing/scoring.rs index e6d172b29ee..506f70d05b4 100644 --- a/lightning/src/routing/scoring.rs +++ b/lightning/src/routing/scoring.rs @@ -1863,15 +1863,17 @@ mod bucketed_history { } fn recalculate_valid_point_count(&mut self) { - let mut total_valid_points_tracked = 0; + let mut total_valid_points_tracked = 0u128; for (min_idx, min_bucket) in self.min_liquidity_offset_history.buckets.iter().enumerate() { for max_bucket in self.max_liquidity_offset_history.buckets.iter().take(32 - min_idx) { // In testing, raising the weights of buckets to a high power led to better // scoring results. Thus, we raise the bucket weights to the 4th power here (by - // squaring the result of multiplying the weights). + // squaring the result of multiplying the weights). This results in + // bucket_weight having at max 64 bits, which means we have to do our summation + // in 128-bit math. let mut bucket_weight = (*min_bucket as u64) * (*max_bucket as u64); bucket_weight *= bucket_weight; - total_valid_points_tracked += bucket_weight; + total_valid_points_tracked += bucket_weight as u128; } } self.total_valid_points_tracked = total_valid_points_tracked as f64; @@ -1957,12 +1959,12 @@ mod bucketed_history { let total_valid_points_tracked = self.tracker.total_valid_points_tracked; #[cfg(debug_assertions)] { - let mut actual_valid_points_tracked = 0; + let mut actual_valid_points_tracked = 0u128; for (min_idx, min_bucket) in min_liquidity_offset_history_buckets.iter().enumerate() { for max_bucket in max_liquidity_offset_history_buckets.iter().take(32 - min_idx) { let mut bucket_weight = (*min_bucket as u64) * (*max_bucket as u64); bucket_weight *= bucket_weight; - actual_valid_points_tracked += bucket_weight; + actual_valid_points_tracked += bucket_weight as u128; } } assert_eq!(total_valid_points_tracked, actual_valid_points_tracked as f64); @@ -1989,7 +1991,7 @@ mod bucketed_history { // max-bucket with at least BUCKET_FIXED_POINT_ONE. let mut highest_max_bucket_with_points = 0; let mut highest_max_bucket_with_full_points = None; - let mut total_weight = 0; + let mut total_weight = 0u128; for (max_idx, max_bucket) in max_liquidity_offset_history_buckets.iter().enumerate() { if *max_bucket >= BUCKET_FIXED_POINT_ONE { highest_max_bucket_with_full_points = Some(cmp::max(highest_max_bucket_with_full_points.unwrap_or(0), max_idx)); @@ -2002,7 +2004,7 @@ mod bucketed_history { // squaring the result of multiplying the weights), matching the logic in // `recalculate_valid_point_count`.
let bucket_weight = (*max_bucket as u64) * (min_liquidity_offset_history_buckets[0] as u64); - total_weight += bucket_weight * bucket_weight; + total_weight += (bucket_weight * bucket_weight) as u128; } debug_assert!(total_weight as f64 <= total_valid_points_tracked); // Use the highest max-bucket with at least BUCKET_FIXED_POINT_ONE, but if none is @@ -2055,7 +2057,7 @@ mod bucketed_history { #[cfg(test)] mod tests { - use super::HistoricalBucketRangeTracker; + use super::{HistoricalBucketRangeTracker, HistoricalLiquidityTracker, ProbabilisticScoringFeeParameters}; #[test] fn historical_liquidity_bucket_decay() { @@ -2078,6 +2080,23 @@ mod bucketed_history { ] ); } + + #[test] + fn historical_heavy_buckets_operations() { + // Checks that we don't hit overflows when working with tons of data (even an + // impossible-to-reach amount of data). + let mut tracker = HistoricalLiquidityTracker::new(); + tracker.min_liquidity_offset_history.buckets = [0xffff; 32]; + tracker.max_liquidity_offset_history.buckets = [0xffff; 32]; + tracker.recalculate_valid_point_count(); + + let mut directed = tracker.as_directed_mut(true); + let default_params = ProbabilisticScoringFeeParameters::default(); + directed.calculate_success_probability_times_billion(&default_params, 42, 1000); + directed.track_datapoint(42, 52, 1000); + + tracker.decay_buckets(1.0); + } } } use bucketed_history::{LegacyHistoricalBucketRangeTracker, HistoricalBucketRangeTracker, DirectedHistoricalLiquidityTracker, HistoricalLiquidityTracker}; From 7c56f2166c9914f00713bfbb7712d51fa259e22a Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 25 Feb 2025 01:13:21 +0000 Subject: [PATCH 060/105] Cancel claims signed by a remote `ChannelMonitor` when reorging In `ChannelMonitorImpl::cancel_prev_commitment_claims` we need to cancel any claims against a removed commitment transaction. We were checking if `holder_tx_signed` before checking if either the current or previous holder commitment transaction had pending claims against it, but (a) there's no need to do this, there's not a big performance cost to just always trying to remove claims and (b) we can't actually rely on `holder_tx_signed`. `holder_tx_signed` being set doesn't necessarily imply that the `ChannelMonitor` was persisted (i.e. it may simply be lost in a poorly-timed restart) but we also (somewhat theoretically) allow for multiple copies of a `ChannelMonitor` to exist, and a different one could have signed the commitment transaction which was confirmed (and then unconfirmed). Thus, we simply remove the additional check here. --- lightning/src/chain/channelmonitor.rs | 41 +++++++++++++-------------- 1 file changed, 19 insertions(+), 22 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 22aa84488ce..19e5ac27f03 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -3874,35 +3874,32 @@ impl ChannelMonitorImpl { } } } - if self.holder_tx_signed { - // If we've signed, we may have broadcast either commitment (prev or current), and - // attempted to claim from it immediately without waiting for a confirmation. - if self.current_holder_commitment_tx.txid != *confirmed_commitment_txid { + // Cancel any pending claims for any holder commitments in case they had previously + // confirmed or been signed (in which case we will start attempting to claim without + // waiting for confirmation). 
+ if self.current_holder_commitment_tx.txid != *confirmed_commitment_txid { + log_trace!(logger, "Canceling claims for previously broadcast holder commitment {}", + self.current_holder_commitment_tx.txid); + let mut outpoint = BitcoinOutPoint { txid: self.current_holder_commitment_tx.txid, vout: 0 }; + for (htlc, _, _) in &self.current_holder_commitment_tx.htlc_outputs { + if let Some(vout) = htlc.transaction_output_index { + outpoint.vout = vout; + self.onchain_tx_handler.abandon_claim(&outpoint); + } + } + } + if let Some(prev_holder_commitment_tx) = &self.prev_holder_signed_commitment_tx { + if prev_holder_commitment_tx.txid != *confirmed_commitment_txid { log_trace!(logger, "Canceling claims for previously broadcast holder commitment {}", - self.current_holder_commitment_tx.txid); - let mut outpoint = BitcoinOutPoint { txid: self.current_holder_commitment_tx.txid, vout: 0 }; - for (htlc, _, _) in &self.current_holder_commitment_tx.htlc_outputs { + prev_holder_commitment_tx.txid); + let mut outpoint = BitcoinOutPoint { txid: prev_holder_commitment_tx.txid, vout: 0 }; + for (htlc, _, _) in &prev_holder_commitment_tx.htlc_outputs { if let Some(vout) = htlc.transaction_output_index { outpoint.vout = vout; self.onchain_tx_handler.abandon_claim(&outpoint); } } } - if let Some(prev_holder_commitment_tx) = &self.prev_holder_signed_commitment_tx { - if prev_holder_commitment_tx.txid != *confirmed_commitment_txid { - log_trace!(logger, "Canceling claims for previously broadcast holder commitment {}", - prev_holder_commitment_tx.txid); - let mut outpoint = BitcoinOutPoint { txid: prev_holder_commitment_tx.txid, vout: 0 }; - for (htlc, _, _) in &prev_holder_commitment_tx.htlc_outputs { - if let Some(vout) = htlc.transaction_output_index { - outpoint.vout = vout; - self.onchain_tx_handler.abandon_claim(&outpoint); - } - } - } - } - } else { - // No previous claim. } } From 653c482af37f68f7716ad3dc0f81031059577d1b Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 25 Feb 2025 01:31:38 +0000 Subject: [PATCH 061/105] Drop return value from `provide_latest_holder_commitment_tx` `provide_latest_holder_commitment_tx` is used to handle `ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo` updates and returns an `Err` if we've set `holder_tx_signed`. However, later in `ChannelMonitorImpl::update_monitor` (the only non-test place that `provide_latest_holder_commitment_tx` is called), we will fail the entire update if `holder_tx_signed` is (or a few other flags are) set if the update contained a `LatestHolderCommitmentTXInfo` (or a few other update types). Thus, the check in `provide_latest_holder_commitment_tx` is entirely redundant and can be removed.
--- lightning/src/chain/channelmonitor.rs | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 19e5ac27f03..13ee3e0e6a6 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -1509,8 +1509,8 @@ impl ChannelMonitor { fn provide_latest_holder_commitment_tx( &self, holder_commitment_tx: HolderCommitmentTransaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option, Option)>, - ) -> Result<(), ()> { - self.inner.lock().unwrap().provide_latest_holder_commitment_tx(holder_commitment_tx, htlc_outputs, &Vec::new(), Vec::new()).map_err(|_| ()) + ) { + self.inner.lock().unwrap().provide_latest_holder_commitment_tx(holder_commitment_tx, htlc_outputs, &Vec::new(), Vec::new()) } /// This is used to provide payment preimage(s) out-of-band during startup without updating the @@ -2901,7 +2901,7 @@ impl ChannelMonitorImpl { /// is important that any clones of this channel monitor (including remote clones) by kept /// up-to-date as our holder commitment transaction is updated. /// Panics if set_on_holder_tx_csv has never been called. - fn provide_latest_holder_commitment_tx(&mut self, holder_commitment_tx: HolderCommitmentTransaction, mut htlc_outputs: Vec<(HTLCOutputInCommitment, Option, Option)>, claimed_htlcs: &[(SentHTLCId, PaymentPreimage)], nondust_htlc_sources: Vec) -> Result<(), &'static str> { + fn provide_latest_holder_commitment_tx(&mut self, holder_commitment_tx: HolderCommitmentTransaction, mut htlc_outputs: Vec<(HTLCOutputInCommitment, Option, Option)>, claimed_htlcs: &[(SentHTLCId, PaymentPreimage)], nondust_htlc_sources: Vec) { if htlc_outputs.iter().any(|(_, s, _)| s.is_some()) { // If we have non-dust HTLCs in htlc_outputs, ensure they match the HTLCs in the // `holder_commitment_tx`. In the future, we'll no longer provide the redundant data @@ -2978,10 +2978,6 @@ impl ChannelMonitorImpl { } self.counterparty_fulfilled_htlcs.insert(*claimed_htlc_id, *claimed_preimage); } - if self.holder_tx_signed { - return Err("Latest holder commitment signed has already been signed, update is rejected"); - } - Ok(()) } /// Provides a payment_hash->payment_preimage mapping. Will be automatically pruned when all @@ -3202,11 +3198,7 @@ impl ChannelMonitorImpl { ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo { commitment_tx, htlc_outputs, claimed_htlcs, nondust_htlc_sources } => { log_trace!(logger, "Updating ChannelMonitor with latest holder commitment transaction info"); if self.lockdown_from_offchain { panic!(); } - if let Err(e) = self.provide_latest_holder_commitment_tx(commitment_tx.clone(), htlc_outputs.clone(), &claimed_htlcs, nondust_htlc_sources.clone()) { - log_error!(logger, "Providing latest holder commitment transaction failed/was refused:"); - log_error!(logger, " {}", e); - ret = Err(()); - } + self.provide_latest_holder_commitment_tx(commitment_tx.clone(), htlc_outputs.clone(), &claimed_htlcs, nondust_htlc_sources.clone()); } ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo { commitment_txid, htlc_outputs, commitment_number, their_per_commitment_point, .. 
} => { log_trace!(logger, "Updating ChannelMonitor with latest counterparty commitment transaction info"); @@ -5386,7 +5378,7 @@ mod tests { let dummy_commitment_tx = HolderCommitmentTransaction::dummy(&mut htlcs); monitor.provide_latest_holder_commitment_tx(dummy_commitment_tx.clone(), - htlcs.into_iter().map(|(htlc, _)| (htlc, Some(dummy_sig), None)).collect()).unwrap(); + htlcs.into_iter().map(|(htlc, _)| (htlc, Some(dummy_sig), None)).collect()); monitor.provide_latest_counterparty_commitment_tx(Txid::from_byte_array(Sha256::hash(b"1").to_byte_array()), preimages_slice_to_htlc_outputs!(preimages[5..15]), 281474976710655, dummy_key, &logger); monitor.provide_latest_counterparty_commitment_tx(Txid::from_byte_array(Sha256::hash(b"2").to_byte_array()), @@ -5424,7 +5416,7 @@ mod tests { let mut htlcs = preimages_slice_to_htlcs!(preimages[0..5]); let dummy_commitment_tx = HolderCommitmentTransaction::dummy(&mut htlcs); monitor.provide_latest_holder_commitment_tx(dummy_commitment_tx.clone(), - htlcs.into_iter().map(|(htlc, _)| (htlc, Some(dummy_sig), None)).collect()).unwrap(); + htlcs.into_iter().map(|(htlc, _)| (htlc, Some(dummy_sig), None)).collect()); secret[0..32].clone_from_slice(&>::from_hex("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap()); monitor.provide_secret(281474976710653, secret.clone()).unwrap(); assert_eq!(monitor.inner.lock().unwrap().payment_preimages.len(), 12); @@ -5435,7 +5427,7 @@ mod tests { let mut htlcs = preimages_slice_to_htlcs!(preimages[0..3]); let dummy_commitment_tx = HolderCommitmentTransaction::dummy(&mut htlcs); monitor.provide_latest_holder_commitment_tx(dummy_commitment_tx, - htlcs.into_iter().map(|(htlc, _)| (htlc, Some(dummy_sig), None)).collect()).unwrap(); + htlcs.into_iter().map(|(htlc, _)| (htlc, Some(dummy_sig), None)).collect()); secret[0..32].clone_from_slice(&>::from_hex("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap()); monitor.provide_secret(281474976710652, secret.clone()).unwrap(); assert_eq!(monitor.inner.lock().unwrap().payment_preimages.len(), 5); From 6749a0767469dfe51378ef4dd0c5b29d0497fbd6 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 25 Feb 2025 01:42:30 +0000 Subject: [PATCH 062/105] Only generate a post-close lock ChannelMonitorUpdate if we need one If a channel is closed on startup, but we find that the `ChannelMonitor` isn't aware of this, we generate a `ChannelMonitorUpdate` containing a `ChannelMonitorUpdateStep::ChannelForceClosed`. This ensures that the `ChannelMonitor` will not accept any future updates in case we somehow load up a previous `ChannelManager` (though that really shouldn't happen). Previously, we'd apply this update only if we detected that the `ChannelManager` had not yet informed the `ChannelMonitor` about the channel's closure, even if the `ChannelMonitor` would already refuse any other updates because it detected a channel closure on chain. This doesn't accomplish anything but an extra I/O write, so we remove it here. 
Further, a user reported that, in regtest, they could: (a) coop close a channel (not generating a `ChannelMonitorUpdate`) (b) wait just under 4032 blocks (on regtest, taking only a day) (c) restart the `ChannelManager`, generating the above update (d) connect a block or two (during the startup sequence), making the `ChannelMonitor` eligible for archival, (e) restart the `ChannelManager` again (without applying the update from (c), but after having archived the `ChannelMonitor`), leading to a failure to deserialize as we have a pending `ChannelMonitorUpdate` for a `ChannelMonitor` that has been archived. Though it seems very unlikely this would happen on mainnet, it is theoretically possible. --- lightning/src/chain/channelmonitor.rs | 20 ++++++++++++++------ lightning/src/ln/channelmanager.rs | 4 ++-- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 13ee3e0e6a6..4c195b20a78 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -1737,10 +1737,14 @@ impl ChannelMonitor { self.inner.lock().unwrap().get_cur_holder_commitment_number() } - /// Gets whether we've been notified that this channel is closed by the `ChannelManager` (i.e. - /// via a [`ChannelMonitorUpdateStep::ChannelForceClosed`]). - pub(crate) fn offchain_closed(&self) -> bool { - self.inner.lock().unwrap().lockdown_from_offchain + /// Fetches whether this monitor has marked the channel as closed and will refuse any further + /// updates to the commitment transactions. + /// + /// It can be marked closed in a few different ways, including via a + /// [`ChannelMonitorUpdateStep::ChannelForceClosed`] or if the channel has been closed + /// on-chain. + pub(crate) fn no_further_updates_allowed(&self) -> bool { + self.inner.lock().unwrap().no_further_updates_allowed() } /// Gets the `node_id` of the counterparty for this channel. @@ -3278,12 +3282,16 @@ impl ChannelMonitorImpl { } } - if ret.is_ok() && (self.funding_spend_seen || self.lockdown_from_offchain || self.holder_tx_signed) && is_pre_close_update { + if ret.is_ok() && self.no_further_updates_allowed() && is_pre_close_update { log_error!(logger, "Refusing Channel Monitor Update as counterparty attempted to update commitment after funding was spent"); Err(()) } else { ret } } + fn no_further_updates_allowed(&self) -> bool { + self.funding_spend_seen || self.lockdown_from_offchain || self.holder_tx_signed + } + fn get_latest_update_id(&self) -> u64 { self.latest_update_id } @@ -4227,7 +4235,7 @@ impl ChannelMonitorImpl { } } - if self.lockdown_from_offchain || self.funding_spend_seen || self.holder_tx_signed { + if self.no_further_updates_allowed() { // Fail back HTLCs on backwards channels if they expire within // `LATENCY_GRACE_PERIOD_BLOCKS` blocks and the channel is closed (i.e. we're at a // point where no further off-chain updates will be accepted). If we haven't seen the diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 6c8f3139c6a..9ede93c93d1 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -13364,8 +13364,8 @@ where // claim. // Note that a `ChannelMonitor` is created with `update_id` 0 and after we // provide it with a closure update its `update_id` will be at 1.
- if !monitor.offchain_closed() || monitor.get_latest_update_id() > 1 { - should_queue_fc_update = !monitor.offchain_closed(); + if !monitor.no_further_updates_allowed() || monitor.get_latest_update_id() > 1 { + should_queue_fc_update = !monitor.no_further_updates_allowed(); let mut latest_update_id = monitor.get_latest_update_id(); if should_queue_fc_update { latest_update_id += 1; From 76131619b08307e02590773042b20251c927e7c5 Mon Sep 17 00:00:00 2001 From: Philip Kannegaard Hayes Date: Wed, 12 Mar 2025 17:26:13 -0700 Subject: [PATCH 063/105] lightning-invoice: explicitly enforce a 7089 B max length on deser The new `bech32-v0.11.0` version (prev: `v0.9.1`) now enforces a max length of 1023 bytes. Before there was no max. BOLT11 invoices can definitely exceed 1023 B with a long-ish description and 2 route hints, so this limit is likely too low. Having a limit is probably a good idea. What do other projects choose? Here's a brief survey: LDK (pre-0.1): (no limit) LDK (post-0.1): 1023 B LDK (post-PR): 7089 B LND[1]: 7089 B CLN[2]: (no limit) ACINQ[3][4]: (no limit) LND uses 7089 B, which was chosen to be "the max number of bytes that can fit in a QR code". LND's rationale is technically incorrect as QR codes actually have a max capacity of 7089 _numeric_ characters and only support up to 4296 all-uppercase alphanumeric characters. However, ecosystem-wide consistency is more important. A more conservative limit that would probably also suffice might be 2953 B, the QR code length limit for a lowercase bech32-encoded invoice. [1]: https://github.com/lightningnetwork/lnd/blob/6531d4505098eb14e6c24aedfd752fc15e85845d/zpay32/invoice.go#L87 [2]: https://github.com/ElementsProject/lightning/blob/0e7615b1b73eee161911763840d6260baf596755/common/bolt11.c#L683 [3]: https://github.com/ACINQ/lightning-kmp/blob/feda82c853660a792b911be518367a228ed6e0ee/modules/core/src/commonMain/kotlin/fr/acinq/lightning/payment/Bolt11Invoice.kt#L165 [4]: https://github.com/ACINQ/bitcoin-kmp/blob/master/src/commonMain/kotlin/fr/acinq/bitcoin/Bech32.kt#L122 --- lightning-invoice/src/de.rs | 245 ++++++++++++++++++++++++++++++++++- lightning-invoice/src/lib.rs | 28 +++- 2 files changed, 270 insertions(+), 3 deletions(-) diff --git a/lightning-invoice/src/de.rs b/lightning-invoice/src/de.rs index ee071d6349a..85a0924ce22 100644 --- a/lightning-invoice/src/de.rs +++ b/lightning-invoice/src/de.rs @@ -9,9 +9,10 @@ use core::str::FromStr; use std::error; use bech32::primitives::decode::{CheckedHrpstring, CheckedHrpstringError}; -use bech32::{Bech32, Fe32, Fe32IterExt}; +use bech32::{Fe32, Fe32IterExt}; use crate::prelude::*; +use crate::Bolt11Bech32; use bitcoin::hashes::sha256; use bitcoin::hashes::Hash; use bitcoin::{PubkeyHash, ScriptHash, WitnessVersion}; @@ -377,7 +378,7 @@ impl FromStr for SignedRawBolt11Invoice { type Err = Bolt11ParseError; fn from_str(s: &str) -> Result { - let parsed = CheckedHrpstring::new::(s)?; + let parsed = CheckedHrpstring::new::(s)?; let hrp = parsed.hrp(); // Access original non-packed 32 byte values (as Fe32s) // Note: the type argument is needed due to the API peculiarities, but it's not used @@ -1175,4 +1176,244 @@ mod test { ) ) } + + // Test some long invoice test vectors successfully roundtrip. Generated + // from Lexe proptest: . 
+ #[test] + fn test_deser_long_test_vectors() { + use crate::Bolt11Invoice; + + #[track_caller] + fn parse_ok(invoice_str: &str) { + let invoice = Bolt11Invoice::from_str(invoice_str).unwrap(); + let invoice_str2 = invoice.to_string(); + if invoice_str != invoice_str2 { + panic!( + "Invoice does not roundtrip: invoice_str != invoice_str2\n\ + invoice_str: {invoice_str}\n\ + invoice_str2: {invoice_str2}\n\ + \n\ + {invoice:?}" + ); + } + } + + // 1024 B shrunk invoice just above previous limit of 1023 B from Lexe proptest + parse_ok( + "lnbc10000000000000000010p1qqqqqqqdtuxpqkzq8sjzqgps4pvyczqq8sjzqgpuysszq0pyyqsrp2zs0sjz\ + qgps4pxrcfpqyqc2slpyyqsqsv9gwz59s5zqpqyps5rc9qsrs2pqxz5ysyzcfqgysyzs0sjzqgqq8sjzqgps4p\ + xqqzps4pqpssqgzpxps5ruysszqrps4pg8p2zgpsc2snpuysszqzqsgqvys0pyyqsrcfpqyqvycv9gfqqrcfpq\ + yq7zggpq8q5zqyruysszqwpgyqxpsjqsgq7zggpqps7zggpq8sjzqgqgqq7zggpqpq7zggpq8q5zqqpuysszq0\ + pyyqsqs0pyyqspsnqgzpqpqlpyyqsqszpuysszqyzvzpvysrqq8sjzqgqvrp7zggpqpqxpsspp5mf45hs3cgph\ + h0074r5qmr74y82r26ac4pzdg4nd9mdmsvz6ffqpssp5vr4yra4pcv74h9hk3d0233nqu4gktpuykjamrafrdp\ + uedqugzh3q9q2sqqqqqysgqcqrpqqxq8pqqqqqqnp4qgvcxpme2q5lng36j9gruwlrtk2f86s3c5xmk87yhvyu\ + wdeh025q5r9yqwnqegv9hj9nzkhyxaeyq92wcrnqp36pyrc2qzrvswj5g96ey2dn6qqqqqqqqqqqqqqqqqqqqq\ + qqqqqqqqp9a5vs0t4z56p64xyma8s84yvdx7uhqj0gvrr424fea2wpztq2fwqqqqqqqqqqqqqqqqqqqqqqqqqq\ + qqqqmy9qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq\ + qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqpcnsxc32du9n7amlypuhclzqrt6lkegq\ + 0v3r7nczjv9tv30z7phq80r3dm7pvgykl7gwuenmem93h5xwdwac6ngsmzqc34khrg3qjgsq6qk6lc" + ); + // 1517 B mainnet invoice from Lexe proptest + parse_ok( + "lnbc8735500635020489010p1av5kfs8deupvyk4u5ynj03hmalhhhml0fxc2jlrv9z4lg6s4hnhkz69malhhe\ + t3x9yqpsxru4a3kwar2qtu2q2ughx367q600s5x7c7tln4k0fu78skxqevaqm8sayhuur377zgf3uf94n57xzh\ + dw99u42hwc089djn5xj723w7zageflsnzdmyte89tecf2ac7xhg4y3u9f4xpuv2hwxjlsarp0e24fu8tme6rgv\ + 0tqj08z9f4u30rw59k8emhtvs7wye0xfw6x5q5tju2p208rvtkunzwtwghtp22tlnh62gxwhfkxp4cnz7ts3rx\ + vlzszhv9y00h77lpdvcjyhjtmalh5dn5e8n5w8cqle0vunzduu4nza9y0734qhxday9hzywl0aa0vhzy0qmphc\ + 64d4hduj08dv2krpgqtc2v83gptk34reelxyc7wsgnze890c6nrv6p0cmepatc269eayzjjkqk30n52rfl5dg7\ + wztl96f7wc2tzx34q909xuajnyt4u4lnk87lwal7z0etdz5tmece0v3u796jfp68nccn05ty54ncfelts3v8g0\ + sn6v6hsu87zat4r03368ersu87252dd0nswymxzc2pyxl8yy844hspuyj47w0px4u4leefq568sk0rr9th4ql9\ + f9ykawrczkz5hp22nstg3lrlsa6u2q2ull3kzce2sh0h77sjv0zszhzy4hfh6u0pwux5l3gpthsn72mfu47sw9\ + zw3hzk7srznp27z0etdp0725me00sn72mgkf0fteehruk0lg6swh34z52puaekzmjlmalhhe6m8ug7z3c8g8zh\ + jjspp5zj0sm85g5ufng9w7s6p4ucdk80tyvz64sg54v0cy4vgnr37f78sqsp5l6azu2hv6we30er90jrslqpvd\ + trnrphhesca2wg5q83k52rsu2cq9q2sqqqqqysgqcqr8h2np4qw0ha2k282hm8jh5rcfq0hsp2zhddtlc5vs23\ + uphyv0lv3k8sqsfgfp4qyrk86tx5xg2aa7et4cdzhnvl5s4nd33ugytt7gamk9tugn9yransr9yq08gpwsn8t2\ + tq4ducjfhrcz707av0ss20urjh8vldrpmehqxa0stkesvuq82txyqzfhej7qccswy7k5wvcppk63c6zpjytfda\ + ccadacjtn52lpe6s85rjfqlxzp6frq33xshaz2nr9xjkhd3jj8qg39nmfzvpgmayakqmy9rseakwgcudug7hs4\ + 5wh430ywh7qhj3khczh8gle4cn93ymgfwa7rrvcw9lywyyz58k4p40a3nu9svthaf0qeg8f2ay4tw9p48p70qm\ + ayu3ejl2q8pj9e2l22h7775tl44hs6ke4sdfgcr6aj8wra4r2v9sj6xa5chd5ctpfg8chtrer3kkp0e6af88lk\ + rfxcklf2hyslv2hr0xl5lwrm5y5uttxn4ndfz8789znf78nspa3xy68" + ); + // 1804 B regtest invoice from Lexe proptest + parse_ok( + "lnbcrt17124979001314909880p1y6lkcwgd76tfnxksfk2atyy4tzw4nyg6jrx3282s2ygvcxyj64gevhxsjk\ + 2ymhzv3e0p5h5u3kfey92jt9ge44gsfnwycxynm2g3unw3ntt9qh25texe98jcfhxvcxuezxw9tngwrndpy9s4\ + 
p4x9eyze2tfe9rxm68tp5yj5jfduen2nny8prhsm6edegn2stww4n4gwp4vfjkvdthd43524n9fa8h262vwesk\ + g66nw3vnyafn29zhsvfeg9mxummtfp35uumzfqmhy3jwgdh55mt5xpvhgmjn25uku5e5g939wmmnvdfygnrdgd\ + h56uzcx4a92vfhgdcky3z9gfnrsvp4f4f55j68vak9yufhvdm8x5zrgc6955jvf429zumv89nh2a35wae5yntg\ + v985jumpxehyv7t92pjrwufs89yh23f5ddy5s568wgchve3cg9ek5nzewgcrzjz0dftxg3nvf4hngje52ac4zm\ + esxpvk6sfef4hkuetvd4vk6n29wftrw5rvg4yy2vjjwyexc5mnvfd8xknndpqkkenx0q642j35298hwve3dyc5\ + 25jrd3295sm9v9jrqup3wpykg7zd239ns7jgtqu95jz0deaxksjh2fu56n6n2f5x6mm8wa89qjfef385sam2x9\ + mxcs20gfpnq460d3axzknnf3e4sw2kvf25wjjxddpyg52dw4vx7nn2w9cyu5t8vfnyxjtpg33kssjp24ch536p\ + d938snmtx345x6r4x93kvv2tff855um3tfekxjted4kxys2kve5hvu6g89z4ynmjgfhnw7tv892rymejgvey77\ + rcfqe9xjr92d85636fvajxyajndfa92k2nxycx5jtjx4zxsm2y2dyn2up50f5ku3nrfdk4g5npxehkzjjv8y69\ + gveev4z56denddaxy7tfwe8xx42zgf6kzmnxxpk826ze2s6xk6jrwearw6ejvd8rsvj2fpg525jtd5pp5j2tlt\ + 28m4kakjr84w6ce4fd8e7awy6ncyswcyut760rdnem30ptssp5p5u3xgxxtr6aev8y2w9m30wcw3kyn7fgm8wm\ + f8qw8wzrqt34zcvq9q2sqqqqqysgqcqypmw9xq8lllllllnp4qt36twam2ca08m3s7vnhre3c0j89589wyw4vd\ + k7fln0lryxzkdcrur28qwqq3hnyt84vsasuldd2786eysdf4dyuggwsmvw2atftf7spkmpa9dd3efq5tenpqm2\ + v7vcz2a4s0s7jnqpjn0srysnstnw5y5z9taxn0ue37aqgufxcdsj6f8a2m4pm9udppdzc4shsdqzzx0u0rm4xl\ + js0dqz3c5zqyvglda7nsqvqfztmlyup7vyuadzav4zyuqwx90ev6nmk53nkhkt0sev9e745wxqtdvrqzgqkaka\ + zen7e2qmsdauk665g3llg5qtl79t3xulrhjnducehdn72gpmkjvtth7kh6ejpl9dv0qcsxv2jvzzvg0hzdmk3y\ + jsmydqksdk3h78kc63qnr265h8vyeslqexszppfm7y287t3gxvhw0ulg2wp0rsw3tevz03z50kpy77zdz9snxm\ + kkwxd76xvj4qvj2f89rrnuvdvzw947ay0kydc077pkec2jet9qwp2tud98s24u65uz07eaxk5jk3e4nggn2caa\ + ek2p5pkrc6mm6mxjm2ezpdu8p5jstg6tgvnttgac3ygt5ys04t4udujzlshpl7e4f3ff03xe6v24cp6aq4wa" + ); + // 1870 B testnet invoice from Lexe proptest + parse_ok( + "lntb5826417333454665580p1c5rwh5edlhf33hvkj5vav5z3t02a5hxvj3vfv5kuny2f3yzj6zwf9hx3nn2fk\ + 9gepc2a3ywvj6dax5v3jy2d5nxmp3gaxhycjkv38hx4z4d4vyznrp2p24xa6t2pg4w4rrxfens6tcxdhxvvfhx\ + a8xvvpkgat8xnpe2p44juz9g43hyur00989gvfhwd2kj72wfum4g4mgx5m5cs2rg9d9vnn6xe89ydnnvfpyy52\ + s2dxx2er4x4xxwstdd5cxwdrjw3nkxnnv2uexxnrxw4t56sjswfn52s2xv4t8xmjtwpn8xm6sfeh4q526dyu8x\ + 3r9gceyw6fhd934qjttvdk57az5w368zdrhwfjxxu35xcmrsmmpd4g8wwtev4tkzutdd32k56mxveuy6c6v2em\ + yv7zkfp39zjpjgd8hx7n4xph5kceswf6xxmnyfcuxca20fp24z7ncvfhyu5jf2exhw36nwf68s7rh2a6yzjf4d\ + gukcenfxpchqsjn2pt5x334tf98wsm6dvcrvvfcwapxvk2cdvmk2npcfe68zue3w4f9xc6s2fvrw6nrg3fkskt\ + e2ftxyc20ffckcd692964sdzjwdp4yvrfdfm9q72pxp3kwat5f4j9xee5da8rss60w92857tgwych55f5w3n8z\ + mzexpy4jwredejrqm6txf3nxm64ffh8x460dp9yjazhw4yx6dm5xerysnn5wa455k3h2d89ss2fd9axwjp3f4r\ + 9qdmfd4fx6stx2eg9sezrv369w7nvvfvhj4nnwaz5z3ny8qcxcdnvwd64jc2nx9uy2e2gxdrnx6r3w9ykxatxx\ + g6kk6rv2ekr2emwx5ehy362d3x82dzvddfxs5rcg4vn27npf564qdtg2anycc6523jnwe3e0p65unrpvccrs5m\ + 2fuexgmnj23ay5e34v4xk5jnrwpg4xemfwqe5vjjjw9qk76zsd9yrzu6xdpv5v5ntdejxg6jtv3kx65t6gdhrg\ + vj3fe34sj2vv3h5kegpp57hjf5kv6clw97y2e063yuz0psrz9a6l49v836dflum00rh8qtn8qsp5gd29qycuze\ + 08xls8l32zjaaf2uqv78v97lg9ss0c699huw980h2q9q2sqqqqqysgqcqr8ulnp4q26hcfwr7qxz7lwwlr2kjc\ + rws7m2u5j36mm0kxa45uxy6zvsqt2zzfppjdkrm2rlgadt9dq3d6jkv4r2cugmf2kamr28qwuleyzzyyly8a6t\ + u70eldahx7hzxx5x9gms7vjjr577ps8n4qyds5nern39j0v7czkch2letnt46895jupxgehf208xgxz8d6j8gu\ + 3h2qqtsk9nr9nuquhkqjxw40h2ucpldrawmktxzxdgtkt9a3p95g98nywved8s8laj2a0c98rq5zzdnzddz6nd\ + w0lvr6u0av9m7859844cgz9vpeq05gw79zqae2s7jzeq66wydyueqtp56qc67g7krv6lj5aahxtmq4y208q5qy\ + z38cnwl9ma6m5f4nhzqaj0tjxpfrk4nr5arv9d20lvxvddvffhzygmyuvwd959uhdcgcgjejchqt2qncuwpqqk\ + 
5vws7dflw8x6esrfwhz7h3jwmhevf445k76nme926sr8drsdveqg7l7t7lnjvhaludqnwk4l2pmevkjf9pla92\ + 4p77v76r7x8jzyy7h59hmk0lgzfsk6c8dpj37hssj7jt4q7jzvy8hq25l3pag37axxanjqnq56c47gpgy6frsy\ + c0str9w2aahz4h6t7axaka4cwvhwg49r6qgj8kwz2mt6vcje25l9ekvmgq5spqtn" + ); + } + + // Generate a valid invoice of `MAX_LENGTH` bytes and ensure that it roundtrips. + #[test] + fn test_serde_long_invoice() { + use crate::TaggedField::*; + use crate::{ + Bolt11Invoice, Bolt11InvoiceFeatures, Bolt11InvoiceSignature, Currency, + PositiveTimestamp, RawBolt11Invoice, RawDataPart, RawHrp, RawTaggedField, Sha256, + SignedRawBolt11Invoice, + }; + use bitcoin::secp256k1::ecdsa::{RecoverableSignature, RecoveryId}; + use bitcoin::secp256k1::PublicKey; + use lightning_types::routing::{RouteHint, RouteHintHop, RoutingFees}; + + // Generate an `UnknownSemantics` field with a given length. + fn unknown_semantics_field(len: usize) -> Vec { + assert!(len <= 1023); + let mut field = Vec::with_capacity(len + 3); + // Big-endian encoded length prefix + field.push(Fe32::Q); + field.push(Fe32::try_from((len >> 5) as u8).unwrap()); + field.push(Fe32::try_from((len & 0x1f) as u8).unwrap()); + // Data + field.extend(std::iter::repeat(Fe32::P).take(len)); + field + } + + // Invoice fields + let payment_hash = sha256::Hash::from_str( + "0001020304050607080900010203040506070809000102030405060708090102", + ) + .unwrap(); + let description = std::iter::repeat("A").take(639).collect::(); + let fallback_addr = crate::Fallback::SegWitProgram { + version: bitcoin::WitnessVersion::V0, + program: vec![0; 32], + }; + let payee_pk = PublicKey::from_slice(&[ + 0x03, 0x24, 0x65, 0x3e, 0xac, 0x43, 0x44, 0x88, 0x00, 0x2c, 0xc0, 0x6b, 0xbf, 0xb7, + 0xf1, 0x0f, 0xe1, 0x89, 0x91, 0xe3, 0x5f, 0x9f, 0xe4, 0x30, 0x2d, 0xbe, 0xa6, 0xd2, + 0x35, 0x3d, 0xc0, 0xab, 0x1c, + ]) + .unwrap(); + let route_hints = std::iter::repeat(RouteHintHop { + src_node_id: payee_pk, + short_channel_id: 0x0102030405060708, + fees: RoutingFees { base_msat: 1, proportional_millionths: 20 }, + cltv_expiry_delta: 3, + htlc_minimum_msat: None, + htlc_maximum_msat: None, + }) + .take(12) + .collect::>(); + + // Build raw invoice + let raw_invoice = RawBolt11Invoice { + hrp: RawHrp { + currency: Currency::Bitcoin, + raw_amount: Some(10000000000000000010), + si_prefix: Some(crate::SiPrefix::Pico), + }, + data: RawDataPart { + timestamp: PositiveTimestamp::from_unix_timestamp(1496314658).unwrap(), + tagged_fields: vec![ + PaymentHash(Sha256(payment_hash)).into(), + Description(crate::Description::new(description).unwrap()).into(), + PayeePubKey(crate::PayeePubKey(payee_pk)).into(), + ExpiryTime(crate::ExpiryTime(std::time::Duration::from_secs(u64::MAX))).into(), + MinFinalCltvExpiryDelta(crate::MinFinalCltvExpiryDelta(u64::MAX)).into(), + Fallback(fallback_addr).into(), + PrivateRoute(crate::PrivateRoute(RouteHint(route_hints))).into(), + PaymentSecret(crate::PaymentSecret([17; 32])).into(), + PaymentMetadata(vec![0x69; 639]).into(), + Features(Bolt11InvoiceFeatures::from_le_bytes(vec![0xaa; 639])).into(), + // This invoice is 4458 B w/o unknown semantics fields. + // Need to add some non-standard fields to reach 7089 B limit. 
+ RawTaggedField::UnknownSemantics(unknown_semantics_field(1023)), + RawTaggedField::UnknownSemantics(unknown_semantics_field(1023)), + RawTaggedField::UnknownSemantics(unknown_semantics_field(576)), + ], + }, + }; + + // Build signed invoice + let hash = [ + 0x75, 0x99, 0xe1, 0x51, 0x7f, 0xa1, 0x0e, 0xb5, 0xc0, 0x79, 0xb4, 0x6e, 0x8e, 0x62, + 0x0c, 0x4f, 0xb0, 0x72, 0x71, 0xd2, 0x81, 0xa1, 0x92, 0x65, 0x9c, 0x90, 0x89, 0x69, + 0xe1, 0xf3, 0xd6, 0x59, + ]; + let signature = &[ + 0x6c, 0xbe, 0xbe, 0xfe, 0xd3, 0xfb, 0x07, 0x68, 0xb5, 0x79, 0x98, 0x82, 0x29, 0xab, + 0x0e, 0xcc, 0x8d, 0x3a, 0x81, 0xee, 0xee, 0x07, 0xb3, 0x5d, 0x64, 0xca, 0xb4, 0x12, + 0x33, 0x99, 0x33, 0x2a, 0x31, 0xc2, 0x2c, 0x2b, 0x62, 0x96, 0x4e, 0x37, 0xd7, 0x96, + 0x50, 0x5e, 0xdb, 0xe9, 0xa9, 0x5b, 0x0b, 0x3b, 0x87, 0x22, 0x89, 0xed, 0x95, 0xf1, + 0xf1, 0xdf, 0x2d, 0xb6, 0xbd, 0xf5, 0x0a, 0x20, + ]; + let signature = Bolt11InvoiceSignature( + RecoverableSignature::from_compact(signature, RecoveryId::from_i32(1).unwrap()) + .unwrap(), + ); + let signed_invoice = SignedRawBolt11Invoice { raw_invoice, hash, signature }; + + // Ensure serialized invoice roundtrips + let invoice = Bolt11Invoice::from_signed(signed_invoice).unwrap(); + let invoice_str = invoice.to_string(); + assert_eq!(invoice_str.len(), crate::MAX_LENGTH); + assert_eq!(invoice, Bolt11Invoice::from_str(&invoice_str).unwrap()); + } + + // Test that invoices above the maximum length fail to parse with the expected error. + #[test] + fn test_deser_too_long_fails() { + use crate::{Bolt11Invoice, ParseOrSemanticError, MAX_LENGTH}; + use bech32::primitives::decode::{CheckedHrpstringError, ChecksumError}; + + fn parse_is_code_length_err(s: &str) -> bool { + // Need matches! b/c ChecksumError::CodeLength(_) is marked non-exhaustive + matches!( + Bolt11Invoice::from_str(s), + Err(ParseOrSemanticError::ParseError(Bolt11ParseError::Bech32Error( + CheckedHrpstringError::Checksum(ChecksumError::CodeLength(_)) + ))), + ) + } + + let mut too_long = String::from("lnbc1"); + too_long.push_str( + String::from_utf8(vec![b'x'; (MAX_LENGTH + 1) - too_long.len()]).unwrap().as_str(), + ); + assert!(parse_is_code_length_err(&too_long)); + assert!(!parse_is_code_length_err(&too_long[..too_long.len() - 1])); + } } diff --git a/lightning-invoice/src/lib.rs b/lightning-invoice/src/lib.rs index 17cc41f9502..b814210b390 100644 --- a/lightning-invoice/src/lib.rs +++ b/lightning-invoice/src/lib.rs @@ -31,7 +31,7 @@ extern crate serde; use std::time::SystemTime; use bech32::primitives::decode::CheckedHrpstringError; -use bech32::Fe32; +use bech32::{Checksum, Fe32}; use bitcoin::hashes::{sha256, Hash}; use bitcoin::{Address, Network, PubkeyHash, ScriptHash, WitnessProgram, WitnessVersion}; use lightning_types::features::Bolt11InvoiceFeatures; @@ -147,6 +147,32 @@ pub const DEFAULT_EXPIRY_TIME: u64 = 3600; /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md pub const DEFAULT_MIN_FINAL_CLTV_EXPIRY_DELTA: u64 = 18; +/// lightning-invoice will reject BOLT11 invoices that are longer than 7089 bytes. +/// +/// ### Rationale +/// +/// This value matches LND's implementation, which was chosen to be "the max number +/// of bytes that can fit in a QR code". LND's rationale is technically incorrect +/// as QR codes actually have a max capacity of 7089 _numeric_ characters and only +/// support up to 4296 all-uppercase alphanumeric characters. However, ecosystem-wide +/// consistency is more important. 
+pub const MAX_LENGTH: usize = 7089; + +/// The [`bech32::Bech32`] checksum algorithm, with extended max length suitable +/// for BOLT11 invoices. +pub enum Bolt11Bech32 {} + +impl Checksum for Bolt11Bech32 { + /// Extend the max length from the 1023 bytes default. + const CODE_LENGTH: usize = MAX_LENGTH; + + // Inherit the other fields from `bech32::Bech32`. + type MidstateRepr = ::MidstateRepr; + const CHECKSUM_LENGTH: usize = bech32::Bech32::CHECKSUM_LENGTH; + const GENERATOR_SH: [Self::MidstateRepr; 5] = bech32::Bech32::GENERATOR_SH; + const TARGET_RESIDUE: Self::MidstateRepr = bech32::Bech32::TARGET_RESIDUE; +} + /// Builder for [`Bolt11Invoice`]s. It's the most convenient and advised way to use this library. It /// ensures that only a semantically and syntactically correct invoice can be built using it. /// From ff9f627aa7dfb20de70bef4f8897f4551279ce4f Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 17 Mar 2025 20:42:03 +0000 Subject: [PATCH 064/105] Re-export `lightning-invoice` as `bolt11-invoice` from `lightning` Now that `lightning` depends on `lightning-invoice`, we should re-export it like we do `bitcoin` and `types`. --- lightning/src/lib.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lightning/src/lib.rs b/lightning/src/lib.rs index fa9badf87fa..0041143dc79 100644 --- a/lightning/src/lib.rs +++ b/lightning/src/lib.rs @@ -54,6 +54,9 @@ extern crate alloc; pub extern crate lightning_types as types; pub extern crate bitcoin; + +pub extern crate lightning_invoice as bolt11_invoice; + #[cfg(any(test, feature = "std"))] extern crate core; From 74f9098e3f35f7e087e461a2ff73a88c5ae2c4df Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 2 Apr 2025 00:28:27 +0000 Subject: [PATCH 065/105] Make `TestChannelManager` `Sync` In a coming commit we'll need to hold references to `TestChannelManager` in threads, requiring that it be `Sync`. Fairly minor merge conflicts addressed in: * `lightning/src/util/test_utils.rs` --- lightning-persister/src/test_utils.rs | 2 +- lightning/src/ln/functional_test_utils.rs | 6 ++--- lightning/src/util/test_utils.rs | 33 ++++++++++++++++++----- 3 files changed, 30 insertions(+), 11 deletions(-) diff --git a/lightning-persister/src/test_utils.rs b/lightning-persister/src/test_utils.rs index e6ad42e5bcd..8af33cef55b 100644 --- a/lightning-persister/src/test_utils.rs +++ b/lightning-persister/src/test_utils.rs @@ -113,7 +113,7 @@ pub(crate) fn do_test_data_migration // Integration-test the given KVStore implementation. Test relaying a few payments and check that // the persisted data is updated the appropriate number of times. -pub(crate) fn do_test_store(store_0: &K, store_1: &K) { +pub(crate) fn do_test_store(store_0: &K, store_1: &K) { let chanmon_cfgs = create_chanmon_cfgs(2); let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let chain_mon_0 = test_utils::TestChainMonitor::new( diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 63341969326..f052cb9e965 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -10,7 +10,7 @@ //! A bunch of useful utilities for building networks of nodes and exchanging messages between //! nodes for functional tests. 
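With the re-export in the `lightning/src/lib.rs` hunk above, downstream crates can reach the BOLT 11 types without taking a direct `lightning-invoice` dependency; a minimal sketch (paths assumed from this release):

use core::str::FromStr;
use lightning::bolt11_invoice::{Bolt11Invoice, ParseOrSemanticError};

// Parse a BOLT 11 invoice via the re-exported crate.
fn parse_invoice(s: &str) -> Result<Bolt11Invoice, ParseOrSemanticError> {
    Bolt11Invoice::from_str(s)
}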
-use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen, Watch, chainmonitor::Persist}; +use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen, Watch}; use crate::chain::channelmonitor::ChannelMonitor; use crate::chain::transaction::OutPoint; use crate::events::{ClaimedHTLC, ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, PaymentFailureReason}; @@ -399,7 +399,7 @@ pub struct NodeCfg<'a> { pub override_init_features: Rc>>, } -type TestChannelManager<'node_cfg, 'chan_mon_cfg> = ChannelManager< +pub(crate) type TestChannelManager<'node_cfg, 'chan_mon_cfg> = ChannelManager< &'node_cfg TestChainMonitor<'chan_mon_cfg>, &'chan_mon_cfg test_utils::TestBroadcaster, &'node_cfg test_utils::TestKeysInterface, @@ -3259,7 +3259,7 @@ pub fn create_node_cfgs<'a>(node_count: usize, chanmon_cfgs: &'a Vec(node_count: usize, chanmon_cfgs: &'a Vec, persisters: Vec<&'a impl Persist>) -> Vec> { +pub fn create_node_cfgs_with_persisters<'a>(node_count: usize, chanmon_cfgs: &'a Vec, persisters: Vec<&'a impl test_utils::SyncPersist>) -> Vec> { let mut nodes = Vec::new(); for i in 0..node_count { diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 07b2b19b0d6..7095ac9c0cc 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -335,11 +335,29 @@ impl SignerProvider for OnlyReadsKeysInterface { fn get_shutdown_scriptpubkey(&self) -> Result { Err(()) } } +#[cfg(feature = "std")] +pub trait SyncBroadcaster: chaininterface::BroadcasterInterface + Sync {} +#[cfg(feature = "std")] +pub trait SyncPersist: chainmonitor::Persist + Sync {} +#[cfg(feature = "std")] +impl SyncBroadcaster for T {} +#[cfg(feature = "std")] +impl + Sync> SyncPersist for T {} + +#[cfg(not(feature = "std"))] +pub trait SyncBroadcaster: chaininterface::BroadcasterInterface {} +#[cfg(not(feature = "std"))] +pub trait SyncPersist: chainmonitor::Persist {} +#[cfg(not(feature = "std"))] +impl SyncBroadcaster for T {} +#[cfg(not(feature = "std"))] +impl> SyncPersist for T {} + pub struct TestChainMonitor<'a> { pub added_monitors: Mutex)>>, pub monitor_updates: Mutex>>, pub latest_monitor_update_id: Mutex>, - pub chain_monitor: chainmonitor::ChainMonitor>, + pub chain_monitor: chainmonitor::ChainMonitor, pub keys_manager: &'a TestKeysInterface, /// If this is set to Some(), the next update_channel call (not watch_channel) must be a /// ChannelForceClosed event for the given channel_id with should_broadcast set to the given @@ -350,7 +368,7 @@ pub struct TestChainMonitor<'a> { pub expect_monitor_round_trip_fail: Mutex>, } impl<'a> TestChainMonitor<'a> { - pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a dyn chaininterface::BroadcasterInterface, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a dyn chainmonitor::Persist, keys_manager: &'a TestKeysInterface) -> Self { + pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a dyn SyncBroadcaster, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a dyn SyncPersist, keys_manager: &'a TestKeysInterface) -> Self { Self { added_monitors: Mutex::new(Vec::new()), monitor_updates: Mutex::new(new_hash_map()), @@ -1448,18 +1466,19 @@ impl Drop for TestChainSource { pub struct TestScorer { /// Stores a tuple of (scid, ChannelUsage) - scorer_expectations: RefCell>>, + scorer_expectations: Mutex>>, } impl TestScorer { pub fn new() -> Self { Self { - scorer_expectations: 
RefCell::new(None), + scorer_expectations: Mutex::new(None), } } pub fn expect_usage(&self, scid: u64, expectation: ChannelUsage) { - self.scorer_expectations.borrow_mut().get_or_insert_with(|| VecDeque::new()).push_back((scid, expectation)); + let mut expectations = self.scorer_expectations.lock().unwrap(); + expectations.get_or_insert_with(|| VecDeque::new()).push_back((scid, expectation)); } } @@ -1477,7 +1496,7 @@ impl ScoreLookUp for TestScorer { Some(scid) => scid, None => return 0, }; - if let Some(scorer_expectations) = self.scorer_expectations.borrow_mut().as_mut() { + if let Some(scorer_expectations) = self.scorer_expectations.lock().unwrap().as_mut() { match scorer_expectations.pop_front() { Some((scid, expectation)) => { assert_eq!(expectation, usage); @@ -1511,7 +1530,7 @@ impl Drop for TestScorer { return; } - if let Some(scorer_expectations) = self.scorer_expectations.borrow().as_ref() { + if let Some(scorer_expectations) = self.scorer_expectations.lock().unwrap().as_ref() { if !scorer_expectations.is_empty() { panic!("Unsatisfied scorer expectations: {:?}", scorer_expectations) } From 1c50b9ef729b12007d6f4ef9695fc8b98683b5fb Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 21 Mar 2025 23:25:07 +0000 Subject: [PATCH 066/105] Use fair unlocking (via `parking_lot`) in tests In a coming commit we'll add a test that relies heavily on lock fairness, which is not provided by the default Rust `Mutex`. Luckily, `parking_lot` provides an `unlock_fair`, which we use here, though it implies we have to manually implement lock poisoning. Trivial merge conflict resolved in `lightning/Cargo.toml` --- lightning-liquidity/Cargo.toml | 1 + lightning/Cargo.toml | 1 + lightning/src/lib.rs | 2 + lightning/src/ln/monitor_tests.rs | 12 ++--- lightning/src/sync/debug_sync.rs | 85 ++++++++++++++++++++----------- 5 files changed, 65 insertions(+), 36 deletions(-) diff --git a/lightning-liquidity/Cargo.toml b/lightning-liquidity/Cargo.toml index ed229b8b69a..f6bebca3d15 100644 --- a/lightning-liquidity/Cargo.toml +++ b/lightning-liquidity/Cargo.toml @@ -38,6 +38,7 @@ lightning-background-processor = { version = "0.1.0", path = "../lightning-backg proptest = "1.0.0" tokio = { version = "1.35", default-features = false, features = [ "rt-multi-thread", "time", "sync", "macros" ] } +parking_lot = { version = "0.12", default-features = false } [lints.rust.unexpected_cfgs] level = "forbid" diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml index 6417d231f9e..e4f3eed900b 100644 --- a/lightning/Cargo.toml +++ b/lightning/Cargo.toml @@ -51,6 +51,7 @@ libm = { version = "0.2", default-features = false } [dev-dependencies] regex = "1.5.6" lightning-types = { version = "0.2.0", path = "../lightning-types", features = ["_test_utils"] } +parking_lot = { version = "0.12", default-features = false } [dev-dependencies.bitcoin] version = "0.32.2" diff --git a/lightning/src/lib.rs b/lightning/src/lib.rs index 0041143dc79..4fa2871ddcb 100644 --- a/lightning/src/lib.rs +++ b/lightning/src/lib.rs @@ -66,6 +66,8 @@ extern crate core; #[cfg(ldk_bench)] extern crate criterion; +#[cfg(all(feature = "std", test))] extern crate parking_lot; + #[macro_use] pub mod util; pub mod chain; diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 7a20a79159a..92b19790be5 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -3300,10 +3300,10 @@ fn test_update_replay_panics() { // Ensure applying the force-close update skipping the last normal update fails
let poisoned_monitor = monitor.clone(); - std::panic::catch_unwind(|| { + std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { let _ = poisoned_monitor.update_monitor(&updates[1], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger); // We should panic, rather than returning an error here. - }).unwrap_err(); + })).unwrap_err(); // Then apply the last normal and force-close update and make sure applying the preimage // updates out-of-order fails. @@ -3311,17 +3311,17 @@ fn test_update_replay_panics() { monitor.update_monitor(&updates[1], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger).unwrap(); let poisoned_monitor = monitor.clone(); - std::panic::catch_unwind(|| { + std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { let _ = poisoned_monitor.update_monitor(&updates[3], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger); // We should panic, rather than returning an error here. - }).unwrap_err(); + })).unwrap_err(); // Make sure re-applying the force-close update fails let poisoned_monitor = monitor.clone(); - std::panic::catch_unwind(|| { + std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { let _ = poisoned_monitor.update_monitor(&updates[1], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger); // We should panic, rather than returning an error here. - }).unwrap_err(); + })).unwrap_err(); // ...and finally ensure that applying all the updates succeeds. monitor.update_monitor(&updates[2], &nodes[1].tx_broadcaster, &nodes[1].fee_estimator, &nodes[1].logger).unwrap(); diff --git a/lightning/src/sync/debug_sync.rs b/lightning/src/sync/debug_sync.rs index f142328e45c..991a71ffbe0 100644 --- a/lightning/src/sync/debug_sync.rs +++ b/lightning/src/sync/debug_sync.rs @@ -5,15 +5,16 @@ use core::time::Duration; use std::cell::RefCell; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::Condvar as StdCondvar; -use std::sync::Mutex as StdMutex; -use std::sync::MutexGuard as StdMutexGuard; +use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use std::sync::RwLock as StdRwLock; use std::sync::RwLockReadGuard as StdRwLockReadGuard; use std::sync::RwLockWriteGuard as StdRwLockWriteGuard; -pub use std::sync::WaitTimeoutResult; +use parking_lot::Condvar as StdCondvar; +use parking_lot::Mutex as StdMutex; +use parking_lot::MutexGuard as StdMutexGuard; + +pub use parking_lot::WaitTimeoutResult; use crate::prelude::*; @@ -46,10 +47,9 @@ impl Condvar { &'a self, guard: MutexGuard<'a, T>, condition: F, ) -> LockResult> { let mutex: &'a Mutex = guard.mutex; - self.inner - .wait_while(guard.into_inner(), condition) - .map(|lock| MutexGuard { mutex, lock }) - .map_err(|_| ()) + let mut lock = guard.into_inner(); + self.inner.wait_while(&mut lock, condition); + Ok(MutexGuard { mutex, lock: Some(lock) }) } #[allow(unused)] @@ -57,10 +57,9 @@ impl Condvar { &'a self, guard: MutexGuard<'a, T>, dur: Duration, condition: F, ) -> LockResult<(MutexGuard<'a, T>, WaitTimeoutResult)> { let mutex = guard.mutex; - self.inner - .wait_timeout_while(guard.into_inner(), dur, condition) - .map_err(|_| ()) - .map(|(lock, e)| (MutexGuard { mutex, lock }, e)) + let mut lock = guard.into_inner(); + let e = self.inner.wait_while_for(&mut lock, condition, dur); + Ok((MutexGuard { mutex, lock: Some(lock) }, e)) } pub fn notify_all(&self) { @@ -150,7 +149,7 @@ impl LockMetadata { LOCKS_INIT.call_once(|| unsafe { LOCKS = Some(StdMutex::new(new_hash_map())); }); - let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock().unwrap(); 
+ let mut locks = unsafe { LOCKS.as_ref() }.unwrap().lock(); match locks.entry(lock_constr_location) { hash_map::Entry::Occupied(e) => { assert_eq!(lock_constr_colno, @@ -185,7 +184,7 @@ impl LockMetadata { } } for (_locked_idx, locked) in held.borrow().iter() { - for (locked_dep_idx, _locked_dep) in locked.locked_before.lock().unwrap().iter() { + for (locked_dep_idx, _locked_dep) in locked.locked_before.lock().iter() { let is_dep_this_lock = *locked_dep_idx == this.lock_idx; let has_same_construction = *locked_dep_idx == locked.lock_idx; if is_dep_this_lock && !has_same_construction { @@ -210,7 +209,7 @@ impl LockMetadata { } } // Insert any already-held locks in our locked-before set. - let mut locked_before = this.locked_before.lock().unwrap(); + let mut locked_before = this.locked_before.lock(); if !locked_before.contains_key(&locked.lock_idx) { let lockdep = LockDep { lock: Arc::clone(locked), _lockdep_trace: Backtrace::new() }; locked_before.insert(lockdep.lock.lock_idx, lockdep); @@ -237,7 +236,7 @@ impl LockMetadata { // Since a try-lock will simply fail if the lock is held already, we do not // consider try-locks to ever generate lockorder inversions. However, if a try-lock // succeeds, we do consider it to have created lockorder dependencies. - let mut locked_before = this.locked_before.lock().unwrap(); + let mut locked_before = this.locked_before.lock(); for (locked_idx, locked) in held.borrow().iter() { if !locked_before.contains_key(locked_idx) { let lockdep = @@ -252,11 +251,17 @@ impl LockMetadata { pub struct Mutex { inner: StdMutex, + poisoned: AtomicBool, deps: Arc, } + impl Mutex { pub(crate) fn into_inner(self) -> LockResult { - self.inner.into_inner().map_err(|_| ()) + if self.poisoned.load(Ordering::Acquire) { + Err(()) + } else { + Ok(self.inner.into_inner()) + } } } @@ -278,14 +283,14 @@ impl fmt::Debug for Mutex { #[must_use = "if unused the Mutex will immediately unlock"] pub struct MutexGuard<'a, T: Sized + 'a> { mutex: &'a Mutex, - lock: StdMutexGuard<'a, T>, + lock: Option>, } impl<'a, T: Sized> MutexGuard<'a, T> { fn into_inner(self) -> StdMutexGuard<'a, T> { // Somewhat unclear why we cannot move out of self.lock, but doing so gets E0509. 
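// (E0509 is "cannot move out of a type which implements the `Drop` trait":
// the `ptr::read` + `mem::forget` pattern below extracts the inner guard
// without running this wrapper's `Drop`, which would otherwise mark the lock
// as released and fair-unlock it.)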
unsafe { - let v: StdMutexGuard<'a, T> = std::ptr::read(&self.lock); + let v: StdMutexGuard<'a, T> = std::ptr::read(self.lock.as_ref().unwrap()); std::mem::forget(self); v } @@ -297,6 +302,10 @@ impl Drop for MutexGuard<'_, T> { LOCKS_HELD.with(|held| { held.borrow_mut().remove(&self.mutex.deps.lock_idx); }); + if std::thread::panicking() { + self.mutex.poisoned.store(true, Ordering::Release); + } + StdMutexGuard::unlock_fair(self.lock.take().unwrap()); } } @@ -304,37 +313,52 @@ impl Deref for MutexGuard<'_, T> { type Target = T; fn deref(&self) -> &T { - &self.lock.deref() + &self.lock.as_ref().unwrap().deref() } } impl DerefMut for MutexGuard<'_, T> { fn deref_mut(&mut self) -> &mut T { - self.lock.deref_mut() + self.lock.as_mut().unwrap().deref_mut() } } impl Mutex { pub fn new(inner: T) -> Mutex { - Mutex { inner: StdMutex::new(inner), deps: LockMetadata::new() } + Mutex { + inner: StdMutex::new(inner), + poisoned: AtomicBool::new(false), + deps: LockMetadata::new(), + } } pub fn lock<'a>(&'a self) -> LockResult> { LockMetadata::pre_lock(&self.deps, false); - self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ()) + let lock = self.inner.lock(); + if self.poisoned.load(Ordering::Acquire) { + Err(()) + } else { + Ok(MutexGuard { mutex: self, lock: Some(lock) }) + } } pub fn try_lock<'a>(&'a self) -> LockResult> { - let res = - self.inner.try_lock().map(|lock| MutexGuard { mutex: self, lock }).map_err(|_| ()); + let res = self.inner.try_lock().ok_or(()); if res.is_ok() { + if self.poisoned.load(Ordering::Acquire) { + return Err(()); + } LockMetadata::try_locked(&self.deps); } - res + res.map(|lock| MutexGuard { mutex: self, lock: Some(lock) }) } pub fn get_mut<'a>(&'a mut self) -> LockResult<&'a mut T> { - self.inner.get_mut().map_err(|_| ()) + if self.poisoned.load(Ordering::Acquire) { + Err(()) + } else { + Ok(self.inner.get_mut()) + } } } @@ -345,9 +369,10 @@ impl<'a, T: 'a> LockTestExt<'a> for Mutex { } type ExclLock = MutexGuard<'a, T>; #[inline] - fn unsafe_well_ordered_double_lock_self(&'a self) -> MutexGuard { + fn unsafe_well_ordered_double_lock_self(&'a self) -> MutexGuard<'a, T> { LockMetadata::pre_lock(&self.deps, true); - self.inner.lock().map(|lock| MutexGuard { mutex: self, lock }).unwrap() + let lock = self.inner.lock(); + MutexGuard { mutex: self, lock: Some(lock) } } } From 5006c6b805871a6283a3358e0589510d72bbc1c9 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 28 Mar 2025 21:56:17 +0000 Subject: [PATCH 067/105] Do not track HTLC IDs as separate MPP parts which need claiming When we claim an MPP payment, we need to track which channels have had the preimage durably added to their `ChannelMonitor` to ensure we don't remove the preimage from any `ChannelMonitor`s until all `ChannelMonitor`s have the preimage. Previously, we tracked each MPP part, down to the HTLC ID, as a part which we needed to get the preimage on disk for. However, this is not necessary - once a `ChannelMonitor` has a preimage, it applies it to all inbound HTLCs with the same payment hash. 
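Concretely, the set of channels blocking a claim therefore only needs one entry per (counterparty, funding outpoint, channel ID) triple rather than one per HTLC. A minimal standalone sketch of the dedup this patch applies when rebuilding that set (illustrative only, generic over the triple type):

fn unique_channels<T: Ord>(mut parts: Vec<T>) -> Vec<T> {
    // Sort so duplicate triples are adjacent, then drop the duplicates.
    parts.sort_unstable();
    parts.dedup();
    parts
}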
Further, this can cause a channel to wait on itself in cases of high-latency synchronous persistence - * If we receive an MPP payment for which multiple parts came to us over the same channel, * and claim the MPP payment, creating a `ChannelMonitorUpdate` for the first part but enqueueing the remaining HTLC claim(s) in the channel's holding cell, * and we receive a `revoke_and_ack` for the same channel before the `ChannelManager::claim_payment` method completes (as each claim waits for the `ChannelMonitorUpdate` persistence), * we will cause the `ChannelMonitorUpdate` for that `revoke_and_ack` to go into the blocked set, waiting on the MPP parts to be fully claimed, * but when `claim_payment` goes to add the next `ChannelMonitorUpdate` for the MPP claim, it will be placed in the blocked set, since the blocked set is non-empty. Thus, we'll end up with a `ChannelMonitorUpdate` in the blocked set which is needed to unblock the channel since it is a part of the MPP set which blocked the channel. Trivial conflicts resolved in `lightning/src/util/test_utils.rs` --- lightning/src/ln/chanmon_update_fail_tests.rs | 222 ++++++++++++++++++ lightning/src/ln/channelmanager.rs | 60 +++-- lightning/src/ln/functional_test_utils.rs | 20 ++ lightning/src/util/test_utils.rs | 14 ++ 4 files changed, 290 insertions(+), 26 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 2d01ece1158..ad1e6c26b98 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -3819,3 +3819,225 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { nodes[1].chain_monitor.complete_sole_pending_chan_update(&chan_a.2); expect_payment_claimed!(nodes[1], payment_hash, 1_000_000); } + +#[test] +#[cfg(all(feature = "std", not(target_os = "windows")))] +fn test_single_channel_multiple_mpp() { + use std::sync::atomic::{AtomicBool, Ordering}; + + // Test what happens when we attempt to claim an MPP with many parts that came to us through + // the same channel with a synchronous persistence interface which has very high latency. + // + // Previously, if a `revoke_and_ack` came in while we were still running in + // `ChannelManager::claim_payment` we'd end up hanging waiting to apply a + // `ChannelMonitorUpdate` until after it completed. See the commit which introduced this test + // for more info. + let chanmon_cfgs = create_chanmon_cfgs(9); + let node_cfgs = create_node_cfgs(9, &chanmon_cfgs); + let configs = [None, None, None, None, None, None, None, None, None]; + let node_chanmgrs = create_node_chanmgrs(9, &node_cfgs, &configs); + let mut nodes = create_network(9, &node_cfgs, &node_chanmgrs); + + let node_7_id = nodes[7].node.get_our_node_id(); + let node_8_id = nodes[8].node.get_our_node_id(); + + // Send an MPP payment in six parts along the path shown from top to bottom + // 0 + // 1 2 3 4 5 6 + // 7 + // 8 + // + // We can in theory reproduce this issue with fewer channels/HTLCs, but getting this test + // robust is rather challenging. We rely on having the main test thread wait on locks held in + // the background `claim_funds` thread and unlocking when the `claim_funds` thread completes a + // single `ChannelMonitorUpdate`.
+ // This thread calls `get_and_clear_pending_msg_events()` and `handle_revoke_and_ack()`, both + // of which require `ChannelManager` locks, but we have to make sure this thread gets a chance + // to be blocked on the mutexes before we let the background thread wake `claim_funds` so that + // the mutex can switch to this main thread. + // This relies on our locks being fair, but also on our threads getting runtime during the test + // run, which can be pretty competitive. Thus we do a dumb dance to be as conservative as + // possible - we have a background thread which completes a `ChannelMonitorUpdate` (by sending + // into the `write_blocker` mpsc) but it doesn't run until an mpsc channel sends from this main + // thread to the background thread, and then we let it sleep a while before we send the + // `ChannelMonitorUpdate` unblocker. + // Further, we give ourselves two chances each time, needing 4 HTLCs just to unlock our two + // `ChannelManager` calls. We then need a few remaining HTLCs to actually trigger the bug, so + // we use 6 HTLCs. + // Finally, we do not run this test on Winblowz because it, somehow, in 2025, does not implement + // actual preemptive multitasking and thinks that cooperative multitasking somehow is + // acceptable in the 21st century, let alone a quarter of the way into it. + const MAX_THREAD_INIT_TIME: std::time::Duration = std::time::Duration::from_secs(1); + + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 0, 3, 100_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 0, 4, 100_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 0, 5, 100_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 0, 6, 100_000, 0); + + create_announced_chan_between_nodes_with_value(&nodes, 1, 7, 100_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 2, 7, 100_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 3, 7, 100_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 4, 7, 100_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 5, 7, 100_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 6, 7, 100_000, 0); + create_announced_chan_between_nodes_with_value(&nodes, 7, 8, 1_000_000, 0); + + let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[8], 50_000_000); + + send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[7], &nodes[8]], &[&nodes[2], &nodes[7], &nodes[8]], &[&nodes[3], &nodes[7], &nodes[8]], &[&nodes[4], &nodes[7], &nodes[8]], &[&nodes[5], &nodes[7], &nodes[8]], &[&nodes[6], &nodes[7], &nodes[8]]], 50_000_000, payment_hash, payment_secret); + + let (do_a_write, blocker) = std::sync::mpsc::sync_channel(0); + *nodes[8].chain_monitor.write_blocker.lock().unwrap() = Some(blocker); + + // Until we have std::thread::scoped we have to unsafe { turn off the borrow checker }. + // We do this by casting a pointer to a `TestChannelManager` to a pointer to a + // `TestChannelManager` with different (in this case 'static) lifetime.
+ // This is even suggested in the second example at + // https://doc.rust-lang.org/std/mem/fn.transmute.html#examples + let claim_node: &'static TestChannelManager<'static, 'static> = + unsafe { std::mem::transmute(nodes[8].node as &TestChannelManager) }; + let thrd = std::thread::spawn(move || { + // Initiate the claim in a background thread as it will immediately block waiting on the + // `write_blocker` we set above. + claim_node.claim_funds(payment_preimage); + }); + + // First unlock one monitor so that we have a pending + // `update_fulfill_htlc`/`commitment_signed` pair to pass to our counterparty. + do_a_write.send(()).unwrap(); + + // Then fetch the `update_fulfill_htlc`/`commitment_signed`. Note that the + // `get_and_clear_pending_msg_events` will immediately hang trying to take a peer lock which + // `claim_funds` is holding. Thus, we release a second write after a small sleep in the + // background to give `claim_funds` a chance to step forward, unblocking + // `get_and_clear_pending_msg_events`. + let do_a_write_background = do_a_write.clone(); + let block_thrd2 = AtomicBool::new(true); + let block_thrd2_read: &'static AtomicBool = unsafe { std::mem::transmute(&block_thrd2) }; + let thrd2 = std::thread::spawn(move || { + while block_thrd2_read.load(Ordering::Acquire) { + std::thread::yield_now(); + } + std::thread::sleep(MAX_THREAD_INIT_TIME); + do_a_write_background.send(()).unwrap(); + std::thread::sleep(MAX_THREAD_INIT_TIME); + do_a_write_background.send(()).unwrap(); + }); + block_thrd2.store(false, Ordering::Release); + let first_updates = get_htlc_update_msgs(&nodes[8], &nodes[7].node.get_our_node_id()); + thrd2.join().unwrap(); + + // Disconnect node 7 from all its peers so it doesn't bother to fail the HTLCs back + nodes[7].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[7].node.peer_disconnected(nodes[2].node.get_our_node_id()); + nodes[7].node.peer_disconnected(nodes[3].node.get_our_node_id()); + nodes[7].node.peer_disconnected(nodes[4].node.get_our_node_id()); + nodes[7].node.peer_disconnected(nodes[5].node.get_our_node_id()); + nodes[7].node.peer_disconnected(nodes[6].node.get_our_node_id()); + + nodes[7].node.handle_update_fulfill_htlc(node_8_id, &first_updates.update_fulfill_htlcs[0]); + check_added_monitors(&nodes[7], 1); + expect_payment_forwarded!(nodes[7], nodes[1], nodes[8], Some(1000), false, false); + nodes[7].node.handle_commitment_signed(node_8_id, &first_updates.commitment_signed); + check_added_monitors(&nodes[7], 1); + let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_8_id); + + // Now, handle the `revoke_and_ack` from node 7. Note that `claim_funds` is still blocked on + // our peer lock, so we have to release a write to let it process. + // After this call completes, the channel previously would be locked up and should not be able + // to make further progress.
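// (Mechanically, `write_blocker` is the zero-capacity `sync_channel` created
// above: `TestChainMonitor::watch_channel`/`update_channel` block on
// `blocker.recv()`, so each `do_a_write.send(())` releases exactly one
// `ChannelMonitorUpdate` persistence.)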
+ let do_a_write_background = do_a_write.clone(); + let block_thrd3 = AtomicBool::new(true); + let block_thrd3_read: &'static AtomicBool = unsafe { std::mem::transmute(&block_thrd3) }; + let thrd3 = std::thread::spawn(move || { + while block_thrd3_read.load(Ordering::Acquire) { + std::thread::yield_now(); + } + std::thread::sleep(MAX_THREAD_INIT_TIME); + do_a_write_background.send(()).unwrap(); + std::thread::sleep(MAX_THREAD_INIT_TIME); + do_a_write_background.send(()).unwrap(); + }); + block_thrd3.store(false, Ordering::Release); + nodes[8].node.handle_revoke_and_ack(node_7_id, &raa); + thrd3.join().unwrap(); + assert!(!thrd.is_finished()); + + let thrd4 = std::thread::spawn(move || { + do_a_write.send(()).unwrap(); + do_a_write.send(()).unwrap(); + }); + + thrd4.join().unwrap(); + thrd.join().unwrap(); + + expect_payment_claimed!(nodes[8], payment_hash, 50_000_000); + + // At the end, we should have 7 ChannelMonitorUpdates - 6 for HTLC claims, and one for the + // above `revoke_and_ack`. + check_added_monitors(&nodes[8], 7); + + // Now drive everything to the end, at least as far as node 7 is concerned... + *nodes[8].chain_monitor.write_blocker.lock().unwrap() = None; + nodes[8].node.handle_commitment_signed(node_7_id, &cs); + check_added_monitors(&nodes[8], 1); + + let (updates, raa) = get_updates_and_revoke(&nodes[8], &nodes[7].node.get_our_node_id()); + + nodes[7].node.handle_update_fulfill_htlc(node_8_id, &updates.update_fulfill_htlcs[0]); + expect_payment_forwarded!(nodes[7], nodes[2], nodes[8], Some(1000), false, false); + nodes[7].node.handle_update_fulfill_htlc(node_8_id, &updates.update_fulfill_htlcs[1]); + expect_payment_forwarded!(nodes[7], nodes[3], nodes[8], Some(1000), false, false); + let mut next_source = 4; + if let Some(update) = updates.update_fulfill_htlcs.get(2) { + nodes[7].node.handle_update_fulfill_htlc(node_8_id, update); + expect_payment_forwarded!(nodes[7], nodes[4], nodes[8], Some(1000), false, false); + next_source += 1; + } + + nodes[7].node.handle_commitment_signed(node_8_id, &updates.commitment_signed); + nodes[7].node.handle_revoke_and_ack(node_8_id, &raa); + if updates.update_fulfill_htlcs.get(2).is_some() { + check_added_monitors(&nodes[7], 5); + } else { + check_added_monitors(&nodes[7], 4); + } + + let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_8_id); + + nodes[8].node.handle_revoke_and_ack(node_7_id, &raa); + nodes[8].node.handle_commitment_signed(node_7_id, &cs); + check_added_monitors(&nodes[8], 2); + + let (updates, raa) = get_updates_and_revoke(&nodes[8], &node_7_id); + + nodes[7].node.handle_update_fulfill_htlc(node_8_id, &updates.update_fulfill_htlcs[0]); + expect_payment_forwarded!(nodes[7], nodes[next_source], nodes[8], Some(1000), false, false); + next_source += 1; + nodes[7].node.handle_update_fulfill_htlc(node_8_id, &updates.update_fulfill_htlcs[1]); + expect_payment_forwarded!(nodes[7], nodes[next_source], nodes[8], Some(1000), false, false); + next_source += 1; + if let Some(update) = updates.update_fulfill_htlcs.get(2) { + nodes[7].node.handle_update_fulfill_htlc(node_8_id, update); + expect_payment_forwarded!(nodes[7], nodes[next_source], nodes[8], Some(1000), false, false); + } + + nodes[7].node.handle_commitment_signed(node_8_id, &updates.commitment_signed); + nodes[7].node.handle_revoke_and_ack(node_8_id, &raa); + if updates.update_fulfill_htlcs.get(2).is_some() { + check_added_monitors(&nodes[7], 5); + } else { + check_added_monitors(&nodes[7], 4); + } + + let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_8_id); + 
nodes[8].node.handle_revoke_and_ack(node_7_id, &raa); + nodes[8].node.handle_commitment_signed(node_7_id, &cs); + check_added_monitors(&nodes[8], 2); + + let raa = get_event_msg!(nodes[8], MessageSendEvent::SendRevokeAndACK, node_7_id); + nodes[7].node.handle_revoke_and_ack(node_8_id, &raa); + check_added_monitors(&nodes[7], 1); +} diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 9ede93c93d1..14d3c0ea5cb 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1105,7 +1105,7 @@ pub(crate) enum MonitorUpdateCompletionAction { /// A pending MPP claim which hasn't yet completed. /// /// Not written to disk. - pending_mpp_claim: Option<(PublicKey, ChannelId, u64, PendingMPPClaimPointer)>, + pending_mpp_claim: Option<(PublicKey, ChannelId, PendingMPPClaimPointer)>, }, /// Indicates an [`events::Event`] should be surfaced to the user and possibly resume the /// operation of another channel. @@ -1207,10 +1207,16 @@ impl From<&MPPClaimHTLCSource> for HTLCClaimSource { } } +#[derive(Debug)] +pub(crate) struct PendingMPPClaim { + channels_without_preimage: Vec<(PublicKey, OutPoint, ChannelId)>, + channels_with_preimage: Vec<(PublicKey, OutPoint, ChannelId)>, +} + #[derive(Clone, Debug, Hash, PartialEq, Eq)] /// The source of an HTLC which is being claimed as a part of an incoming payment. Each part is -/// tracked in [`PendingMPPClaim`] as well as in [`ChannelMonitor`]s, so that it can be converted -/// to an [`HTLCClaimSource`] for claim replays on startup. +/// tracked in [`ChannelMonitor`]s, so that it can be converted to an [`HTLCClaimSource`] for claim +/// replays on startup. struct MPPClaimHTLCSource { counterparty_node_id: PublicKey, funding_txo: OutPoint, @@ -1225,12 +1231,6 @@ impl_writeable_tlv_based!(MPPClaimHTLCSource, { (6, htlc_id, required), }); -#[derive(Debug)] -pub(crate) struct PendingMPPClaim { - channels_without_preimage: Vec, - channels_with_preimage: Vec, -} - #[derive(Clone, Debug, PartialEq, Eq)] /// When we're claiming a(n MPP) payment, we want to store information about that payment in the /// [`ChannelMonitor`] so that we can replay the claim without any information from the @@ -7017,8 +7017,15 @@ where } }).collect(); let pending_mpp_claim_ptr_opt = if sources.len() > 1 { + let mut channels_without_preimage = Vec::with_capacity(mpp_parts.len()); + for part in mpp_parts.iter() { + let chan = (part.counterparty_node_id, part.funding_txo, part.channel_id); + if !channels_without_preimage.contains(&chan) { + channels_without_preimage.push(chan); + } + } Some(Arc::new(Mutex::new(PendingMPPClaim { - channels_without_preimage: mpp_parts.clone(), + channels_without_preimage, channels_with_preimage: Vec::new(), }))) } else { @@ -7029,7 +7036,7 @@ where let this_mpp_claim = pending_mpp_claim_ptr_opt.as_ref().and_then(|pending_mpp_claim| if let Some(cp_id) = htlc.prev_hop.counterparty_node_id { let claim_ptr = PendingMPPClaimPointer(Arc::clone(pending_mpp_claim)); - Some((cp_id, htlc.prev_hop.channel_id, htlc.prev_hop.htlc_id, claim_ptr)) + Some((cp_id, htlc.prev_hop.channel_id, claim_ptr)) } else { None } @@ -7375,7 +7382,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ for action in actions.into_iter() { match action { MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim } => { - if let Some((counterparty_node_id, chan_id, htlc_id, claim_ptr)) = pending_mpp_claim { + if let Some((counterparty_node_id, chan_id, claim_ptr)) = pending_mpp_claim { let per_peer_state = self.per_peer_state.read().unwrap(); per_peer_state.get(&counterparty_node_id).map(|peer_state_mutex| { let mut peer_state = peer_state_mutex.lock().unwrap(); @@ -7386,24 +7393,17 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if *pending_claim == claim_ptr { let mut pending_claim_state_lock = pending_claim.0.lock().unwrap(); let pending_claim_state = &mut *pending_claim_state_lock; - pending_claim_state.channels_without_preimage.retain(|htlc_info| { + pending_claim_state.channels_without_preimage.retain(|(cp, op, cid)| { let this_claim = - htlc_info.counterparty_node_id == counterparty_node_id - && htlc_info.channel_id == chan_id - && htlc_info.htlc_id == htlc_id; + *cp == counterparty_node_id && *cid == chan_id; if this_claim { - pending_claim_state.channels_with_preimage.push(htlc_info.clone()); + pending_claim_state.channels_with_preimage.push((*cp, *op, *cid)); false } else { true } }); if pending_claim_state.channels_without_preimage.is_empty() { - for htlc_info in pending_claim_state.channels_with_preimage.iter() { - let freed_chan = ( - htlc_info.counterparty_node_id, - htlc_info.funding_txo, - htlc_info.channel_id, - blocker.clone() - ); + for (cp, op, cid) in pending_claim_state.channels_with_preimage.iter() { + let freed_chan = (*cp, *op, *cid, blocker.clone()); freed_channels.push(freed_chan); } } @@ -14232,8 +14232,16 @@ where if payment_claim.mpp_parts.is_empty() { return Err(DecodeError::InvalidValue); } + let mut channels_without_preimage = payment_claim.mpp_parts.iter() + .map(|htlc_info| (htlc_info.counterparty_node_id, htlc_info.funding_txo, htlc_info.channel_id)) + .collect::>(); + // If we have multiple MPP parts which were received over the same channel, + // we only track it once as once we get a preimage durably in the + // `ChannelMonitor` it will be used for all HTLCs with a matching hash. + channels_without_preimage.sort_unstable(); + channels_without_preimage.dedup(); let pending_claims = PendingMPPClaim { - channels_without_preimage: payment_claim.mpp_parts.clone(), + channels_without_preimage, channels_with_preimage: Vec::new(), }; let pending_claim_ptr_opt = Some(Arc::new(Mutex::new(pending_claims))); @@ -14266,7 +14274,7 @@ where for part in payment_claim.mpp_parts.iter() { let pending_mpp_claim = pending_claim_ptr_opt.as_ref().map(|ptr| ( - part.counterparty_node_id, part.channel_id, part.htlc_id, + part.counterparty_node_id, part.channel_id, PendingMPPClaimPointer(Arc::clone(&ptr)) )); let pending_claim_ptr = pending_claim_ptr_opt.as_ref().map(|ptr| diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index f052cb9e965..be77547b79c 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -779,6 +779,26 @@ pub fn get_revoke_commit_msgs>(node: & }) } +/// Gets a `UpdateHTLCs` and `revoke_and_ack` (i.e. after we get a responding `commitment_signed` +/// while we have updates in the holding cell). 
+pub fn get_updates_and_revoke>(node: &H, recipient: &PublicKey) -> (msgs::CommitmentUpdate, msgs::RevokeAndACK) { + let events = node.node().get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + (match events[0] { + MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => { + assert_eq!(node_id, recipient); + (*updates).clone() + }, + _ => panic!("Unexpected event"), + }, match events[1] { + MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { + assert_eq!(node_id, recipient); + (*msg).clone() + }, + _ => panic!("Unexpected event"), + }) +} + #[macro_export] /// Gets an RAA and CS which were sent in response to a commitment update /// diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 7095ac9c0cc..5bd5acaf176 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -366,6 +366,8 @@ pub struct TestChainMonitor<'a> { /// If this is set to Some(), the next round trip serialization check will not hold after an /// update_channel call (not watch_channel) for the given channel_id. pub expect_monitor_round_trip_fail: Mutex>, + #[cfg(feature = "std")] + pub write_blocker: Mutex>>, } impl<'a> TestChainMonitor<'a> { pub fn new(chain_source: Option<&'a TestChainSource>, broadcaster: &'a dyn SyncBroadcaster, logger: &'a TestLogger, fee_estimator: &'a TestFeeEstimator, persister: &'a dyn SyncPersist, keys_manager: &'a TestKeysInterface) -> Self { @@ -377,6 +379,8 @@ impl<'a> TestChainMonitor<'a> { keys_manager, expect_channel_force_closed: Mutex::new(None), expect_monitor_round_trip_fail: Mutex::new(None), + #[cfg(feature = "std")] + write_blocker: Mutex::new(None), } } @@ -387,6 +391,11 @@ impl<'a> TestChainMonitor<'a> { } impl<'a> chain::Watch for TestChainMonitor<'a> { fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor) -> Result { + #[cfg(feature = "std")] + if let Some(blocker) = &*self.write_blocker.lock().unwrap() { + blocker.recv().unwrap(); + } + // At every point where we get a monitor update, we should be able to send a useful monitor // to a watchtower and disk... let mut w = TestVecWriter(Vec::new()); @@ -401,6 +410,11 @@ impl<'a> chain::Watch for TestChainMonitor<'a> { } fn update_channel(&self, funding_txo: OutPoint, update: &channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus { + #[cfg(feature = "std")] + if let Some(blocker) = &*self.write_blocker.lock().unwrap() { + blocker.recv().unwrap(); + } + // Every monitor update should survive roundtrip let mut w = TestVecWriter(Vec::new()); update.write(&mut w).unwrap(); From 5209557a112fff702efb4df61a21fe4d26da1449 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 1 Apr 2025 19:52:07 +0000 Subject: [PATCH 068/105] Add release notes for LDK 0.1.2 --- CHANGELOG.md | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index be12be58d1e..db55e7904bd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,40 @@ +# 0.1.2 - Apr 02, 2025 - "Foolishly Edgy Cases" + +## API Updates + * `lightning-invoice` is now re-exported as `lightning::bolt11_invoice` + (#3671). + +## Performance Improvements + * `rapid-gossip-sync` graph parsing is substantially faster, resolving a + regression in 0.1 (#3581). + * `NetworkGraph` loading is now substantially faster and does fewer + allocations, resulting in a 20% further improvement in `rapid-gossip-sync` + loading when initializing from scratch (#3581). 
+ * `ChannelMonitor`s for closed channels are no longer always re-persisted + immediately after startup, reducing on-startup I/O burden (#3619). + +## Bug Fixes + * BOLT 11 invoices longer than 1023 bytes (and up to 7089 bytes) now + properly parse (#3665). + * In some cases, when using synchronous persistence with higher latency than + the latency to communicate with peers, when receiving an MPP payment with + multiple parts received over the same channel, a channel could hang and not + make progress, eventually leading to a force-closure due to timed-out HTLCs. + This has now been fixed (#3680). + * Some rare cases where multi-hop BOLT 11 route hints or multiple redundant + blinded paths could have led to the router creating invalid `Route`s were + fixed (#3586). + * Corrected the decay logic in `ProbabilisticScorer`'s historical buckets + model. Note that by default historical buckets are only decayed if no new + datapoints have been added for a channel for two weeks (#3562). + * `{Channel,Onion}MessageHandler::peer_disconnected` will now be called if a + different message handler refused connection by returning an `Err` from its + `peer_connected` method (#3580). + * If the counterparty broadcasts a revoked state with pending HTLCs, those + will now be claimed with other outputs which we consider to not be + vulnerable to pinning attacks if they are not yet claimable by our + counterparty, potentially reducing our exposure to pinning attacks (#3564). + # 0.1.1 - Jan 28, 2025 - "Onchain Matters" ## API Updates From 4b901668b103fd294df8769ff1f3271191f92ca0 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 1 Apr 2025 19:52:16 +0000 Subject: [PATCH 069/105] Bump crate versions to `lightning` 0.1.2/`lightning-invoice` 0.33.2 --- lightning-invoice/Cargo.toml | 2 +- lightning/Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lightning-invoice/Cargo.toml b/lightning-invoice/Cargo.toml index f4629c1ad5c..c45784e00c7 100644 --- a/lightning-invoice/Cargo.toml +++ b/lightning-invoice/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lightning-invoice" description = "Data structures to parse and serialize BOLT11 lightning invoices" -version = "0.33.1" +version = "0.33.2" authors = ["Sebastian Geisler "] documentation = "https://docs.rs/lightning-invoice/" license = "MIT OR Apache-2.0" diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml index e4f3eed900b..e62c4251b01 100644 --- a/lightning/Cargo.toml +++ b/lightning/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning" -version = "0.1.1" +version = "0.1.2" authors = ["Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning/" From a5302633f9c8a0c07ddd2bf5527e56959cd8bdf6 Mon Sep 17 00:00:00 2001 From: Valentine Wallace Date: Mon, 28 Apr 2025 18:39:48 +0000 Subject: [PATCH 070/105] Fix spurious MPP pathfinding failure This bug was recently surfaced to us by a user who wrote a test where the sender is attempting to route an MPP payment split across the two channels that it has with its LSP, where the LSP has a single large channel to the recipient. Previously this led to a pathfinding failure because our router was not sending the maximum possible value over the first MPP path it found due to overestimating the fees needed to cover the following hops.
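For reference, a standalone sketch of the downstream-fee aggregation the fix relies on, mirroring the integer arithmetic of the `compute_aggregated_base_prop_fee` helper added below (inputs are assumed to fit in `u32`, as the real `RoutingFees` fields do):

fn aggregate_fees(hops: &[(u64, u64)]) -> Option<(u64, u64)> {
    // Each entry is (base_msat, proportional_millionths), payer to payee;
    // fold from the payee backwards, rounding fees up at each hop.
    let (mut base, mut prop) = (0u64, 0u64);
    for &(next_base, next_prop) in hops.iter().rev() {
        // ceil(a / b) computed as (a + b - 1) / b
        base = base
            .checked_mul(1_000_000 + next_prop)?
            .checked_add(1_000_000 - 1)?
            .checked_div(1_000_000)?
            .checked_add(next_base)?;
        prop = prop
            .checked_add(1_000_000)?
            .checked_mul(1_000_000 + next_prop)?
            .checked_add(1_000_000 - 1)?
            .checked_div(1_000_000)?
            .checked_sub(1_000_000)?;
    }
    Some((base, prop))
}

Given the aggregated (base, prop) for the hops after a given hop, the router can compute exactly how much value that hop can deliver to the payee instead of overestimating it.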
Because the path that had just been found was not maxxed out, our router assumed that we had already found enough paths to cover the full payment amount and that we were finding additional paths for the purpose of redundant path selection. This caused the router to mark the recipient's only channel as exhausted, with the intention of choosing more unique paths in future iterations. In reality, this ended up with the recipient's only channel being disabled and subsequently failing to find a route entirely. Update the router to fully utilize the capacity of any paths it finds in this situation, preventing the "redundant path selection" behavior from kicking in. Use and `rustfmt` conflicts resolved in: * lightning/src/blinded_path/payment.rs The new test was also updated to avoid APIs not present on 0.1. --- lightning/src/blinded_path/payment.rs | 49 ++++++++++++++------ lightning/src/ln/payment_tests.rs | 52 +++++++++++++++++++++ lightning/src/routing/router.rs | 67 +++++++++++++++++++++------ 3 files changed, 139 insertions(+), 29 deletions(-) diff --git a/lightning/src/blinded_path/payment.rs b/lightning/src/blinded_path/payment.rs index e3a81927146..901f717ea93 100644 --- a/lightning/src/blinded_path/payment.rs +++ b/lightning/src/blinded_path/payment.rs @@ -30,6 +30,7 @@ use crate::offers::nonce::Nonce; use crate::offers::offer::OfferId; use crate::routing::gossip::{NodeId, ReadOnlyNetworkGraph}; use crate::sign::{EntropySource, NodeSigner, Recipient}; +use crate::types::routing::RoutingFees; use crate::util::ser::{FixedLengthReader, LengthReadableArgs, HighZeroBytesDroppedBigSize, Readable, WithoutLength, Writeable, Writer}; use core::mem; @@ -530,20 +531,17 @@ pub(crate) fn amt_to_forward_msat(inbound_amt_msat: u64, payment_relay: &Payment u64::try_from(amt_to_forward).ok() } -pub(super) fn compute_payinfo( - intermediate_nodes: &[PaymentForwardNode], payee_tlvs: &UnauthenticatedReceiveTlvs, - payee_htlc_maximum_msat: u64, min_final_cltv_expiry_delta: u16, -) -> Result { +// Returns (aggregated_base_fee, aggregated_proportional_fee) +pub(crate) fn compute_aggregated_base_prop_fee(hops_fees: I) -> Result<(u64, u64), ()> +where + I: DoubleEndedIterator, +{ let mut curr_base_fee: u64 = 0; let mut curr_prop_mil: u64 = 0; - let mut cltv_expiry_delta: u16 = min_final_cltv_expiry_delta; - for tlvs in intermediate_nodes.iter().rev().map(|n| &n.tlvs) { - // In the future, we'll want to take the intersection of all supported features for the - // `BlindedPayInfo`, but there are no features in that context right now. 
- if tlvs.features.requires_unknown_bits_from(&BlindedHopFeatures::empty()) { return Err(()) } + for fees in hops_fees.rev() { + let next_base_fee = fees.base_msat as u64; + let next_prop_mil = fees.proportional_millionths as u64; - let next_base_fee = tlvs.payment_relay.fee_base_msat as u64; - let next_prop_mil = tlvs.payment_relay.fee_proportional_millionths as u64; // Use integer arithmetic to compute `ceil(a/b)` as `(a+b-1)/b` // ((curr_base_fee * (1_000_000 + next_prop_mil)) / 1_000_000) + next_base_fee curr_base_fee = curr_base_fee.checked_mul(1_000_000 + next_prop_mil) @@ -558,13 +556,34 @@ pub(super) fn compute_payinfo( .map(|f| f / 1_000_000) .and_then(|f| f.checked_sub(1_000_000)) .ok_or(())?; - - cltv_expiry_delta = cltv_expiry_delta.checked_add(tlvs.payment_relay.cltv_expiry_delta).ok_or(())?; } + Ok((curr_base_fee, curr_prop_mil)) +} + +pub(super) fn compute_payinfo( + intermediate_nodes: &[PaymentForwardNode], payee_tlvs: &UnauthenticatedReceiveTlvs, + payee_htlc_maximum_msat: u64, min_final_cltv_expiry_delta: u16, +) -> Result { + let (aggregated_base_fee, aggregated_prop_fee) = + compute_aggregated_base_prop_fee(intermediate_nodes.iter().map(|node| RoutingFees { + base_msat: node.tlvs.payment_relay.fee_base_msat, + proportional_millionths: node.tlvs.payment_relay.fee_proportional_millionths, + }))?; + let mut htlc_minimum_msat: u64 = 1; let mut htlc_maximum_msat: u64 = 21_000_000 * 100_000_000 * 1_000; // Total bitcoin supply + let mut cltv_expiry_delta: u16 = min_final_cltv_expiry_delta; for node in intermediate_nodes.iter() { + // In the future, we'll want to take the intersection of all supported features for the + // `BlindedPayInfo`, but there are no features in that context right now. + if node.tlvs.features.requires_unknown_bits_from(&BlindedHopFeatures::empty()) { + return Err(()); + } + + cltv_expiry_delta = + cltv_expiry_delta.checked_add(node.tlvs.payment_relay.cltv_expiry_delta).ok_or(())?; + // The min htlc for an intermediate node is that node's min minus the fees charged by all of the // following hops for forwarding that min, since that fee amount will automatically be included // in the amount that this node receives and contribute towards reaching its min. @@ -583,8 +602,8 @@ pub(super) fn compute_payinfo( if htlc_maximum_msat < htlc_minimum_msat { return Err(()) } Ok(BlindedPayInfo { - fee_base_msat: u32::try_from(curr_base_fee).map_err(|_| ())?, - fee_proportional_millionths: u32::try_from(curr_prop_mil).map_err(|_| ())?, + fee_base_msat: u32::try_from(aggregated_base_fee).map_err(|_| ())?, + fee_proportional_millionths: u32::try_from(aggregated_prop_fee).map_err(|_| ())?, cltv_expiry_delta, htlc_minimum_msat, htlc_maximum_msat, diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 0963ed0aa4f..348cace949d 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -4479,3 +4479,55 @@ fn pay_route_without_params() { ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], payment_preimage) ); } + +#[test] +fn max_out_mpp_path() { + // In this setup, the sender is attempting to route an MPP payment split across the two channels + // that it has with its LSP, where the LSP has a single large channel to the recipient. + // + // Previously a user ran into a pathfinding failure here because our router was not sending the + // maximum possible value over the first MPP path it found due to overestimating the fees needed + // to cover the following hops. 
Because the path that had just been found was not maxxed out, our + // router assumed that we had already found enough paths to cover the full payment amount and that + // we were finding additional paths for the purpose of redundant path selection. This caused the + // router to mark the recipient's only channel as exhausted, with the intention of choosing more + // unique paths in future iterations. In reality, this ended up with the recipient's only channel + // being disabled and subsequently failing to find a route entirely. + // + // The router has since been updated to fully utilize the capacity of any paths it finds in this + // situation, preventing the "redundant path selection" behavior from kicking in. + + let mut user_cfg = test_default_channel_config(); + user_cfg.channel_config.forwarding_fee_base_msat = 0; + user_cfg.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; + let mut lsp_cfg = test_default_channel_config(); + lsp_cfg.channel_config.forwarding_fee_base_msat = 0; + lsp_cfg.channel_config.forwarding_fee_proportional_millionths = 3000; + lsp_cfg.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; + + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs( + 3, &node_cfgs, &[Some(user_cfg.clone()), Some(lsp_cfg.clone()), Some(user_cfg.clone())] + ); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 200_000, 0); + create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 300_000, 0); + create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 600_000, 0); + + let amt_msat = 350_000_000; + let invoice_params = crate::ln::channelmanager::Bolt11InvoiceParameters { + amount_msats: Some(amt_msat), + ..Default::default() + }; + let invoice = nodes[2].node.create_bolt11_invoice(invoice_params).unwrap(); + + let (hash, onion, params) = + crate::ln::bolt11_payment::payment_parameters_from_invoice(&invoice).unwrap(); + nodes[0].node.send_payment(hash, onion, PaymentId([42; 32]), params, Retry::Attempts(0)).unwrap(); + + assert!(nodes[0].node.list_recent_payments().len() == 1); + check_added_monitors(&nodes[0], 2); // one monitor update per MPP part + nodes[0].node.get_and_clear_pending_msg_events(); +} diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 079b83563c3..ce9108eddbe 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -1890,9 +1890,10 @@ impl<'a> PaymentPath<'a> { // that it the value being transferred has decreased while we were doing path finding, leading // to the fees being paid not lining up with the actual limits. // - // Note that this function is not aware of the available_liquidity limit, and thus does not - // support increasing the value being transferred beyond what was selected during the initial - // routing passes. + // This function may also be used to increase the value being transferred in the case that + // overestimating later hops' fees caused us to underutilize earlier hops' capacity. + // + // Note that this function is not aware of the available_liquidity limit of any hops. // // Returns the amount that this path contributes to the total payment value, which may be greater // than `value_msat` if we had to overpay to meet the final node's `htlc_minimum_msat`. 
@@ -1957,15 +1958,56 @@ impl<'a> PaymentPath<'a> { cur_hop.hop_use_fee_msat = new_fee; total_fee_paid_msat += new_fee; } else { - // It should not be possible because this function is called only to reduce the - // value. In that case, compute_fee was already called with the same fees for - // larger amount and there was no overflow. + // It should not be possible because this function is only called either to reduce the + // value or with a larger amount that was already checked for overflow in + // `compute_max_final_value_contribution`. In the former case, compute_fee was already + // called with the same fees for larger amount and there was no overflow. unreachable!(); } } } value_msat + extra_contribution_msat } + + // Returns the maximum contribution that this path can make to the final value of the payment. May + // be slightly lower than the actual max due to rounding errors when aggregating fees along the + // path. + fn compute_max_final_value_contribution( + &self, used_liquidities: &HashMap, channel_saturation_pow_half: u8 + ) -> u64 { + let mut max_path_contribution = u64::MAX; + for (idx, (hop, _)) in self.hops.iter().enumerate() { + let hop_effective_capacity_msat = hop.candidate.effective_capacity(); + let hop_max_msat = max_htlc_from_capacity( + hop_effective_capacity_msat, channel_saturation_pow_half + ).saturating_sub(*used_liquidities.get(&hop.candidate.id()).unwrap_or(&0_u64)); + + let next_hops_feerates_iter = self.hops + .iter() + .skip(idx + 1) + .map(|(hop, _)| hop.candidate.fees()); + + // Aggregate the fees of the hops that come after this one, and use those fees to compute the + // maximum amount that this hop can contribute to the final value received by the payee. + let (next_hops_aggregated_base, next_hops_aggregated_prop) = + crate::blinded_path::payment::compute_aggregated_base_prop_fee(next_hops_feerates_iter).unwrap(); + + // ceil(((hop_max_msat - agg_base) * 1_000_000) / (1_000_000 + agg_prop)) + let hop_max_final_value_contribution = (hop_max_msat as u128) + .checked_sub(next_hops_aggregated_base as u128) + .and_then(|f| f.checked_mul(1_000_000)) + .and_then(|f| f.checked_add(1_000_000 - 1)) + .and_then(|f| f.checked_add(next_hops_aggregated_prop as u128)) + .map(|f| f / ((next_hops_aggregated_prop as u128).saturating_add(1_000_000))); + + if let Some(hop_contribution) = hop_max_final_value_contribution { + let hop_contribution: u64 = hop_contribution.try_into().unwrap_or(u64::MAX); + max_path_contribution = core::cmp::min(hop_contribution, max_path_contribution); + } else { debug_assert!(false); } + } + + max_path_contribution + } } #[inline(always)] @@ -3116,7 +3158,10 @@ where L::Target: Logger { // recompute the fees again, so that if that's the case, we match the currently // underpaid htlc_minimum_msat with fees. debug_assert_eq!(payment_path.get_value_msat(), value_contribution_msat); - let desired_value_contribution = cmp::min(value_contribution_msat, final_value_msat); + let max_path_contribution_msat = payment_path.compute_max_final_value_contribution( + &used_liquidities, channel_saturation_pow_half + ); + let desired_value_contribution = cmp::min(max_path_contribution_msat, final_value_msat); value_contribution_msat = payment_path.update_value_and_recompute_fees(desired_value_contribution); // Since a path allows to transfer as much value as @@ -3128,7 +3173,6 @@ where L::Target: Logger { // might have been computed considering a larger value. 
// Remember that we used these channels so that we don't rely // on the same liquidity in future paths. - let mut prevented_redundant_path_selection = false; for (hop, _) in payment_path.hops.iter() { let spent_on_hop_msat = value_contribution_msat + hop.next_hops_fee_msat; let used_liquidity_msat = used_liquidities @@ -3137,14 +3181,9 @@ where L::Target: Logger { .or_insert(spent_on_hop_msat); let hop_capacity = hop.candidate.effective_capacity(); let hop_max_msat = max_htlc_from_capacity(hop_capacity, channel_saturation_pow_half); - if *used_liquidity_msat == hop_max_msat { - // If this path used all of this channel's available liquidity, we know - // this path will not be selected again in the next loop iteration. - prevented_redundant_path_selection = true; - } debug_assert!(*used_liquidity_msat <= hop_max_msat); } - if !prevented_redundant_path_selection { + if max_path_contribution_msat > value_contribution_msat { // If we weren't capped by hitting a liquidity limit on a channel in the path, // we'll probably end up picking the same path again on the next iteration. // Decrease the available liquidity of a hop in the middle of the path. From 8d8f15bcb74c56259687c444f3cec3674f1b2d53 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 28 Apr 2025 23:43:37 +0000 Subject: [PATCH 071/105] Use `floor` when computing max value of a path, not `ceil` When calculating the maximum contribution of a path to a larger route, we want to ensure we don't overshoot as that might cause us to violate a maximum value limit. In 209cb2aa2e0d67bf89a130b070f7116178e9ddb4, we started by calculating with `ceil`, which can trigger exactly that, so here we drop the extra addition, switching us to `floor`. Found both by the `router` fuzzer as well as the `generate_large_mpp_routes` test. --- lightning/src/routing/router.rs | 96 ++++++++++++++++++++++++++++++++- 1 file changed, 94 insertions(+), 2 deletions(-) diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index ce9108eddbe..c09a014dc62 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -1992,11 +1992,10 @@ impl<'a> PaymentPath<'a> { let (next_hops_aggregated_base, next_hops_aggregated_prop) = crate::blinded_path::payment::compute_aggregated_base_prop_fee(next_hops_feerates_iter).unwrap(); - // ceil(((hop_max_msat - agg_base) * 1_000_000) / (1_000_000 + agg_prop)) + // floor(((hop_max_msat - agg_base) * 1_000_000) / (1_000_000 + agg_prop)) let hop_max_final_value_contribution = (hop_max_msat as u128) .checked_sub(next_hops_aggregated_base as u128) .and_then(|f| f.checked_mul(1_000_000)) - .and_then(|f| f.checked_add(1_000_000 - 1)) .and_then(|f| f.checked_add(next_hops_aggregated_prop as u128)) .map(|f| f / ((next_hops_aggregated_prop as u128).saturating_add(1_000_000))); @@ -8504,6 +8503,99 @@ mod tests { assert_eq!(route.get_total_fees(), 123); } + #[test] + fn test_max_final_contribution() { + // When `compute_max_final_value_contribution` was added, it had a bug where it would + // over-estimate the maximum value contribution of a hop by using `ceil` rather than + // `floor`. This tests that case by attempting to send 1 million sats over a channel where + // the remaining hops have a base fee of zero and a proportional fee of 1 millionth. 
+ + let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph(); + let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx); + let scorer = ln_test_utils::TestScorer::new(); + let random_seed_bytes = [42; 32]; + + // Enable channel 1, setting max HTLC to 1M sats + update_channel(&gossip_sync, &secp_ctx, &our_privkey, UnsignedChannelUpdate { + chain_hash: ChainHash::using_genesis_block(Network::Testnet), + short_channel_id: 1, + timestamp: 2, + message_flags: 1, // Only must_be_one + channel_flags: 0, + cltv_expiry_delta: (1 << 4) | 1, + htlc_minimum_msat: 0, + htlc_maximum_msat: 1_000_000, + fee_base_msat: 0, + fee_proportional_millionths: 0, + excess_data: Vec::new() + }); + + // Set the fee on channel 3 to zero + update_channel(&gossip_sync, &secp_ctx, &privkeys[0], UnsignedChannelUpdate { + chain_hash: ChainHash::using_genesis_block(Network::Testnet), + short_channel_id: 3, + timestamp: 2, + message_flags: 1, // Only must_be_one + channel_flags: 0, + cltv_expiry_delta: (3 << 4) | 1, + htlc_minimum_msat: 0, + htlc_maximum_msat: 1_000_000_000, + fee_base_msat: 0, + fee_proportional_millionths: 0, + excess_data: Vec::new() + }); + + // Set the fee on channel 6 to 1 millionth + update_channel(&gossip_sync, &secp_ctx, &privkeys[2], UnsignedChannelUpdate { + chain_hash: ChainHash::using_genesis_block(Network::Testnet), + short_channel_id: 6, + timestamp: 2, + message_flags: 1, // Only must_be_one + channel_flags: 0, + cltv_expiry_delta: (6 << 4) | 1, + htlc_minimum_msat: 0, + htlc_maximum_msat: 1_000_000_000, + fee_base_msat: 0, + fee_proportional_millionths: 1, + excess_data: Vec::new() + }); + + // Now attempt to pay over the channel 1 -> channel 3 -> channel 6 path + // This should fail as we need to send 1M + 1 sats to cover the fee but channel 1 only + // allows for 1M sats to flow over it. + let config = UserConfig::default(); + let payment_params = PaymentParameters::from_node_id(nodes[4], 42) + .with_bolt11_features(channelmanager::provided_bolt11_invoice_features(&config)) + .unwrap(); + let route_params = RouteParameters::from_payment_params_and_value(payment_params, 1_000_000); + get_route(&our_id, &route_params, &network_graph.read_only(), None, + Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap_err(); + + // Now set channel 1 max HTLC to 1M + 1 sats + update_channel(&gossip_sync, &secp_ctx, &our_privkey, UnsignedChannelUpdate { + chain_hash: ChainHash::using_genesis_block(Network::Testnet), + short_channel_id: 1, + timestamp: 3, + message_flags: 1, // Only must_be_one + channel_flags: 0, + cltv_expiry_delta: (1 << 4) | 1, + htlc_minimum_msat: 0, + htlc_maximum_msat: 1_000_001, + fee_base_msat: 0, + fee_proportional_millionths: 0, + excess_data: Vec::new() + }); + + // And attempt the same payment again, but this time it should work. + let route = get_route(&our_id, &route_params, &network_graph.read_only(), None, + Arc::clone(&logger), &scorer, &Default::default(), &random_seed_bytes).unwrap(); + assert_eq!(route.paths.len(), 1); + assert_eq!(route.paths[0].hops.len(), 3); + assert_eq!(route.paths[0].hops[0].short_channel_id, 1); + assert_eq!(route.paths[0].hops[1].short_channel_id, 3); + assert_eq!(route.paths[0].hops[2].short_channel_id, 6); + } + #[test] fn allow_us_being_first_hint() { // Check that we consider a route hint even if we are the src of the first hop. 
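To make the rounding change concrete, here is a standalone sketch (not LDK's actual helper, which aggregates fees across all later hops and uses checked arithmetic) of the max-contribution formula with the numbers from `test_max_final_contribution` above: a first hop allowing at most 1_000_000 msat, and later hops charging zero base fee plus one proportional millionth.

fn max_contribution_msat(hop_max_msat: u128, agg_base_msat: u128, agg_prop_millionths: u128) -> u128 {
	// floor(((hop_max_msat - agg_base) * 1_000_000) / (1_000_000 + agg_prop))
	(hop_max_msat - agg_base_msat) * 1_000_000 / (1_000_000 + agg_prop_millionths)
}

fn main() {
	let max = max_contribution_msat(1_000_000, 0, 1);
	// Forwarding 999_999 msat costs 999_999 + 1 msat of fee, exactly the
	// first hop's 1_000_000 msat limit, so this value fits.
	assert_eq!(max, 999_999);
	// The old `ceil` version returned 1_000_000 here, which requires
	// 1_000_001 msat over the first hop and thus overshoots its maximum.
}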
From 42c8dd4cba75308bc38546512d2d6c30a1196565 Mon Sep 17 00:00:00 2001 From: Philip Kannegaard Hayes Date: Tue, 22 Apr 2025 17:57:44 -0700 Subject: [PATCH 072/105] offers: avoid panic when truncating payer_note in UTF-8 code point `String::truncate` takes a byte index but panics if we split in the middle of a UTF-8 codepoint. Sadly, in `InvoiceRequest::fields` we want to truncate the payer note to a maximum of 512 bytes, which may be in the middle of a UTF-8 codepoint and can cause a panic. Here we iterate over the bytes in the string until we find one not in the middle of a UTF-8 codepoint and then split the string there. Trivial `rustfmt` conflicts resolved in: * lightning/src/offers/invoice_request.rs --- lightning/src/offers/invoice_request.rs | 80 ++++++++++++++++++++++--- 1 file changed, 71 insertions(+), 9 deletions(-) diff --git a/lightning/src/offers/invoice_request.rs b/lightning/src/offers/invoice_request.rs index 8d7d25cf2b5..fc4d7d274bd 100644 --- a/lightning/src/offers/invoice_request.rs +++ b/lightning/src/offers/invoice_request.rs @@ -944,13 +944,37 @@ impl VerifiedInvoiceRequest { InvoiceRequestFields { payer_signing_pubkey: *payer_signing_pubkey, quantity: *quantity, - payer_note_truncated: payer_note.clone() - .map(|mut s| { s.truncate(PAYER_NOTE_LIMIT); UntrustedString(s) }), + payer_note_truncated: payer_note + .clone() + // Truncate the payer note to `PAYER_NOTE_LIMIT` bytes, rounding + // down to the nearest valid UTF-8 code point boundary. + .map(|s| UntrustedString(string_truncate_safe(s, PAYER_NOTE_LIMIT))), human_readable_name: self.offer_from_hrn().clone(), } } } +/// `String::truncate(new_len)` panics if you split inside a UTF-8 code point, +/// which would leave the `String` containing invalid UTF-8. This function will +/// instead truncate the string to the next smaller code point boundary so the +/// truncated string always remains valid UTF-8. +/// +/// This can still split a grapheme cluster, but that's probably fine. +/// We'd otherwise have to pull in the `unicode-segmentation` crate and its big +/// unicode tables to find the next smaller grapheme cluster boundary. +fn string_truncate_safe(mut s: String, new_len: usize) -> String { + // Finds the largest byte index `x` not exceeding byte index `index` where + // `s.is_char_boundary(x)` is true. + // TODO(phlip9): remove when `std::str::floor_char_boundary` stabilizes. + let truncated_len = if new_len >= s.len() { + s.len() + } else { + (0..=new_len).rev().find(|idx| s.is_char_boundary(*idx)).unwrap_or(0) + }; + s.truncate(truncated_len); + s +} + impl InvoiceRequestContents { pub(super) fn metadata(&self) -> &[u8] { self.inner.metadata() @@ -1339,7 +1363,8 @@ mod tests { use crate::ln::inbound_payment::ExpandedKey; use crate::ln::msgs::{DecodeError, MAX_VALUE_MSAT}; use crate::offers::invoice::{Bolt12Invoice, SIGNATURE_TAG as INVOICE_SIGNATURE_TAG}; - use crate::offers::merkle::{SignatureTlvStreamRef, TaggedHash, TlvStream, self}; + use crate::offers::invoice_request::string_truncate_safe; + use crate::offers::merkle::{self, SignatureTlvStreamRef, TaggedHash, TlvStream}; use crate::offers::nonce::Nonce; use crate::offers::offer::{Amount, ExperimentalOfferTlvStreamRef, OfferTlvStreamRef, Quantity}; #[cfg(not(c_bindings))] @@ -2611,12 +2636,22 @@ mod tests { .build().unwrap(); assert_eq!(offer.issuer_signing_pubkey(), Some(node_id)); + // UTF-8 payer note that we can't naively `.truncate(PAYER_NOTE_LIMIT)` + // because it would split a multi-byte UTF-8 code point.
+ let payer_note = "❤️".repeat(86); + assert_eq!(payer_note.len(), PAYER_NOTE_LIMIT + 4); + let expected_payer_note = "❤️".repeat(85); + let invoice_request = offer - .request_invoice(&expanded_key, nonce, &secp_ctx, payment_id).unwrap() - .chain(Network::Testnet).unwrap() - .quantity(1).unwrap() - .payer_note("0".repeat(PAYER_NOTE_LIMIT * 2)) - .build_and_sign().unwrap(); + .request_invoice(&expanded_key, nonce, &secp_ctx, payment_id) + .unwrap() + .chain(Network::Testnet) + .unwrap() + .quantity(1) + .unwrap() + .payer_note(payer_note) + .build_and_sign() + .unwrap(); match invoice_request.verify_using_metadata(&expanded_key, &secp_ctx) { Ok(invoice_request) => { let fields = invoice_request.fields(); @@ -2626,7 +2661,7 @@ mod tests { InvoiceRequestFields { payer_signing_pubkey: invoice_request.payer_signing_pubkey(), quantity: Some(1), - payer_note_truncated: Some(UntrustedString("0".repeat(PAYER_NOTE_LIMIT))), + payer_note_truncated: Some(UntrustedString(expected_payer_note)), human_readable_name: None, } ); @@ -2641,4 +2676,31 @@ mod tests { Err(_) => panic!("unexpected error"), } } + + #[test] + fn test_string_truncate_safe() { + // We'll correctly truncate to the nearest UTF-8 code point boundary: + // ❤ variation-selector + // e29da4 efb88f + let s = String::from("❤️"); + assert_eq!(s.len(), 6); + assert_eq!(s, string_truncate_safe(s.clone(), 7)); + assert_eq!(s, string_truncate_safe(s.clone(), 6)); + assert_eq!("❤", string_truncate_safe(s.clone(), 5)); + assert_eq!("❤", string_truncate_safe(s.clone(), 4)); + assert_eq!("❤", string_truncate_safe(s.clone(), 3)); + assert_eq!("", string_truncate_safe(s.clone(), 2)); + assert_eq!("", string_truncate_safe(s.clone(), 1)); + assert_eq!("", string_truncate_safe(s.clone(), 0)); + + // Every byte in an ASCII string is also a full UTF-8 code point. + let s = String::from("my ASCII string!"); + for new_len in 0..(s.len() + 5) { + if new_len >= s.len() { + assert_eq!(s, string_truncate_safe(s.clone(), new_len)); + } else { + assert_eq!(s[..new_len], string_truncate_safe(s.clone(), new_len)); + } + } + } } From cf684faaea0c8cac5ae84741bbb851370da76682 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 25 Apr 2025 18:07:42 +0000 Subject: [PATCH 073/105] Make it easier for the fuzzer to get a `VerifiedInvoiceRequest` In the next commit we attempt to verify `InvoiceRequest`s when fuzzing so that we can test fetching the `InvoiceRequestFields`, but it's useful to allow the verification to succeed more often first, which we do here.
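The pattern used below deserves a note: the strict checks stay in place for normal builds, while under `cfg(fuzzing)` a cheap predicate over the fuzzer's own input decides whether to bypass them, keeping post-verification code reachable. A minimal sketch of the shape (hypothetical `verify` function; `strict_check` stands in for the real constant-time comparisons):

fn verify(metadata: &[u8], strict_check: bool) -> Result<(), ()> {
	#[allow(unused_mut)]
	let mut ok = strict_check;
	#[cfg(fuzzing)]
	if metadata.first().map_or(true, |b| b & 1 == 0) {
		// In fuzz builds roughly half of all inputs skip the check, so the
		// fuzzer does not need to forge an HMAC to reach deeper code.
		ok = true;
	}
	if ok { Ok(()) } else { Err(()) }
}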
--- lightning/src/offers/signer.rs | 41 +++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/lightning/src/offers/signer.rs b/lightning/src/offers/signer.rs index fa9fdfa3467..acfa169bffb 100644 --- a/lightning/src/offers/signer.rs +++ b/lightning/src/offers/signer.rs @@ -361,19 +361,30 @@ fn verify_metadata( let derived_keys = Keypair::from_secret_key( secp_ctx, &SecretKey::from_slice(hmac.as_byte_array()).unwrap() ); - if fixed_time_eq(&signing_pubkey.serialize(), &derived_keys.public_key().serialize()) { + #[allow(unused_mut)] + let mut ok = fixed_time_eq(&signing_pubkey.serialize(), &derived_keys.public_key().serialize()); + #[cfg(fuzzing)] + if metadata[0] & 1 == 0 { + ok = true; + } + if ok { Ok(Some(derived_keys)) } else { Err(()) } - } else if metadata[Nonce::LENGTH..].len() == Sha256::LEN { - if fixed_time_eq(&metadata[Nonce::LENGTH..], &hmac.to_byte_array()) { + } else { + #[allow(unused_mut)] + let mut ok = metadata.len() == Nonce::LENGTH + Sha256::LEN + && fixed_time_eq(&metadata[Nonce::LENGTH..], &hmac.to_byte_array()); + #[cfg(fuzzing)] + if metadata.is_empty() || metadata[0] & 1 == 0 { + ok = true; + } + if ok { Ok(None) } else { Err(()) } - } else { - Err(()) } } @@ -381,16 +392,20 @@ fn hmac_for_message<'a>( metadata: &[u8], expanded_key: &ExpandedKey, iv_bytes: &[u8; IV_LEN], tlv_stream: impl core::iter::Iterator> ) -> Result, ()> { - if metadata.len() < Nonce::LENGTH { - return Err(()); - } - - let nonce = match Nonce::try_from(&metadata[..Nonce::LENGTH]) { - Ok(nonce) => nonce, - Err(_) => return Err(()), - }; let mut hmac = expanded_key.hmac_for_offer(); hmac.input(iv_bytes); + + let nonce = if metadata.len() < Nonce::LENGTH { + // In fuzzing it's relatively challenging for the fuzzer to find cases where we have issues + // in a BOLT 12 object but also have a right-sized nonce. So instead we allow any size + // nonce. + if !cfg!(fuzzing) { + return Err(()); + } + Nonce::try_from(&[42; Nonce::LENGTH][..]).unwrap() + } else { + Nonce::try_from(&metadata[..Nonce::LENGTH])? + }; hmac.input(&nonce.0); for record in tlv_stream { From 406e0314bf716e4f59e60798b2061d914d615f42 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 25 Apr 2025 18:22:03 +0000 Subject: [PATCH 074/105] Fuzz fetching `InvoiceRequestFields` from `VerifiedInvoiceRequest`s This should allow us to reach the panic from two commits ago via the fuzzer. --- fuzz/src/invoice_request_deser.rs | 26 +++++++++++++++++-------- lightning/src/offers/invoice_request.rs | 14 ++++++++++++- 2 files changed, 31 insertions(+), 9 deletions(-) diff --git a/fuzz/src/invoice_request_deser.rs b/fuzz/src/invoice_request_deser.rs index 37668c1d801..beff5c21a0b 100644 --- a/fuzz/src/invoice_request_deser.rs +++ b/fuzz/src/invoice_request_deser.rs @@ -85,16 +85,26 @@ fn build_response( let expanded_key = ExpandedKey::new([42; 32]); let entropy_source = Randomness {}; let nonce = Nonce::from_entropy_source(&entropy_source); + + let invoice_request_fields = + if let Ok(ver) = invoice_request.clone().verify_using_metadata(&expanded_key, secp_ctx) { + // Previously we had a panic where we'd truncate the payer note possibly cutting a + // Unicode character in two here, so try to fetch fields if we can validate.
+ ver.fields() + } else { + InvoiceRequestFields { + payer_signing_pubkey: invoice_request.payer_signing_pubkey(), + quantity: invoice_request.quantity(), + payer_note_truncated: invoice_request + .payer_note() + .map(|s| UntrustedString(s.to_string())), + human_readable_name: None, + } + }; + let payment_context = PaymentContext::Bolt12Offer(Bolt12OfferContext { offer_id: OfferId([42; 32]), - invoice_request: InvoiceRequestFields { - payer_signing_pubkey: invoice_request.payer_signing_pubkey(), - quantity: invoice_request.quantity(), - payer_note_truncated: invoice_request - .payer_note() - .map(|s| UntrustedString(s.to_string())), - human_readable_name: None, - }, + invoice_request: invoice_request_fields, }); let payee_tlvs = UnauthenticatedReceiveTlvs { payment_secret: PaymentSecret([42; 32]), diff --git a/lightning/src/offers/invoice_request.rs b/lightning/src/offers/invoice_request.rs index fc4d7d274bd..36491fdb645 100644 --- a/lightning/src/offers/invoice_request.rs +++ b/lightning/src/offers/invoice_request.rs @@ -933,7 +933,14 @@ impl VerifiedInvoiceRequest { #[cfg(c_bindings)] invoice_request_respond_with_derived_signing_pubkey_methods!(self, self.inner, InvoiceWithDerivedSigningPubkeyBuilder); - pub(crate) fn fields(&self) -> InvoiceRequestFields { + /// Fetch the [`InvoiceRequestFields`] for this verified invoice. + /// + /// These are fields which we expect to be useful when receiving a payment for this invoice + /// request, and include the returned [`InvoiceRequestFields`] in the + /// [`PaymentContext::Bolt12Offer`]. + /// + /// [`PaymentContext::Bolt12Offer`]: crate::blinded_path::payment::PaymentContext::Bolt12Offer + pub fn fields(&self) -> InvoiceRequestFields { let InvoiceRequestContents { payer_signing_pubkey, inner: InvoiceRequestContentsWithoutPayerSigningPubkey { @@ -1316,8 +1323,13 @@ pub struct InvoiceRequestFields { } /// The maximum number of characters included in [`InvoiceRequestFields::payer_note_truncated`]. +#[cfg(not(fuzzing))] pub const PAYER_NOTE_LIMIT: usize = 512; +/// The maximum number of characters included in [`InvoiceRequestFields::payer_note_truncated`]. +#[cfg(fuzzing)] +pub const PAYER_NOTE_LIMIT: usize = 8; + impl Writeable for InvoiceRequestFields { fn write(&self, writer: &mut W) -> Result<(), io::Error> { write_tlv_fields!(writer, { From d5f149dde1611f26c23a4e424797c5665ca1f767 Mon Sep 17 00:00:00 2001 From: shaavan Date: Wed, 12 Mar 2025 17:45:00 +0530 Subject: [PATCH 075/105] Make InvoiceReceived event generation idempotent MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Ensures `InvoiceReceived` events are not generated multiple times when `manually_handle_bolt12_invoice` is enabled. Duplicate events for the same invoice could cause confusion—this change introduces an idempotency check to prevent that. Conflicts resolved in: * lightning/src/ln/outbound_payment.rs due to the migration upstream from `max_total_routing_fee_msat` to a more general config struct. 
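The core of the change is that event generation is now gated on a state transition which can succeed only once per payment. A standalone sketch with a simplified two-state enum (the real code moves `PendingOutboundPayment::AwaitingInvoice` to `InvoiceReceived` and carries the retry strategy and fee limit along):

use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum Payment {
	AwaitingInvoice,
	InvoiceReceived,
}

// Transition to `InvoiceReceived` exactly once, reporting whether this call
// performed the transition so the caller knows whether to emit an event.
fn mark_invoice_received(payments: &mut HashMap<u64, Payment>, id: u64) -> Result<bool, ()> {
	match payments.get_mut(&id) {
		Some(payment) if *payment == Payment::AwaitingInvoice => {
			*payment = Payment::InvoiceReceived;
			Ok(true)
		},
		// Duplicate invoice: already transitioned, so stay silent.
		Some(_) => Ok(false),
		// No pending payment with this id: an unexpected invoice.
		None => Err(()),
	}
}

fn main() {
	let mut payments = HashMap::from([(1u64, Payment::AwaitingInvoice)]);
	assert_eq!(mark_invoice_received(&mut payments, 1), Ok(true)); // emit event
	assert_eq!(mark_invoice_received(&mut payments, 1), Ok(false)); // duplicate, no event
	assert_eq!(mark_invoice_received(&mut payments, 2), Err(()));
}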
--- lightning/src/ln/channelmanager.rs | 5 ++ lightning/src/ln/outbound_payment.rs | 73 +++++++++++++++++++--------- 2 files changed, 55 insertions(+), 23 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 14d3c0ea5cb..33ad59a4139 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -12148,6 +12148,11 @@ where ); if self.default_configuration.manually_handle_bolt12_invoices { + // Update the corresponding entry in `PendingOutboundPayment` for this invoice. + // This ensures that event generation remains idempotent in case we receive + // the same invoice multiple times. + self.pending_outbound_payments.mark_invoice_received(&invoice, payment_id).ok()?; + let event = Event::InvoiceReceived { payment_id, invoice, context, responder, }; diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index 7b579b7a261..1719b47dab3 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -73,9 +73,9 @@ pub(crate) enum PendingOutboundPayment { max_total_routing_fee_msat: Option, retryable_invoice_request: Option }, - // This state will never be persisted to disk because we transition from `AwaitingInvoice` to - // `Retryable` atomically within the `ChannelManager::total_consistency_lock`. Useful to avoid - // holding the `OutboundPayments::pending_outbound_payments` lock during pathfinding. + // Represents the state after the invoice has been received, transitioning from the corresponding + // `AwaitingInvoice` state. + // Helps avoid holding the `OutboundPayments::pending_outbound_payments` lock during pathfinding. InvoiceReceived { payment_hash: PaymentHash, retry_strategy: Retry, @@ -833,26 +833,8 @@ impl OutboundPayments { IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, { - let payment_hash = invoice.payment_hash(); - let max_total_routing_fee_msat; - let retry_strategy; - match self.pending_outbound_payments.lock().unwrap().entry(payment_id) { - hash_map::Entry::Occupied(entry) => match entry.get() { - PendingOutboundPayment::AwaitingInvoice { - retry_strategy: retry, max_total_routing_fee_msat: max_total_fee, .. 
- } => { - retry_strategy = *retry; - max_total_routing_fee_msat = *max_total_fee; - *entry.into_mut() = PendingOutboundPayment::InvoiceReceived { - payment_hash, - retry_strategy: *retry, - max_total_routing_fee_msat, - }; - }, - _ => return Err(Bolt12PaymentError::DuplicateInvoice), - }, - hash_map::Entry::Vacant(_) => return Err(Bolt12PaymentError::UnexpectedInvoice), - } + let (payment_hash, retry_strategy, max_total_routing_fee_msat, _) = self + .mark_invoice_received_and_get_details(invoice, payment_id)?; if invoice.invoice_features().requires_unknown_bits_from(&features) { self.abandon_payment( @@ -1754,6 +1736,51 @@ impl OutboundPayments { } } + pub(super) fn mark_invoice_received( + &self, invoice: &Bolt12Invoice, payment_id: PaymentId + ) -> Result<(), Bolt12PaymentError> { + self.mark_invoice_received_and_get_details(invoice, payment_id) + .and_then(|(_, _, _, is_newly_marked)| { + is_newly_marked + .then_some(()) + .ok_or(Bolt12PaymentError::DuplicateInvoice) + }) + } + + fn mark_invoice_received_and_get_details( + &self, invoice: &Bolt12Invoice, payment_id: PaymentId + ) -> Result<(PaymentHash, Retry, Option, bool), Bolt12PaymentError> { + match self.pending_outbound_payments.lock().unwrap().entry(payment_id) { + hash_map::Entry::Occupied(entry) => match entry.get() { + PendingOutboundPayment::AwaitingInvoice { + retry_strategy: retry, max_total_routing_fee_msat: max_total_fee, .. + } => { + let payment_hash = invoice.payment_hash(); + let retry = *retry; + let max_total_fee = *max_total_fee; + *entry.into_mut() = PendingOutboundPayment::InvoiceReceived { + payment_hash, + retry_strategy: retry, + max_total_routing_fee_msat: max_total_fee, + }; + + Ok((payment_hash, retry, max_total_fee, true)) + }, + // When manual invoice handling is enabled, the corresponding `PendingOutboundPayment` entry + // is already updated at the time the invoice is received. This ensures that `InvoiceReceived` + // event generation remains idempotent, even if the same invoice is received again before the + // event is handled by the user. + PendingOutboundPayment::InvoiceReceived { + retry_strategy, max_total_routing_fee_msat, .. + } => { + Ok((invoice.payment_hash(), *retry_strategy, *max_total_routing_fee_msat, false)) + }, + _ => Err(Bolt12PaymentError::DuplicateInvoice), + }, + hash_map::Entry::Vacant(_) => Err(Bolt12PaymentError::UnexpectedInvoice), + } + } + fn pay_route_internal( &self, route: &Route, payment_hash: PaymentHash, recipient_onion: &RecipientOnionFields, keysend_preimage: Option, invoice_request: Option<&InvoiceRequest>, From a248367e56385a2761608bf842144088eeef3724 Mon Sep 17 00:00:00 2001 From: shaavan Date: Mon, 10 Mar 2025 23:18:45 +0530 Subject: [PATCH 076/105] Introduce idempotency check in tests --- lightning/src/ln/offers_tests.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/lightning/src/ln/offers_tests.rs b/lightning/src/ln/offers_tests.rs index 35a4c61713c..48171d4faeb 100644 --- a/lightning/src/ln/offers_tests.rs +++ b/lightning/src/ln/offers_tests.rs @@ -1185,7 +1185,14 @@ fn pays_bolt12_invoice_asynchronously() { let onion_message = alice.onion_messenger.next_onion_message_for_peer(bob_id).unwrap(); bob.onion_messenger.handle_onion_message(alice_id, &onion_message); - let (invoice, context) = match get_event!(bob, Event::InvoiceReceived) { + // Re-process the same onion message to ensure idempotency — + // we should not generate a duplicate `InvoiceReceived` event. 
+ bob.onion_messenger.handle_onion_message(alice_id, &onion_message); + + let mut events = bob.node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + + let (invoice, context) = match events.pop().unwrap() { Event::InvoiceReceived { payment_id: actual_payment_id, invoice, context, .. } => { assert_eq!(actual_payment_id, payment_id); (invoice, context) From 4deb527265eccd5e4a08d39b4989ba303a91429b Mon Sep 17 00:00:00 2001 From: Arik Sosman Date: Tue, 11 Mar 2025 00:00:21 -0700 Subject: [PATCH 077/105] Pin once_cell@1.20.3 for older Rust versions --- ci/ci-tests.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh index f4987569fda..1d11a5fd624 100755 --- a/ci/ci-tests.sh +++ b/ci/ci-tests.sh @@ -21,6 +21,9 @@ PIN_RELEASE_DEPS # pin the release dependencies in our main workspace # The addr2line v0.21 crate (a dependency of `backtrace` starting with 0.3.69) relies on rustc 1.65 [ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p backtrace --precise "0.3.68" --verbose +# The once_cell v1.21.0 crate (a dependency of `proptest`) relies on rustc 1.70 +[ "$RUSTC_MINOR_VERSION" -lt 70 ] && cargo update -p once_cell --precise "1.20.3" --verbose + # proptest 1.3.0 requires rustc 1.64.0 [ "$RUSTC_MINOR_VERSION" -lt 64 ] && cargo update -p proptest --precise "1.2.0" --verbose From 8e962d7c11f0e8f859ebe7daa567c4a71ac5d6e1 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 30 Apr 2025 00:33:38 +0000 Subject: [PATCH 078/105] Add release notes for LDK 0.1.3 --- CHANGELOG.md | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index db55e7904bd..80652dcb743 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,27 @@ +# 0.1.3 - Apr 30, 2025 - "Routing Unicode in 2025" + +## Bug Fixes + * `Event::InvoiceReceived` is now only generated once for each `Bolt12Invoice` + received matching a pending outbound payment. Previously it would be provided + each time we received an invoice, which may happen many times if the sender + sends redundant messages to improve success rates (#3658). + * LDK's router now more fully saturates paths which are subject to HTLC + maximum restrictions after the first hop. In some rare cases this can result + in finding paths when it would previously spuriously decide it cannot find + enough diverse paths (#3707, #3755). + +## Security +0.1.3 fixes a denial-of-service vulnerability which can cause a crash of an +LDK-based node if an attacker has access to a valid `Bolt12Offer` which the +LDK-based node created. + * A malicious payer which requests a BOLT 12 Invoice from an LDK-based node + (via the `Bolt12InvoiceRequest` message) can cause a panic of the + LDK-based node due to the way `String::truncate` handles UTF-8 codepoints. + The codepath can only be reached once the received `Bolt12InvoiceRequest` + has been authenticated to be based on a valid `Bolt12Offer` which the same + LDK-based node issued (#3747, #3750). + + # 0.1.2 - Apr 02, 2025 - "Foolishly Edgy Cases" ## API Updates @@ -35,6 +59,7 @@ vulnerable to pinning attacks if they are not yet claimable by our counterparty, potentially reducing our exposure to pinning attacks (#3564). + # 0.1.1 - Jan 28, 2025 - "Onchain Matters" ## API Updates @@ -71,6 +96,7 @@ cause force-closure of unrelated channels. when they broadcast the stale commitment (#3556). Thanks to Matt Morehouse for reporting this issue.
+ # 0.1 - Jan 15, 2025 - "Human Readable Version Numbers" The LDK 0.1 release represents an important milestone for the LDK project. While From 0edff530e492b6db9e311d164e67f049f3e438e5 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 30 Apr 2025 00:34:04 +0000 Subject: [PATCH 079/105] Bump the `lightning` crate to 0.1.3 --- lightning/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml index e62c4251b01..aaf6f60023c 100644 --- a/lightning/Cargo.toml +++ b/lightning/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning" -version = "0.1.2" +version = "0.1.3" authors = ["Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning/" From 50e1d15c23ebc174dd999ae51238171ec36f6e8c Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 12 May 2025 00:37:05 +0000 Subject: [PATCH 080/105] Do not fail to load `ChannelManager` when we see claiming payments When we begin claiming a payment, we move the tracking of it from `claimable_payments` to `claiming_payments`. This ensures we only ever have one payment which is in the process of being claimed with a given payment hash at a time and lets us keep track of when all parts have been claimed with their `ChannelMonitor`s. However, on startup, we check that failing to move a payment from `claimable_payments` to `claiming_payments` implies that it is not present in `claiming_payments`. This is fine if the payment doesn't exist, but if the payment has already started being claimed, this will fail and we'll refuse to deserialize the `ChannelManager` (with a `debug_assert` failure in debug mode). Here we resolve this by checking if a payment is already being claimed before we attempt to initiate claiming and skip the failing check in that case. --- lightning/src/ln/channelmanager.rs | 16 ++++++++++++++++ lightning/src/ln/reload_tests.rs | 23 ++++++++++++++++------- 2 files changed, 32 insertions(+), 7 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 33ad59a4139..12362878524 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -14237,6 +14237,22 @@ where if payment_claim.mpp_parts.is_empty() { return Err(DecodeError::InvalidValue); } + { + let payments = channel_manager.claimable_payments.lock().unwrap(); + if !payments.claimable_payments.contains_key(&payment_hash) { + if let Some(payment) = payments.pending_claiming_payments.get(&payment_hash) { + if payment.payment_id == payment_claim.claiming_payment.payment_id { + // If this payment already exists and was marked as + // being-claimed then the serialized state must contain all + // of the pending `ChannelMonitorUpdate`s required to get + // the preimage on disk in all MPP parts. Thus we can skip + // the replay below. 
+ continue; + } + } + } + } + + let mut channels_without_preimage = payment_claim.mpp_parts.iter() + .map(|htlc_info| (htlc_info.counterparty_node_id, htlc_info.funding_txo, htlc_info.channel_id)) + .collect::>(); diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index eaeb3e7bac4..16904d85758 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -781,7 +781,7 @@ fn test_forwardable_regen() { claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2); } -fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { +fn do_test_partial_claim_before_restart(persist_both_monitors: bool, double_restart: bool) { // Test what happens if a node receives an MPP payment, claims it, but crashes before // persisting the ChannelManager. If `persist_both_monitors` is false, also crash after only // updating one of the two channels' ChannelMonitors. As a result, on startup, we'll (a) still @@ -797,11 +797,11 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { // definitely claimed. let chanmon_cfgs = create_chanmon_cfgs(4); let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); - let persister; - let new_chain_monitor; + let (persist_d_1, persist_d_2); + let (chain_d_1, chain_d_2); let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); - let nodes_3_deserialized; + let (node_d_1, node_d_2); let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); @@ -876,7 +876,14 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { } // Now restart nodes[3]. - reload_node!(nodes[3], original_manager, &[&updated_monitor.0, &original_monitor.0], persister, new_chain_monitor, nodes_3_deserialized); + reload_node!(nodes[3], original_manager.clone(), &[&updated_monitor.0, &original_monitor.0], persist_d_1, chain_d_1, node_d_1); + + if double_restart { + // Previously, we had a bug where we'd fail to reload if we re-persist the `ChannelManager` + // without updating any `ChannelMonitor`s as we'd fail to double-initiate the claim replay. + // We test that here, ensuring that we can reload again. + reload_node!(nodes[3], node_d_1.encode(), &[&updated_monitor.0, &original_monitor.0], persist_d_2, chain_d_2, node_d_2); + } // Until the startup background events are processed (in `get_and_clear_pending_events`, // below), the preimage is not copied to the non-persisted monitor... @@ -971,8 +978,10 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) { #[test] fn test_partial_claim_before_restart() { - do_test_partial_claim_before_restart(false); - do_test_partial_claim_before_restart(true); + do_test_partial_claim_before_restart(false, false); + do_test_partial_claim_before_restart(false, true); + do_test_partial_claim_before_restart(true, false); + do_test_partial_claim_before_restart(true, true); } fn do_forwarded_payment_no_manager_persistence(use_cs_commitment: bool, claim_htlc: bool, use_intercept: bool) { From 63f5d7733ef186bb89f96500d8d6a7e0948f0b91 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 2 Feb 2025 16:28:51 +0000 Subject: [PATCH 081/105] Optionally disable all state-based policy checks in test signer The signer we use in tests tracks the state of the channel and refuses to sign when the channel attempts an invalid state transition. In the next commit, however, we'll add an upgrade test which will fail these checks as the state won't get copied from previous versions of LDK to this version.
Thus, here, we add the ability to disable all state-based checks in the signer. --- fuzz/src/chanmon_consistency.rs | 4 +- fuzz/src/full_stack.rs | 3 +- lightning/src/util/test_channel_signer.rs | 88 ++++++++++++++--------- lightning/src/util/test_utils.rs | 14 ++-- 4 files changed, 68 insertions(+), 41 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 73e4f88f1f3..1f4e7c24152 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -382,7 +382,7 @@ impl SignerProvider for KeyProvider { channel_keys_id, ); let revoked_commitment = self.make_enforcement_state_cell(keys.commitment_seed); - TestChannelSigner::new_with_revoked(keys, revoked_commitment, false) + TestChannelSigner::new_with_revoked(keys, revoked_commitment, false, false) } fn read_chan_signer(&self, buffer: &[u8]) -> Result { @@ -391,7 +391,7 @@ impl SignerProvider for KeyProvider { let inner: InMemorySigner = ReadableArgs::read(&mut reader, self)?; let state = self.make_enforcement_state_cell(inner.commitment_seed); - Ok(TestChannelSigner::new_with_revoked(inner, state, false)) + Ok(TestChannelSigner::new_with_revoked(inner, state, false, false)) } fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result { diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs index c1f2dd11b1e..143f69f160e 100644 --- a/fuzz/src/full_stack.rs +++ b/fuzz/src/full_stack.rs @@ -522,6 +522,7 @@ impl SignerProvider for KeyProvider { }, state, false, + false, ) } @@ -529,7 +530,7 @@ impl SignerProvider for KeyProvider { let inner: InMemorySigner = ReadableArgs::read(&mut data, self)?; let state = Arc::new(Mutex::new(EnforcementState::new())); - Ok(TestChannelSigner::new_with_revoked(inner, state, false)) + Ok(TestChannelSigner::new_with_revoked(inner, state, false, false)) } fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result { diff --git a/lightning/src/util/test_channel_signer.rs b/lightning/src/util/test_channel_signer.rs index f3ef4dc1557..2e1289b2eb0 100644 --- a/lightning/src/util/test_channel_signer.rs +++ b/lightning/src/util/test_channel_signer.rs @@ -71,6 +71,7 @@ pub struct TestChannelSigner { /// Channel state used for policy enforcement pub state: Arc>, pub disable_revocation_policy_check: bool, + pub disable_all_state_policy_checks: bool, } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] @@ -124,6 +125,7 @@ impl TestChannelSigner { inner, state, disable_revocation_policy_check: false, + disable_all_state_policy_checks: false, } } @@ -132,12 +134,11 @@ impl TestChannelSigner { /// Since there are multiple copies of this struct for each channel, some coordination is needed /// so that all copies are aware of enforcement state. A pointer to this state is provided /// here, usually by an implementation of KeysInterface. 
- pub fn new_with_revoked(inner: InMemorySigner, state: Arc>, disable_revocation_policy_check: bool) -> Self { - Self { - inner, - state, - disable_revocation_policy_check, - } + pub fn new_with_revoked( + inner: InMemorySigner, state: Arc>, + disable_revocation_policy_check: bool, disable_all_state_policy_checks: bool, + ) -> Self { + Self { inner, state, disable_revocation_policy_check, disable_all_state_policy_checks } } pub fn channel_type_features(&self) -> &ChannelTypeFeatures { self.inner.channel_type_features().unwrap() } @@ -177,19 +178,26 @@ impl ChannelSigner for TestChannelSigner { if !self.is_signer_available(SignerOp::ReleaseCommitmentSecret) { return Err(()); } - { - let mut state = self.state.lock().unwrap(); + let mut state = self.state.lock().unwrap(); + if !self.disable_all_state_policy_checks { assert!(idx == state.last_holder_revoked_commitment || idx == state.last_holder_revoked_commitment - 1, "can only revoke the current or next unrevoked commitment - trying {}, last revoked {}", idx, state.last_holder_revoked_commitment); assert!(idx > state.last_holder_commitment, "cannot revoke the last holder commitment - attempted to revoke {} last commitment {}", idx, state.last_holder_commitment); - state.last_holder_revoked_commitment = idx; } + state.last_holder_revoked_commitment = idx; self.inner.release_commitment_secret(idx) } fn validate_holder_commitment(&self, holder_tx: &HolderCommitmentTransaction, _outbound_htlc_preimages: Vec) -> Result<(), ()> { let mut state = self.state.lock().unwrap(); let idx = holder_tx.commitment_number(); - assert!(idx == state.last_holder_commitment || idx == state.last_holder_commitment - 1, "expecting to validate the current or next holder commitment - trying {}, current {}", idx, state.last_holder_commitment); + if !self.disable_all_state_policy_checks { + assert!( + idx == state.last_holder_commitment || idx == state.last_holder_commitment - 1, + "expecting to validate the current or next holder commitment - trying {}, current {}", + idx, + state.last_holder_commitment + ); + } state.last_holder_commitment = idx; Ok(()) } @@ -200,7 +208,9 @@ impl ChannelSigner for TestChannelSigner { return Err(()); } let mut state = self.state.lock().unwrap(); - assert!(idx == state.last_counterparty_revoked_commitment || idx == state.last_counterparty_revoked_commitment - 1, "expecting to validate the current or next counterparty revocation - trying {}, current {}", idx, state.last_counterparty_revoked_commitment); + if !self.disable_all_state_policy_checks { + assert!(idx == state.last_counterparty_revoked_commitment || idx == state.last_counterparty_revoked_commitment - 1, "expecting to validate the current or next counterparty revocation - trying {}, current {}", idx, state.last_counterparty_revoked_commitment); + } state.last_counterparty_revoked_commitment = idx; Ok(()) } @@ -218,22 +228,28 @@ impl EcdsaChannelSigner for TestChannelSigner { fn sign_counterparty_commitment(&self, commitment_tx: &CommitmentTransaction, inbound_htlc_preimages: Vec, outbound_htlc_preimages: Vec, secp_ctx: &Secp256k1) -> Result<(Signature, Vec), ()> { self.verify_counterparty_commitment_tx(commitment_tx, secp_ctx); - { - #[cfg(test)] - if !self.is_signer_available(SignerOp::SignCounterpartyCommitment) { - return Err(()); - } - let mut state = self.state.lock().unwrap(); - let actual_commitment_number = commitment_tx.commitment_number(); - let last_commitment_number = state.last_counterparty_commitment; + #[cfg(test)] + if 
!self.is_signer_available(SignerOp::SignCounterpartyCommitment) { + return Err(()); + } + let mut state = self.state.lock().unwrap(); + let actual_commitment_number = commitment_tx.commitment_number(); + let last_commitment_number = state.last_counterparty_commitment; + if !self.disable_all_state_policy_checks { // These commitment numbers are backwards counting. We expect either the same as the previously encountered, // or the next one. assert!(last_commitment_number == actual_commitment_number || last_commitment_number - 1 == actual_commitment_number, "{} doesn't come after {}", actual_commitment_number, last_commitment_number); // Ensure that the counterparty doesn't get more than two broadcastable commitments - // the last and the one we are trying to sign - assert!(actual_commitment_number >= state.last_counterparty_revoked_commitment - 2, "cannot sign a commitment if second to last wasn't revoked - signing {} revoked {}", actual_commitment_number, state.last_counterparty_revoked_commitment); - state.last_counterparty_commitment = cmp::min(last_commitment_number, actual_commitment_number) + assert!( + actual_commitment_number >= state.last_counterparty_revoked_commitment - 2, + "cannot sign a commitment if second to last wasn't revoked - signing {} revoked {}", + actual_commitment_number, + state.last_counterparty_revoked_commitment + ); } + state.last_counterparty_commitment = + cmp::min(last_commitment_number, actual_commitment_number); Ok(self.inner.sign_counterparty_commitment(commitment_tx, inbound_htlc_preimages, outbound_htlc_preimages, secp_ctx).unwrap()) } @@ -244,12 +260,14 @@ impl EcdsaChannelSigner for TestChannelSigner { return Err(()); } let trusted_tx = self.verify_holder_commitment_tx(commitment_tx, secp_ctx); - let state = self.state.lock().unwrap(); - let commitment_number = trusted_tx.commitment_number(); - if state.last_holder_revoked_commitment - 1 != commitment_number && state.last_holder_revoked_commitment - 2 != commitment_number { - if !self.disable_revocation_policy_check { - panic!("can only sign the next two unrevoked commitment numbers, revoked={} vs requested={} for {}", - state.last_holder_revoked_commitment, commitment_number, self.inner.commitment_seed[0]) + if !self.disable_all_state_policy_checks { + let state = self.state.lock().unwrap(); + let commitment_number = trusted_tx.commitment_number(); + if state.last_holder_revoked_commitment - 1 != commitment_number && state.last_holder_revoked_commitment - 2 != commitment_number { + if !self.disable_revocation_policy_check { + panic!("can only sign the next two unrevoked commitment numbers, revoked={} vs requested={} for {}", + state.last_holder_revoked_commitment, commitment_number, self.inner.commitment_seed[0]) + } } } Ok(self.inner.sign_holder_commitment(commitment_tx, secp_ctx).unwrap()) @@ -284,13 +302,15 @@ impl EcdsaChannelSigner for TestChannelSigner { if !self.is_signer_available(SignerOp::SignHolderHtlcTransaction) { return Err(()); } - let state = self.state.lock().unwrap(); - if state.last_holder_revoked_commitment - 1 != htlc_descriptor.per_commitment_number && - state.last_holder_revoked_commitment - 2 != htlc_descriptor.per_commitment_number - { - if !self.disable_revocation_policy_check { - panic!("can only sign the next two unrevoked commitment numbers, revoked={} vs requested={} for {}", - state.last_holder_revoked_commitment, htlc_descriptor.per_commitment_number, self.inner.commitment_seed[0]) + if !self.disable_all_state_policy_checks { + let state = self.state.lock().unwrap(); + 
if state.last_holder_revoked_commitment - 1 != htlc_descriptor.per_commitment_number && + state.last_holder_revoked_commitment - 2 != htlc_descriptor.per_commitment_number + { + if !self.disable_revocation_policy_check { + panic!("can only sign the next two unrevoked commitment numbers, revoked={} vs requested={} for {}", + state.last_holder_revoked_commitment, htlc_descriptor.per_commitment_number, self.inner.commitment_seed[0]) + } } } assert_eq!(htlc_tx.input[input], htlc_descriptor.unsigned_tx_input()); diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 5bd5acaf176..317b46a02ef 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -327,7 +327,8 @@ impl SignerProvider for OnlyReadsKeysInterface { Ok(TestChannelSigner::new_with_revoked( inner, state, - false + false, + false, )) } @@ -1263,7 +1264,8 @@ pub struct TestKeysInterface { pub backing: sign::PhantomKeysManager, pub override_random_bytes: Mutex>, pub disable_revocation_policy_check: bool, - enforcement_states: Mutex>>>, + pub disable_all_state_policy_checks: bool, + enforcement_states: Mutex>>>, expectations: Mutex>>, pub unavailable_signers_ops: Mutex>>, pub next_signer_disabled_ops: Mutex>, @@ -1319,7 +1321,9 @@ impl SignerProvider for TestKeysInterface { fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> TestChannelSigner { let keys = self.backing.derive_channel_signer(channel_value_satoshis, channel_keys_id); let state = self.make_enforcement_state_cell(keys.commitment_seed); - let signer = TestChannelSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check); + let rev_checks = self.disable_revocation_policy_check; + let state_checks = self.disable_all_state_policy_checks; + let signer = TestChannelSigner::new_with_revoked(keys, state, rev_checks, state_checks); #[cfg(test)] if let Some(ops) = self.unavailable_signers_ops.lock().unwrap().get(&channel_keys_id) { for &op in ops { @@ -1342,7 +1346,8 @@ impl SignerProvider for TestKeysInterface { Ok(TestChannelSigner::new_with_revoked( inner, state, - self.disable_revocation_policy_check + self.disable_revocation_policy_check, + self.disable_all_state_policy_checks, )) } @@ -1366,6 +1371,7 @@ impl TestKeysInterface { backing: sign::PhantomKeysManager::new(seed, now.as_secs(), now.subsec_nanos(), seed), override_random_bytes: Mutex::new(None), disable_revocation_policy_check: false, + disable_all_state_policy_checks: false, enforcement_states: Mutex::new(new_hash_map()), expectations: Mutex::new(None), unavailable_signers_ops: Mutex::new(new_hash_map()), From 87bb72d94b8689240a6c2690bbe56747950418dc Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 2 Feb 2025 16:27:25 +0000 Subject: [PATCH 082/105] Add a simple test of upgrading from LDK 0.1 One major hole in our test coverage historically has been tests covering upgrades or downgrades across LDK versions. Luckily, these aren't particularly hard to write as cargo lets us depend on previous versions of the `lightning` crate directly, which we can use in tests. Here we add a simple initial test of upgrading from LDK 0.1 while there's a pending payment to be claimed. 
--- Cargo.toml | 1 + ci/ci-tests.sh | 6 ++ lightning-tests/Cargo.toml | 21 ++++++ lightning-tests/src/lib.rs | 5 ++ .../src/upgrade_downgrade_tests.rs | 65 +++++++++++++++++++ lightning/src/ln/functional_test_utils.rs | 2 +- lightning/src/onion_message/messenger.rs | 4 +- 7 files changed, 101 insertions(+), 3 deletions(-) create mode 100644 lightning-tests/Cargo.toml create mode 100644 lightning-tests/src/lib.rs create mode 100644 lightning-tests/src/upgrade_downgrade_tests.rs diff --git a/Cargo.toml b/Cargo.toml index dc3eb92c7e2..2f73851d114 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,6 +21,7 @@ members = [ exclude = [ "lightning-transaction-sync", + "lightning-tests", "no-std-check", "msrv-no-dev-deps-check", "bench", diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh index 1d11a5fd624..3be6afde89a 100755 --- a/ci/ci-tests.sh +++ b/ci/ci-tests.sh @@ -57,6 +57,12 @@ for DIR in "${WORKSPACE_MEMBERS[@]}"; do cargo doc -p "$DIR" --document-private-items done +echo -e "\n\nTesting upgrade from prior versions of LDK" +pushd lightning-tests +[ "$RUSTC_MINOR_VERSION" -lt 65 ] && cargo update -p regex --precise "1.9.6" --verbose +cargo test +popd + echo -e "\n\nChecking and testing Block Sync Clients with features" cargo test -p lightning-block-sync --verbose --color always --features rest-client diff --git a/lightning-tests/Cargo.toml b/lightning-tests/Cargo.toml new file mode 100644 index 00000000000..75c68e03f5a --- /dev/null +++ b/lightning-tests/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "lightning-tests" +version = "0.0.1" +authors = ["Matt Corallo"] +license = "MIT OR Apache-2.0" +repository = "https://github.com/lightningdevkit/rust-lightning/" +description = "Tests for LDK crates" +edition = "2021" + +[features] + +[dependencies] +lightning-types = { path = "../lightning-types", features = ["_test_utils"] } +lightning-invoice = { path = "../lightning-invoice", default-features = false } +lightning-macros = { path = "../lightning-macros" } +lightning = { path = "../lightning", features = ["_test_utils"] } +lightning_0_1 = { package = "lightning", version = "0.1.1", features = ["_test_utils"] } + +bitcoin = { version = "0.32.2", default-features = false } + +[dev-dependencies] diff --git a/lightning-tests/src/lib.rs b/lightning-tests/src/lib.rs new file mode 100644 index 00000000000..c028193d692 --- /dev/null +++ b/lightning-tests/src/lib.rs @@ -0,0 +1,5 @@ +#[cfg_attr(test, macro_use)] +extern crate lightning; + +#[cfg(all(test, not(taproot)))] +pub mod upgrade_downgrade_tests; diff --git a/lightning-tests/src/upgrade_downgrade_tests.rs b/lightning-tests/src/upgrade_downgrade_tests.rs new file mode 100644 index 00000000000..0a989553752 --- /dev/null +++ b/lightning-tests/src/upgrade_downgrade_tests.rs @@ -0,0 +1,65 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. + +//! Tests which test upgrading from previous versions of LDK or downgrading to previous versions of +//! LDK. 
+ +use lightning_0_1::get_monitor as get_monitor_0_1; +use lightning_0_1::ln::functional_test_utils as lightning_0_1_utils; +use lightning_0_1::util::ser::Writeable; + +use lightning::ln::functional_test_utils::*; + +use lightning_types::payment::PaymentPreimage; + +#[test] +fn simple_upgrade() { + // Tests a simple case of upgrading from LDK 0.1 with a pending payment + let (node_a_ser, node_b_ser, mon_a_ser, mon_b_ser, preimage); + { + let chanmon_cfgs = lightning_0_1_utils::create_chanmon_cfgs(2); + let node_cfgs = lightning_0_1_utils::create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = lightning_0_1_utils::create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = lightning_0_1_utils::create_network(2, &node_cfgs, &node_chanmgrs); + + let chan_id = lightning_0_1_utils::create_announced_chan_between_nodes(&nodes, 0, 1).2; + + let payment_preimage = + lightning_0_1_utils::route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + preimage = PaymentPreimage(payment_preimage.0 .0); + + node_a_ser = nodes[0].node.encode(); + node_b_ser = nodes[1].node.encode(); + mon_a_ser = get_monitor_0_1!(nodes[0], chan_id).encode(); + mon_b_ser = get_monitor_0_1!(nodes[1], chan_id).encode(); + } + + // Create a dummy node to reload over with the 0.1 state + + let mut chanmon_cfgs = create_chanmon_cfgs(2); + + // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let (persister_a, persister_b, chain_mon_a, chain_mon_b); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let (node_a, node_b); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let config = test_default_channel_config(); + let a_mons = &[&mon_a_ser[..]]; + reload_node!(nodes[0], config.clone(), &node_a_ser, a_mons, persister_a, chain_mon_a, node_a); + reload_node!(nodes[1], config, &node_b_ser, &[&mon_b_ser], persister_b, chain_mon_b, node_b); + + reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); + + claim_payment(&nodes[0], &[&nodes[1]], preimage); +} diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index be77547b79c..04295073861 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1196,7 +1196,7 @@ pub fn _reload_node<'a, 'b, 'c>(node: &'a Node<'a, 'b, 'c>, default_config: User node_deserialized } -#[cfg(test)] +#[macro_export] macro_rules! 
reload_node { ($node: expr, $new_config: expr, $chanman_encoded: expr, $monitors_encoded: expr, $persister: ident, $new_chain_monitor: ident, $new_channelmanager: ident) => { let chanman_encoded = $chanman_encoded; diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs index f076e6a9da4..c326cfca804 100644 --- a/lightning/src/onion_message/messenger.rs +++ b/lightning/src/onion_message/messenger.rs @@ -1172,8 +1172,8 @@ where } } - #[cfg(test)] - pub(crate) fn set_offers_handler(&mut self, offers_handler: OMH) { + #[cfg(any(test, feature = "_test_utils"))] + pub fn set_offers_handler(&mut self, offers_handler: OMH) { self.offers_handler = offers_handler; } From bc9ffe44332c1636c11ea3c0f65aeafbfd40ccb7 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 21 May 2025 15:42:16 +0000 Subject: [PATCH 083/105] Read `ChannelManager` even if we have no-peer post-update actions In 93b4479e472e6767af5df90fecdcdfb79074e260 we fixed an issue which could cause a `ChannelMonitorUpdate` to get marked as blocked on itself, leading to an eventual force-closure. One potential side-effect of that issue, however, is that any further `ChannelMonitorUpdate`s to the same channel while it is blocked will not have any post-update actions processed (as there is a pending blocked `ChannelMonitorUpdate` sitting in the channel). This can leave a dangling `MonitorUpdateCompletionAction` sitting around even after the channel is closed. In 0.1, because `ChannelMonitorUpdate`s to closed channels were finally fully tracked, we started enforcing that any post-update completion action we had on startup corresponded to a peer entry, while at the same time no longer creating peer entries just because we had serialized one in the data we were loading (only creating them if we had channel(s) or a `ChannelMonitor`). This can cause some `ChannelManager` to no longer deserialize on 0.1 as we might have a left-over dangling `MonitorUpdateCompletionAction` and will no longer always have a peer entry just because of it. Here we fix this issue by specifically checking for dangling `MonitorUpdateCompletionAction::PaymentClaim` entries and dropping them if there is no corresponding channel or peer state entry. We only check for `PaymentClaimed` actions rather than allowing for any dangling actions as 93b4479e472e6767af5df90fecdcdfb79074e260 was only triggerable with MPP claims, so dangling `MonitorUpdateCompletionAction`s for forwarded payments should be exceedingly rare. This also adds an upgrade test to test a slightly convoluted version of this scenario. 
Trivial conflicts addressed in: * lightning/src/ln/channelmanager.rs --- lightning-tests/Cargo.toml | 1 + .../src/upgrade_downgrade_tests.rs | 152 +++++++++++++++++- lightning/src/ln/channelmanager.rs | 34 +++- 3 files changed, 183 insertions(+), 4 deletions(-) diff --git a/lightning-tests/Cargo.toml b/lightning-tests/Cargo.toml index 75c68e03f5a..23c81fae4a3 100644 --- a/lightning-tests/Cargo.toml +++ b/lightning-tests/Cargo.toml @@ -15,6 +15,7 @@ lightning-invoice = { path = "../lightning-invoice", default-features = false } lightning-macros = { path = "../lightning-macros" } lightning = { path = "../lightning", features = ["_test_utils"] } lightning_0_1 = { package = "lightning", version = "0.1.1", features = ["_test_utils"] } +lightning_0_0_125 = { package = "lightning", version = "0.0.125", features = ["_test_utils"] } bitcoin = { version = "0.32.2", default-features = false } diff --git a/lightning-tests/src/upgrade_downgrade_tests.rs b/lightning-tests/src/upgrade_downgrade_tests.rs index 0a989553752..2b57cd23a9a 100644 --- a/lightning-tests/src/upgrade_downgrade_tests.rs +++ b/lightning-tests/src/upgrade_downgrade_tests.rs @@ -12,7 +12,21 @@ use lightning_0_1::get_monitor as get_monitor_0_1; use lightning_0_1::ln::functional_test_utils as lightning_0_1_utils; -use lightning_0_1::util::ser::Writeable; +use lightning_0_1::util::ser::Writeable as _; + +use lightning_0_0_125::chain::ChannelMonitorUpdateStatus as ChannelMonitorUpdateStatus_0_0_125; +use lightning_0_0_125::check_added_monitors as check_added_monitors_0_0_125; +use lightning_0_0_125::events::ClosureReason as ClosureReason_0_0_125; +use lightning_0_0_125::expect_payment_claimed as expect_payment_claimed_0_0_125; +use lightning_0_0_125::get_htlc_update_msgs as get_htlc_update_msgs_0_0_125; +use lightning_0_0_125::get_monitor as get_monitor_0_0_125; +use lightning_0_0_125::get_revoke_commit_msgs as get_revoke_commit_msgs_0_0_125; +use lightning_0_0_125::ln::channelmanager::PaymentId as PaymentId_0_0_125; +use lightning_0_0_125::ln::channelmanager::RecipientOnionFields as RecipientOnionFields_0_0_125; +use lightning_0_0_125::ln::functional_test_utils as lightning_0_0_125_utils; +use lightning_0_0_125::ln::msgs::ChannelMessageHandler as _; +use lightning_0_0_125::routing::router as router_0_0_125; +use lightning_0_0_125::util::ser::Writeable as _; use lightning::ln::functional_test_utils::*; @@ -63,3 +77,139 @@ fn simple_upgrade() { claim_payment(&nodes[0], &[&nodes[1]], preimage); } + +#[test] +fn test_125_dangling_post_update_actions() { + // Tests a failure of upgrading from 0.0.125 to 0.1 when there's a dangling + // `MonitorUpdateCompletionAction` due to the bug fixed in + // 93b4479e472e6767af5df90fecdcdfb79074e260. + let (node_d_ser, mon_ser); + { + // First, we get RAA-source monitor updates held by using async persistence (note that this + // issue was first identified as a consequence of the bug fixed in + // 93b4479e472e6767af5df90fecdcdfb79074e260 but in order to replicate that bug we need a + // complicated multi-threaded race that is not deterministic, thus we "cheat" here by using + // async persistence). We do this by simply claiming an MPP payment and not completing the + // second channel's `ChannelMonitorUpdate`, blocking RAA `ChannelMonitorUpdate`s from the + // first (which is ultimately a very similar bug to the one fixed in 93b4479e472e6767af5df). 
+ // + // Then, we claim a second payment on the channel, which ultimately doesn't have its + // `ChannelMonitorUpdate` completion handled due to the presence of the blocked + // `ChannelMonitorUpdate`. The claim also generates a post-update completion action, but + // the `ChannelMonitorUpdate` isn't queued due to the RAA-update block. + let chanmon_cfgs = lightning_0_0_125_utils::create_chanmon_cfgs(4); + let node_cfgs = lightning_0_0_125_utils::create_node_cfgs(4, &chanmon_cfgs); + let node_chanmgrs = + lightning_0_0_125_utils::create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); + let nodes = lightning_0_0_125_utils::create_network(4, &node_cfgs, &node_chanmgrs); + + let node_b_id = nodes[1].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + + lightning_0_0_125_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 1, 100_000, 0, + ); + lightning_0_0_125_utils::create_announced_chan_between_nodes_with_value( + &nodes, 0, 2, 100_000, 0, + ); + let chan_id_1_3 = lightning_0_0_125_utils::create_announced_chan_between_nodes_with_value( + &nodes, 1, 3, 100_000, 0, + ) + .2; + let chan_id_2_3 = lightning_0_0_125_utils::create_announced_chan_between_nodes_with_value( + &nodes, 2, 3, 100_000, 0, + ) + .2; + + let (preimage, hash, secret) = + lightning_0_0_125_utils::get_payment_preimage_hash(&nodes[3], Some(15_000_000), None); + + let pay_params = router_0_0_125::PaymentParameters::from_node_id( + node_d_id, + lightning_0_0_125_utils::TEST_FINAL_CLTV, + ) + .with_bolt11_features(nodes[3].node.bolt11_invoice_features()) + .unwrap(); + + let route_params = + router_0_0_125::RouteParameters::from_payment_params_and_value(pay_params, 15_000_000); + let route = lightning_0_0_125_utils::get_route(&nodes[0], &route_params).unwrap(); + + let onion = RecipientOnionFields_0_0_125::secret_only(secret); + let id = PaymentId_0_0_125(hash.0); + nodes[0].node.send_payment_with_route(route, hash, onion, id).unwrap(); + + check_added_monitors_0_0_125!(nodes[0], 2); + let paths = &[&[&nodes[1], &nodes[3]][..], &[&nodes[2], &nodes[3]]]; + lightning_0_0_125_utils::pass_along_route(&nodes[0], paths, 15_000_000, hash, secret); + + let preimage_2 = lightning_0_0_125_utils::route_payment(&nodes[1], &[&nodes[3]], 100_000).0; + + chanmon_cfgs[3].persister.set_update_ret(ChannelMonitorUpdateStatus_0_0_125::InProgress); + chanmon_cfgs[3].persister.set_update_ret(ChannelMonitorUpdateStatus_0_0_125::InProgress); + nodes[3].node.claim_funds(preimage); + check_added_monitors_0_0_125!(nodes[3], 2); + + let (outpoint, update_id, _) = { + let latest_monitors = nodes[3].chain_monitor.latest_monitor_update_id.lock().unwrap(); + latest_monitors.get(&chan_id_1_3).unwrap().clone() + }; + nodes[3].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, update_id).unwrap(); + expect_payment_claimed_0_0_125!(nodes[3], hash, 15_000_000); + + let ds_fulfill = get_htlc_update_msgs_0_0_125!(nodes[3], node_b_id); + // Due to an unrelated test bug in 0.0.125, we have to leave the `ChannelMonitorUpdate` for + // the previous node un-completed or we will panic when dropping the `Node`. 
+ chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus_0_0_125::InProgress); + nodes[1].node.handle_update_fulfill_htlc(&node_d_id, &ds_fulfill.update_fulfill_htlcs[0]); + check_added_monitors_0_0_125!(nodes[1], 1); + + nodes[1].node.handle_commitment_signed(&node_d_id, &ds_fulfill.commitment_signed); + check_added_monitors_0_0_125!(nodes[1], 1); + + // The `ChannelMonitorUpdate` generated by the RAA from node B to node D will be blocked. + let (bs_raa, _) = get_revoke_commit_msgs_0_0_125!(nodes[1], node_d_id); + nodes[3].node.handle_revoke_and_ack(&node_b_id, &bs_raa); + check_added_monitors_0_0_125!(nodes[3], 0); + + // Now that there is a blocked update in the B <-> D channel, we can claim the second + // payment across it, which, while it will generate a `ChannelMonitorUpdate`, will not + // complete its post-update actions. + nodes[3].node.claim_funds(preimage_2); + check_added_monitors_0_0_125!(nodes[3], 1); + + // Finally, we set up the failure by force-closing the channel in question, ensuring that + // 0.1 will not create a per-peer state for node B. + let err = "Force Closing Channel".to_owned(); + nodes[3].node.force_close_without_broadcasting_txn(&chan_id_1_3, &node_b_id, err).unwrap(); + let reason = + ClosureReason_0_0_125::HolderForceClosed { broadcasted_latest_txn: Some(false) }; + let peers = &[node_b_id]; + lightning_0_0_125_utils::check_closed_event(&nodes[3], 1, reason, false, peers, 100_000); + lightning_0_0_125_utils::check_closed_broadcast(&nodes[3], 1, true); + check_added_monitors_0_0_125!(nodes[3], 1); + + node_d_ser = nodes[3].node.encode(); + mon_ser = get_monitor_0_0_125!(nodes[3], chan_id_2_3).encode(); + } + + // Create a dummy node to reload over with the 0.0.125 state + + let mut chanmon_cfgs = create_chanmon_cfgs(4); + + // Our TestChannelSigner will fail as we're jumping ahead, so disable its state-based checks + chanmon_cfgs[0].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[1].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[2].keys_manager.disable_all_state_policy_checks = true; + chanmon_cfgs[3].keys_manager.disable_all_state_policy_checks = true; + + let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); + let (persister, chain_mon); + let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); + let node; + let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); + + // Finally, reload the node in the latest LDK. This previously failed. 
+ let config = test_default_channel_config(); + reload_node!(nodes[3], config, &node_d_ser, &[&mon_ser], persister, chain_mon, node); +} diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 12362878524..b973432056a 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -13582,9 +13582,17 @@ where $monitor: expr, $peer_state: expr, $logger: expr, $channel_info_log: expr ) => { { let mut max_in_flight_update_id = 0; + let starting_len = $chan_in_flight_upds.len(); $chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id()); + if $chan_in_flight_upds.len() < starting_len { + log_debug!( + $logger, + "{} ChannelMonitorUpdates completed after ChannelManager was last serialized", + starting_len - $chan_in_flight_upds.len() + ); + } for update in $chan_in_flight_upds.iter() { - log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}", + log_debug!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}", update.update_id, $channel_info_log, &$monitor.channel_id()); max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id); pending_background_events.push( @@ -14148,11 +14156,31 @@ where debug_assert!(false, "Non-event-generating channel freeing should not appear in our queue"); } } + // Note that we may have a post-update action for a channel that has no pending + // `ChannelMonitorUpdate`s, but unlike the no-peer-state case, it may simply be + // because we had a `ChannelMonitorUpdate` complete after the last time this + // `ChannelManager` was serialized. In that case, we'll run the post-update + // actions as soon as we get going. } peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions; } else { - log_error!(WithContext::from(&args.logger, Some(node_id), None, None), "Got blocked actions without a per-peer-state for {}", node_id); - return Err(DecodeError::InvalidValue); + for actions in monitor_update_blocked_actions.values() { + for action in actions.iter() { + if matches!(action, MonitorUpdateCompletionAction::PaymentClaimed { .. }) { + // If there is no state for this channel but we have pending + // post-update actions, it's possible that one was left over from pre-0.1 + // payment claims where MPP claims led to a channel blocked on itself + // and later `ChannelMonitorUpdate`s didn't get their post-update + // actions run. + // This should only have happened for `PaymentClaimed` post-update actions, + // which we ignore here. + } else { + let logger = WithContext::from(&args.logger, Some(node_id), None, None); + log_error!(logger, "Got blocked actions {:?} without a per-peer-state for {}", monitor_update_blocked_actions, node_id); + return Err(DecodeError::InvalidValue); + } + } + } } } From edcd3761ae4edb0b1e5ce568667c07a3da98c776 Mon Sep 17 00:00:00 2001 From: Leo Nash Date: Fri, 23 May 2025 17:54:55 +0000 Subject: [PATCH 084/105] Do not dip into the funder's reserve to cover the two anchors At all times, the funder's balance should cover the commitment transaction fee, any non-zero-value anchors, and the fundee-selected channel reserve. Prior to this commit, we would allow the funder to dip into its reserve to pay for the two 330 sat anchors. LDK sets reserves to at least 1000 sat, so two 330 sat anchors would never overdraw this reserve. We now prevent any such dips, and ensure that the funder can pay for the complete sum of the transaction fee, the anchors, and the reserve.
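As a rough illustration of the invariant this patch enforces (a sketch only, with simplified sat-denominated arguments and a hypothetical helper name; the real check in `channel.rs` below works on msat balances from the commitment stats):

    // Sketch: the funder's balance must cover the commitment tx fee, both
    // anchors (when negotiated), and the counterparty-selected reserve.
    const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;

    fn funder_can_afford(
        funder_balance_sat: u64, commit_tx_fee_sat: u64, reserve_sat: u64,
        has_anchors: bool,
    ) -> bool {
        let anchors_sat = if has_anchors { 2 * ANCHOR_OUTPUT_VALUE_SATOSHI } else { 0 };
        funder_balance_sat >= commit_tx_fee_sat + anchors_sat + reserve_sat
    }

With the minimum 1000 sat reserve, the pre-patch behavior could let the two anchors eat up to 660 sats out of the reserve; post-patch the funder must cover them on top of it.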
Substantial conflicts resulted in the `channel.rs` parts of this patch being rewritten. The `functional_tests.rs` changes also conflicted but were re-applied to the proper file. --- lightning/src/ln/channel.rs | 14 ++++++-- lightning/src/ln/functional_tests.rs | 51 ++++++++++++++++++++++------ 2 files changed, 52 insertions(+), 13 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index c15a4bee643..96492ef97f2 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -5039,7 +5039,12 @@ impl Channel where if update_fee { debug_assert!(!self.context.is_outbound()); let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000; - if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat { + let total_anchor_sats = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { + ANCHOR_OUTPUT_VALUE_SATOSHI * 2 + } else { + 0 + }; + if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + total_anchor_sats * 1000 + counterparty_reserve_we_require_msat { return Err(ChannelError::close("Funding remote cannot afford proposed new fee".to_owned())); } } @@ -5772,7 +5777,12 @@ impl Channel where let commitment_stats = self.context.build_commitment_transaction(self.holder_commitment_point.transaction_number(), &keys, true, true, logger); let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + htlc_stats.on_holder_tx_outbound_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000; let holder_balance_msat = commitment_stats.local_balance_msat - htlc_stats.outbound_holding_cell_msat; - if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 { + let total_anchor_sats = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { + ANCHOR_OUTPUT_VALUE_SATOSHI * 2 + } else { + 0 + }; + if holder_balance_msat < buffer_fee_msat + total_anchor_sats * 1000 + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 { //TODO: auto-close after a number of failures?
log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw); return None; diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index bdb1621771f..fc8bf38519d 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -24,7 +24,7 @@ use crate::ln::types::ChannelId; use crate::types::payment::{PaymentPreimage, PaymentSecret, PaymentHash}; use crate::ln::channel::{CONCURRENT_INBOUND_HTLC_FEE_BUFFER, FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE, MIN_AFFORDABLE_HTLC_COUNT, get_holder_selected_channel_reserve_satoshis, OutboundV1Channel, InboundV1Channel, COINBASE_MATURITY, ChannelPhase}; use crate::ln::channelmanager::{self, PaymentId, RAACommitmentOrder, RecipientOnionFields, BREAKDOWN_TIMEOUT, ENABLE_GOSSIP_TICKS, DISABLE_GOSSIP_TICKS, MIN_CLTV_EXPIRY_DELTA}; -use crate::ln::channel::{DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError}; +use crate::ln::channel::{ANCHOR_OUTPUT_VALUE_SATOSHI, DISCONNECT_PEER_AWAITING_RESPONSE_TICKS, ChannelError}; use crate::ln::{chan_utils, onion_utils}; use crate::ln::chan_utils::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC, OFFERED_HTLC_SCRIPT_WEIGHT, htlc_success_tx_weight, htlc_timeout_tx_weight, HTLCOutputInCommitment}; use crate::routing::gossip::{NetworkGraph, NetworkUpdate}; @@ -673,28 +673,49 @@ fn test_update_fee_vanilla() { check_added_monitors!(nodes[1], 1); } -#[test] -fn test_update_fee_that_funder_cannot_afford() { +pub fn do_test_update_fee_that_funder_cannot_afford(channel_type_features: ChannelTypeFeatures) { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + + let mut default_config = test_default_channel_config(); + if channel_type_features == ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies() { + default_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + // this setting is also needed to create an anchor channel + default_config.manually_accept_inbound_channels = true; + } + + let node_chanmgrs = create_node_chanmgrs( + 2, + &node_cfgs, + &[Some(default_config.clone()), Some(default_config.clone())], + ); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_value = 5000; let push_sats = 700; let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, push_sats * 1000); let channel_id = chan.2; let secp_ctx = Secp256k1::new(); - let default_config = UserConfig::default(); let bs_channel_reserve_sats = get_holder_selected_channel_reserve_satoshis(channel_value, &default_config); - let channel_type_features = ChannelTypeFeatures::only_static_remote_key(); + let (anchor_outputs_value_sats, outputs_num_no_htlcs) = + if channel_type_features.supports_anchors_zero_fee_htlc_tx() { + (ANCHOR_OUTPUT_VALUE_SATOSHI * 2, 4) + } else { + (0, 2) + }; // Calculate the maximum feerate that A can afford. Note that we don't send an update_fee // CONCURRENT_INBOUND_HTLC_FEE_BUFFER HTLCs before actually running out of local balance, so we // calculate two different feerates here - the expected local limit as well as the expected // remote limit. 
- let feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / (commitment_tx_base_weight(&channel_type_features) + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32; - let non_buffer_feerate = ((channel_value - bs_channel_reserve_sats - push_sats) * 1000 / commitment_tx_base_weight(&channel_type_features)) as u32; + let feerate = + ((channel_value - bs_channel_reserve_sats - push_sats - anchor_outputs_value_sats) * 1000 + / (commitment_tx_base_weight(&channel_type_features) + + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC)) as u32; + let non_buffer_feerate = + ((channel_value - bs_channel_reserve_sats - push_sats - anchor_outputs_value_sats) * 1000 + / commitment_tx_base_weight(&channel_type_features)) as u32; { let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); *feerate_lock = feerate; @@ -711,8 +732,8 @@ fn test_update_fee_that_funder_cannot_afford() { { let commitment_tx = get_local_commitment_txn!(nodes[1], channel_id)[0].clone(); - //We made sure neither party's funds are below the dust limit and there are no HTLCs here - assert_eq!(commitment_tx.output.len(), 2); + // We made sure neither party's funds are below the dust limit and there are no HTLCs here + assert_eq!(commitment_tx.output.len(), outputs_num_no_htlcs); let total_fee: u64 = commit_tx_fee_msat(feerate, 0, &channel_type_features) / 1000; let mut actual_fee = commitment_tx.output.iter().fold(0, |acc, output| acc + output.value.to_sat()); actual_fee = channel_value - actual_fee; @@ -771,7 +792,7 @@ fn test_update_fee_that_funder_cannot_afford() { let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data( INITIAL_COMMITMENT_NUMBER - 1, push_sats, - channel_value - push_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) / 1000, + channel_value - push_sats - anchor_outputs_value_sats - commit_tx_fee_msat(non_buffer_feerate + 4, 0, &channel_type_features) / 1000, local_funding, remote_funding, commit_tx_keys.clone(), non_buffer_feerate + 4, @@ -808,6 +829,14 @@ fn test_update_fee_that_funder_cannot_afford() { [nodes[0].node.get_our_node_id()], channel_value); } +#[test] +pub fn test_update_fee_that_funder_cannot_afford() { + do_test_update_fee_that_funder_cannot_afford(ChannelTypeFeatures::only_static_remote_key()); + do_test_update_fee_that_funder_cannot_afford( + ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), + ); +} + #[test] fn test_update_fee_with_fundee_update_add_htlc() { let chanmon_cfgs = create_chanmon_cfgs(2); From 96e0f34bb4b3c61ab958bc554937077727c6300b Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 23 May 2025 20:26:56 +0000 Subject: [PATCH 085/105] Bump the `lightning` crate version to 0.1.4 --- lightning/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml index aaf6f60023c..d0c7336653a 100644 --- a/lightning/Cargo.toml +++ b/lightning/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning" -version = "0.1.3" +version = "0.1.4" authors = ["Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning/" From 1fcaca29e20c5d17ea4e8cf231a39c295dfbb8de Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 23 May 2025 20:27:05 +0000 Subject: [PATCH 086/105] Add release notes for LDK 0.1.4 --- CHANGELOG.md | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 
80652dcb743..7c68aba4c31 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,28 @@ +# 0.1.4 - May 23, 2025 - "Careful Validation of Bogus States" + +## Bug Fixes + * In cases where using synchronous persistence with higher latency than the + latency to communicate with peers caused issues fixed in 0.1.2, + `ChannelManager`s may have been left in a state which LDK 0.1.2 and later + would refuse to deserialize. This has been fixed and nodes which experienced + this issue prior to 0.1.2 should now deserialize fine (#3790). + * In some cases, when using synchronous persistence with higher latency than + the latency to communicate with peers, when receiving an MPP payment with + multiple parts received over the same channel, a channel could hang and not + make progress, eventually leading to a force-closure due to timed-out HTLCs. + This has now been fixed (#3680). + +## Security +0.1.4 fixes a funds-theft vulnerability in exceedingly rare cases. + * If an LDK-based node funds an anchor channel to a malicious peer, and that + peer sets the channel reserve on the LDK-based node to zero, the LDK-node + could overdraw its total balance upon increasing the feerate of the + commitment transaction. If the malicious peer forwards HTLCs through the + LDK-based node, this could leave the LDK-based node with no valid commitment + transaction to broadcast to claim its part of the forwarded HTLC. The + counterparty would have to forfeit their reserve value (#3796). + + # 0.1.3 - Apr 30, 2025 - "Routing Unicode in 2025" ## Bug Fixes From 502b9b5aff65670b6c058330eef14b15d52ceed9 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 27 Mar 2025 19:45:24 +0000 Subject: [PATCH 087/105] Only run aggressive `test_node_counter_consistency` in tests `test_node_counter_consistency` can make gossip operations *really* slow. This makes it a pretty bad idea in a general node just running in debug mode. It also makes our `lightning-rapid-gossip-sync` real-world test painfully slow. Thus, here, we make `test_node_counter_consistency` only actually run in the `lightning`-crate tests, rather than always with `debug_assertions`. --- lightning/src/routing/gossip.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index fc097d5f915..7eb82722134 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -1779,7 +1779,7 @@ where } fn test_node_counter_consistency(&self) { - #[cfg(debug_assertions)] + #[cfg(test)] { let channels = self.channels.read().unwrap(); let nodes = self.nodes.read().unwrap(); From b1e9921c5750e8c69e2c8ffe33a6d6727631931c Mon Sep 17 00:00:00 2001 From: Fuyin Date: Fri, 13 Jun 2025 21:11:05 +0800 Subject: [PATCH 088/105] Fix `update_id` gap during `force_shutdown` When a channel is force-closed, there might be blocked monitor updates not yet applied. But `latest_monitor_update_id` has been incremented and assigned to these updates. This results in a panic when trying to apply the `ChannelForceClosed` update. Use the unblocked update id instead. Resolves: #3857 Conflicts resolved in: * lightning/src/ln/channel.rs due to `rustfmt`-induced changes. 
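The id selection itself is simple, sketched standalone here with an illustrative free function (mirroring `get_latest_unblocked_monitor_update_id` in the diff below): blocked updates already hold ids above the last update the `ChannelMonitor` has actually seen, so the force-close update must slot in immediately after the last unblocked id to avoid a gap.

    // Sketch, assuming `blocked_ids` is sorted ascending, as the channel's
    // `blocked_monitor_updates` queue is, and that ids are nonzero (update ids
    // start above zero, so the subtraction cannot underflow in practice).
    fn latest_unblocked_update_id(latest_update_id: u64, blocked_ids: &[u64]) -> u64 {
        match blocked_ids.first() {
            Some(first_blocked) => first_blocked - 1,
            None => latest_update_id,
        }
    }

For example, with `latest_monitor_update_id` at 7 and blocked updates holding ids 6 and 7, the monitor has only applied through id 5, so the `ChannelForceClosed` update is issued as id 6 rather than 8.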
--- lightning/src/ln/channel.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 96492ef97f2..cbfb26daa49 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -2452,6 +2452,13 @@ impl ChannelContext where SP::Target: SignerProvider { self.latest_monitor_update_id } + pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 { + if self.blocked_monitor_updates.is_empty() { + return self.get_latest_monitor_update_id(); + } + self.blocked_monitor_updates[0].update.update_id - 1 + } + pub fn should_announce(&self) -> bool { self.config.announce_for_forwarding } @@ -3890,7 +3897,7 @@ impl ChannelContext where SP::Target: SignerProvider { // monitor update to the user, even if we return one). // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more. if !self.channel_state.is_pre_funded_state() { - self.latest_monitor_update_id += 1; + self.latest_monitor_update_id = self.get_latest_unblocked_monitor_update_id() + 1; Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), ChannelMonitorUpdate { update_id: self.latest_monitor_update_id, counterparty_node_id: Some(self.counterparty_node_id), @@ -7128,8 +7135,7 @@ impl Channel where /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight. pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 { - if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); } - self.context.blocked_monitor_updates[0].update.update_id - 1 + self.context.get_latest_unblocked_monitor_update_id() } /// Returns the next blocked monitor update, if one exists, and a bool which indicates a From d2f3d1f8db3381cdff276a9dea984cb1ff5ae472 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 25 Jun 2025 02:38:33 +0000 Subject: [PATCH 089/105] Skip storing an explicit `node_id` in `RouteGraphNode` `RouteGraphNode` is the main heap entry in our Dijkstra's next-best heap. Thus, because it's rather constantly being sorted, we care a good bit about its size as fitting more of them on a cache line can provide some additional speed. In 43d250dadcdad54836eacd8b447bb36d5c8e6cb5, we switched from tracking nodes during pathfinding by their `NodeId` to a "counter" which allows us to avoid `HashMap` lookups for much of the pathfinding process. Because the `dist` lookup is now quite cheap (it's just a `Vec`), there's no reason to track `NodeId`s in the heap entries. Instead, we simply fetch the `NodeId` of the node via the `dist` map by examining its `candidate`'s pointer to its source `NodeId`. This allows us to remove the `NodeId` from `RouteGraphNode`, moving it from 64 to 32 bytes. This allows us to expand the `score` field size in a coming commit without expanding `RouteGraphNode`'s size. While we were doing the `dist` lookup in `add_entries_to_cheapest_to_target_node` anyway, the `NodeId` lookup via the `candidate` may not be free. Still, avoiding expanding `RouteGraphNode` above 128 bytes in a few commits is a nice win.
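The layout math, sketched with the field set as of this patch (`total_cltv_delta` shrinks to `u16` and `score` widens to `u128` in the following commits, keeping the total at 32 bytes):

    // Sketch of the heap entry after dropping the 33-byte `NodeId`: only small
    // scalar fields remain, so two entries fit in a 64-byte cache line. The
    // const assertion (the same pattern as in the diff below) fails the build
    // if the size ever regresses.
    #[repr(align(32))]
    struct RouteGraphNode {
        node_counter: u32,
        score: u64,
        value_contribution_msat: u64,
        total_cltv_delta: u32,
        path_length_to_node: u8,
    }
    const _GRAPH_NODE_32: () = assert!(core::mem::size_of::<RouteGraphNode>() == 32);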
--- lightning/src/routing/router.rs | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index c09a014dc62..681e285dd7c 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -1161,9 +1161,8 @@ impl_writeable_tlv_based!(RouteHintHop, { }); #[derive(Eq, PartialEq)] -#[repr(align(64))] // Force the size to 64 bytes +#[repr(align(32))] // Force the size to 32 bytes struct RouteGraphNode { - node_id: NodeId, node_counter: u32, score: u64, // The maximum value a yet-to-be-constructed payment path might flow through this node. @@ -1193,9 +1192,8 @@ impl cmp::PartialOrd for RouteGraphNode { } // While RouteGraphNode can be laid out with fewer bytes, performance appears to be improved -// substantially when it is laid out at exactly 64 bytes. -const _GRAPH_NODE_SMALL: usize = 64 - core::mem::size_of::(); -const _GRAPH_NODE_FIXED_SIZE: usize = core::mem::size_of::() - 64; +// substantially when it is laid out at exactly 32 bytes. +const _GRAPH_NODE_32: () = assert!(core::mem::size_of::() == 32); /// A [`CandidateRouteHop::FirstHop`] entry. #[derive(Clone, Debug)] @@ -2747,7 +2745,6 @@ where L::Target: Logger { } let new_graph_node = RouteGraphNode { - node_id: src_node_id, node_counter: src_node_counter, score: cmp::max(total_fee_msat, path_htlc_minimum_msat).saturating_add(path_penalty_msat), total_cltv_delta: hop_total_cltv_delta, @@ -2824,7 +2821,7 @@ where L::Target: Logger { // meaning how much will be paid in fees after this node (to the best of our knowledge). // This data can later be helpful to optimize routing (pay lower fees). macro_rules! add_entries_to_cheapest_to_target_node { - ( $node: expr, $node_counter: expr, $node_id: expr, $next_hops_value_contribution: expr, + ( $node_counter: expr, $node_id: expr, $next_hops_value_contribution: expr, $next_hops_cltv_delta: expr, $next_hops_path_length: expr ) => { let fee_to_target_msat; let next_hops_path_htlc_minimum_msat; @@ -2880,7 +2877,7 @@ where L::Target: Logger { } } - if let Some(node) = $node { + if let Some(node) = network_nodes.get(&$node_id) { let features = if let Some(node_info) = node.announcement_info.as_ref() { &node_info.features() } else { @@ -3007,7 +3004,7 @@ where L::Target: Logger { entry.value_contribution_msat = path_value_msat; } add_entries_to_cheapest_to_target_node!( - network_nodes.get(&payee), payee_node_counter, payee, path_value_msat, 0, 0 + payee_node_counter, payee, path_value_msat, 0, 0 ); } @@ -3082,11 +3079,11 @@ where L::Target: Logger { // Both these cases (and other cases except reaching recommended_value_msat) mean that // paths_collection will be stopped because found_new_path==false. // This is not necessarily a routing failure. - 'path_construction: while let Some(RouteGraphNode { node_id, node_counter, total_cltv_delta, mut value_contribution_msat, path_length_to_node, .. }) = targets.pop() { + 'path_construction: while let Some(RouteGraphNode { node_counter, total_cltv_delta, mut value_contribution_msat, path_length_to_node, .. }) = targets.pop() { // Since we're going payee-to-payer, hitting our node as a target means we should stop // traversing the graph and arrange the path out of what we found. 
- if node_id == our_node_id { + if node_counter == payer_node_counter { let mut new_entry = dist[payer_node_counter as usize].take().unwrap(); let mut ordered_hops: Vec<(PathBuildingHop, NodeFeatures)> = vec!((new_entry.clone(), default_node_features.clone())); @@ -3209,13 +3206,20 @@ where L::Target: Logger { // If we found a path back to the payee, we shouldn't try to process it again. This is // the equivalent of the `elem.was_processed` check in // add_entries_to_cheapest_to_target_node!() (see comment there for more info). - if node_id == maybe_dummy_payee_node_id { continue 'path_construction; } + if node_counter == payee_node_counter { continue 'path_construction; } + + let node_id = if let Some(entry) = &dist[node_counter as usize] { + entry.candidate.source() + } else { + debug_assert!(false, "Best nodes in the heap should have entries in dist"); + continue 'path_construction; + }; // Otherwise, since the current target node is not us, // keep "unrolling" the payment graph from payee to payer by // finding a way to reach the current target from the payer side. add_entries_to_cheapest_to_target_node!( - network_nodes.get(&node_id), node_counter, node_id, + node_counter, node_id, value_contribution_msat, total_cltv_delta, path_length_to_node ); From 6577c4a1f7a8abf3da45ead96247114b193d24c9 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 25 Jun 2025 02:49:17 +0000 Subject: [PATCH 090/105] Reduce `total_cltv_delta` size in `RouteGraphNode` We track the total CLTV from the recipient to the current hop in `RouteGraphNode` so that we can limit its total during pathfinding. While its great to use a `u32` for that to match existing CLTV types, allowing a total CLTV limit of 64K blocks (455 days) is somewhat absurd, so here we swap the `total_cltv_delta` to a `u16`. This keeps `RouteGraphNode` to 32 bytes in a coming commit as we expand `score`. --- lightning/src/routing/router.rs | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 681e285dd7c..e6025fa4802 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -1171,7 +1171,7 @@ struct RouteGraphNode { // - how much value can channels following this node (up to the destination) can contribute, // considering their capacity and fees value_contribution_msat: u64, - total_cltv_delta: u32, + total_cltv_delta: u16, /// The number of hops walked up to this node. path_length_to_node: u8, } @@ -2440,6 +2440,16 @@ where L::Target: Logger { // drop the requirement by setting this to 0. let mut channel_saturation_pow_half = payment_params.max_channel_saturation_power_of_half; + // In order to already account for some of the privacy enhancing random CLTV + // expiry delta offset we add on top later, we subtract a rough estimate + // (2*MEDIAN_HOP_CLTV_EXPIRY_DELTA) here. + let max_total_cltv_expiry_delta: u16 = + (payment_params.max_total_cltv_expiry_delta - final_cltv_expiry_delta) + .checked_sub(2*MEDIAN_HOP_CLTV_EXPIRY_DELTA) + .unwrap_or(payment_params.max_total_cltv_expiry_delta - final_cltv_expiry_delta) + .try_into() + .unwrap_or(u16::MAX); + // Keep track of how much liquidity has been used in selected channels or blinded paths. Used to // determine if the channel can be used by additional MPP paths or to inform path finding // decisions. 
It is aware of direction *only* to ensure that the correct htlc_maximum_msat value @@ -2529,15 +2539,9 @@ where L::Target: Logger { let exceeds_max_path_length = path_length_to_node > max_path_length; // Do not consider candidates that exceed the maximum total cltv expiry limit. - // In order to already account for some of the privacy enhancing random CLTV - // expiry delta offset we add on top later, we subtract a rough estimate - // (2*MEDIAN_HOP_CLTV_EXPIRY_DELTA) here. - let max_total_cltv_expiry_delta = (payment_params.max_total_cltv_expiry_delta - final_cltv_expiry_delta) - .checked_sub(2*MEDIAN_HOP_CLTV_EXPIRY_DELTA) - .unwrap_or(payment_params.max_total_cltv_expiry_delta - final_cltv_expiry_delta); let hop_total_cltv_delta = ($next_hops_cltv_delta as u32) .saturating_add(cltv_expiry_delta); - let exceeds_cltv_delta_limit = hop_total_cltv_delta > max_total_cltv_expiry_delta; + let exceeds_cltv_delta_limit = hop_total_cltv_delta > max_total_cltv_expiry_delta as u32; let value_contribution_msat = cmp::min(available_value_contribution_msat, $next_hops_value_contribution); @@ -2742,12 +2746,13 @@ where L::Target: Logger { #[cfg(all(not(ldk_bench), any(test, fuzzing)))] { assert!(!old_entry.best_path_from_hop_selected); + assert!(hop_total_cltv_delta <= u16::MAX as u32); } let new_graph_node = RouteGraphNode { node_counter: src_node_counter, score: cmp::max(total_fee_msat, path_htlc_minimum_msat).saturating_add(path_penalty_msat), - total_cltv_delta: hop_total_cltv_delta, + total_cltv_delta: hop_total_cltv_delta as u16, value_contribution_msat, path_length_to_node, }; From e7c1f8fb754eb7e1d87104319c4b5842fcb9d280 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 25 Jun 2025 11:53:51 +0000 Subject: [PATCH 091/105] Use `cost / path amt limit` as the pathfinding score, not `cost` While walking nodes in our Dijkstra's pathfinding, we may find a channel which is amount-limited to less than the amount we're currently trying to send. This is fine, and when we encounter such nodes we simply limit the amount we'd send in this path if we pick the channel. When we encounter such a path, we keep summing the cost across hops as we go, keeping whatever scores we assigned to channels between the amount-limited one and the recipient, but using the new limited amount for any channels we look at later as we walk towards the sender. This leads to somewhat inconsistent scores, especially as a large portion of our scorer's penalties, as well as a portion of network fees, are proportional to the amount. Thus, we end up with a somewhat higher score than we "should" for this path as later hops use a high proportional cost. We accepted this as a simple way to bias against small-value paths and many MPP parts. Sadly, in practice it appears our bias is not strong enough, as several users have reported that we often attempt far too many MPP parts. In practice, if we encounter a channel with a small limit early in the Dijkstra's pass (towards the end of the path), we may prefer it over many other paths as we start assigning very low costs early on before we've accumulated much cost from larger channels. Here, we swap the `cost` Dijkstra's score for `cost / path amount`. This should bias much more strongly against many MPP parts by preferring larger paths proportionally to their amount. This somewhat better aligns with our goal - if we have to pick multiple paths, we should be searching for paths that optimize fee-per-sat-sent, not strictly the fee paid.
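The proportional score is cheap to compute with fixed-point arithmetic; a sketch mirroring the `get_cost_per_msat` helper added in the diff below:

    // Shifting the fee cost into the top 64 bits of a u128 before dividing
    // keeps full precision without floating point.
    fn cost_per_msat(fee_cost_msat: u64, value_msat: u64) -> u128 {
        if fee_cost_msat == u64::MAX || value_msat == 0 {
            u128::MAX
        } else {
            ((fee_cost_msat as u128) << 64) / value_msat as u128
        }
    }

Under this score a path paying 1000 msat in fees to move 1,000,000 msat (a 0.1% fee rate) beats one paying 100 msat to move 10,000 msat (a 1% fee rate), where a raw-cost comparison would have preferred the latter.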
However, it might bias us against smaller paths somewhat stronger than we want - because we're still using the fees/scores calculated with the sought amount for hops processed already, but are now dividing by a smaller sent amount when walking further hops, we will bias "incorrectly" (and fairly strongly) against smaller parts. Still, because of the complaints on pathfinding performance due to too many MPP paths, it seems like a worthwhile tradeoff, as ultimately MPP splitting is always the domain of heuristics anyway. --- lightning/src/routing/router.rs | 260 ++++++++++++++++++++++++++-- lightning/src/routing/test_utils.rs | 2 +- 2 files changed, 246 insertions(+), 16 deletions(-) diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index e6025fa4802..33ad0f936ba 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -1164,7 +1164,7 @@ impl_writeable_tlv_based!(RouteHintHop, { #[repr(align(32))] // Force the size to 32 bytes struct RouteGraphNode { node_counter: u32, - score: u64, + score: u128, // The maximum value a yet-to-be-constructed payment path might flow through this node. // This value is upper-bounded by us by: // - how much is needed for a path being constructed @@ -1877,6 +1877,22 @@ impl<'a> PaymentPath<'a> { return result; } + /// Gets the cost (fees plus scorer penalty in msats) of the path divided by the value we + /// can/will send over the path. This is also the heap score during our Dijkstra's walk. + fn get_cost_per_msat(&self) -> u128 { + let fee_cost = self.get_cost_msat(); + let value_msat = self.get_value_msat(); + debug_assert!(value_msat > 0, "Paths should always send more than 0 msat"); + if fee_cost == u64::MAX || value_msat == 0 { + u64::MAX.into() + } else { + // In order to avoid integer division precision loss, we simply shift the costs up to + // the top half of a u128 and divide by the value (which is, at max, just under a u64). + ((fee_cost as u128) << 64) / value_msat as u128 + } + } + + /// Gets the fees plus scorer penalty in msats of the path. fn get_cost_msat(&self) -> u64 { self.get_total_fee_paid_msat().saturating_add(self.get_path_penalty_msat()) } @@ -2531,8 +2547,6 @@ where L::Target: Logger { *used_liquidity_msat }); - // Verify the liquidity offered by this channel complies to the minimal contribution. - let contributes_sufficient_value = available_value_contribution_msat >= minimal_value_contribution_msat; // Do not consider candidate hops that would exceed the maximum path length. let path_length_to_node = $next_hops_path_length + if $candidate.blinded_hint_idx().is_some() { 0 } else { 1 }; @@ -2544,6 +2558,8 @@ where L::Target: Logger { let exceeds_cltv_delta_limit = hop_total_cltv_delta > max_total_cltv_expiry_delta as u32; let value_contribution_msat = cmp::min(available_value_contribution_msat, $next_hops_value_contribution); + // Verify the liquidity offered by this channel complies to the minimal contribution. + let contributes_sufficient_value = value_contribution_msat >= minimal_value_contribution_msat; // Includes paying fees for the use of the following channels. 
let amount_to_transfer_over_msat: u64 = match value_contribution_msat.checked_add($next_hops_fee_msat) { Some(result) => result, @@ -2693,7 +2709,7 @@ where L::Target: Logger { // Ignore hops if augmenting the current path to them would put us over `max_total_routing_fee_msat` if total_fee_msat > max_total_routing_fee_msat { if should_log_candidate { - log_trace!(logger, "Ignoring {} due to exceeding max total routing fee limit.", LoggedCandidateHop(&$candidate)); + log_trace!(logger, "Ignoring {} with fee {total_fee_msat} due to exceeding max total routing fee limit {max_total_routing_fee_msat}.", LoggedCandidateHop(&$candidate)); if let Some(_) = first_hop_details { log_trace!(logger, @@ -2734,15 +2750,31 @@ where L::Target: Logger { // but it may require additional tracking - we don't want to double-count // the fees included in $next_hops_path_htlc_minimum_msat, but also // can't use something that may decrease on future hops. - let old_cost = cmp::max(old_entry.total_fee_msat, old_entry.path_htlc_minimum_msat) + let old_fee_cost = cmp::max(old_entry.total_fee_msat, old_entry.path_htlc_minimum_msat) .saturating_add(old_entry.path_penalty_msat); - let new_cost = cmp::max(total_fee_msat, path_htlc_minimum_msat) + let new_fee_cost = cmp::max(total_fee_msat, path_htlc_minimum_msat) .saturating_add(path_penalty_msat); - let should_replace = - new_cost < old_cost - || (new_cost == old_cost && old_entry.value_contribution_msat < value_contribution_msat); + // The actual score we use for our heap is the cost divided by how + // much we are thinking of sending over this channel. This avoids + // prioritizing channels that have a very low fee because we aren't + // sending very much over them. + // In order to avoid integer division precision loss, we simply + // shift the costs up to the top half of a u128 and divide by the + // value (which is, at max, just under a u64). + let old_cost = if old_fee_cost != u64::MAX && old_entry.value_contribution_msat != 0 { + ((old_fee_cost as u128) << 64) / old_entry.value_contribution_msat as u128 + } else { + u128::MAX + }; + let new_cost = if new_fee_cost != u64::MAX { + // value_contribution_msat is always >= 1, checked above via + // `contributes_sufficient_value`. + ((new_fee_cost as u128) << 64) / value_contribution_msat as u128 + } else { + u128::MAX + }; - if !old_entry.was_processed && should_replace { + if !old_entry.was_processed && new_cost < old_cost { #[cfg(all(not(ldk_bench), any(test, fuzzing)))] { assert!(!old_entry.best_path_from_hop_selected); @@ -2751,7 +2783,7 @@ where L::Target: Logger { let new_graph_node = RouteGraphNode { node_counter: src_node_counter, - score: cmp::max(total_fee_msat, path_htlc_minimum_msat).saturating_add(path_penalty_msat), + score: new_cost, total_cltv_delta: hop_total_cltv_delta as u16, value_contribution_msat, path_length_to_node, @@ -3299,10 +3331,7 @@ where L::Target: Logger { // First, sort by the cost-per-value of the path, dropping the paths that cost the most for // the value they contribute towards the payment amount. // We sort in descending order as we will remove from the front in `retain`, next. - selected_route.sort_unstable_by(|a, b| - (((b.get_cost_msat() as u128) << 64) / (b.get_value_msat() as u128)) - .cmp(&(((a.get_cost_msat() as u128) << 64) / (a.get_value_msat() as u128))) - ); + selected_route.sort_unstable_by(|a, b| b.get_cost_per_msat().cmp(&a.get_cost_per_msat())); // We should make sure that at least 1 path left. 
let mut paths_left = selected_route.len(); @@ -8654,6 +8683,207 @@ mod tests { assert_eq!(route.paths[0].hops[0].short_channel_id, 44); } + + #[test] + fn prefers_paths_by_cost_amt_ratio() { + // Previously, we preferred paths during MPP selection based on their absolute cost, rather + // than the cost-per-amount-transferred. This could result in selecting many MPP paths with + // relatively low value contribution, rather than one large path which is ultimately + // cheaper. While this is a tradeoff (and not universally better), in practice the old + // behavior was problematic, so we shifted to a proportional cost. + // + // Here we check that the proportional cost is being used in a somewhat absurd setup where + // we have one good path and several cheaper, but smaller paths. + let (secp_ctx, network_graph, gossip_sync, _, logger) = build_graph(); + let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx); + let scorer = ln_test_utils::TestScorer::new(); + let random_seed_bytes = [42; 32]; + + // Enable channel 1 + let update_1 = UnsignedChannelUpdate { + chain_hash: ChainHash::using_genesis_block(Network::Testnet), + short_channel_id: 1, + timestamp: 2, + message_flags: 1, // Only must_be_one + channel_flags: 0, + cltv_expiry_delta: (1 << 4) | 1, + htlc_minimum_msat: 0, + htlc_maximum_msat: 10_000_000, + fee_base_msat: 0, + fee_proportional_millionths: 0, + excess_data: Vec::new(), + }; + update_channel(&gossip_sync, &secp_ctx, &our_privkey, update_1); + + // Set the fee on channel 3 to 1 sat, max HTLC to 1M msat + let update_3 = UnsignedChannelUpdate { + chain_hash: ChainHash::using_genesis_block(Network::Testnet), + short_channel_id: 3, + timestamp: 2, + message_flags: 1, // Only must_be_one + channel_flags: 0, + cltv_expiry_delta: (3 << 4) | 1, + htlc_minimum_msat: 0, + htlc_maximum_msat: 1_000_000, + fee_base_msat: 1_000, + fee_proportional_millionths: 0, + excess_data: Vec::new(), + }; + update_channel(&gossip_sync, &secp_ctx, &privkeys[0], update_3); + + // Set the fee on channel 13 to 1 sat, max HTLC to 1M msat + let update_13 = UnsignedChannelUpdate { + chain_hash: ChainHash::using_genesis_block(Network::Testnet), + short_channel_id: 13, + timestamp: 2, + message_flags: 1, // Only must_be_one + channel_flags: 0, + cltv_expiry_delta: (13 << 4) | 1, + htlc_minimum_msat: 0, + htlc_maximum_msat: 1_000_000, + fee_base_msat: 1_000, + fee_proportional_millionths: 0, + excess_data: Vec::new(), + }; + update_channel(&gossip_sync, &secp_ctx, &privkeys[7], update_13); + + // Set the fee on channel 4 to 1 sat, max HTLC to 1M msat + let update_4 = UnsignedChannelUpdate { + chain_hash: ChainHash::using_genesis_block(Network::Testnet), + short_channel_id: 4, + timestamp: 2, + message_flags: 1, // Only must_be_one + channel_flags: 0, + cltv_expiry_delta: (4 << 4) | 1, + htlc_minimum_msat: 0, + htlc_maximum_msat: 1_000_000, + fee_base_msat: 1_000, + fee_proportional_millionths: 0, + excess_data: Vec::new(), + }; + update_channel(&gossip_sync, &secp_ctx, &privkeys[1], update_4); + + // The router will attempt to gather 3x the requested amount, and if it finds the new path + // through channel 16, added below, it'll always prefer that, even prior to the changes + // which introduced this test. + // Instead, we add 6 additional channels so that the pathfinder always just gathers useless + // paths first. + for i in 0..6 { + // Finally, create a single channel with fee of 2 sat from node 1 to node 2 which allows + // for a larger payment. 
+ let chan_features = ChannelFeatures::from_le_bytes(vec![]); + add_channel(&gossip_sync, &secp_ctx, &privkeys[7], &privkeys[2], chan_features, i + 42); + + // Set the fee on channel 16 to 2 sats, max HTLC to 3M msat + let update_a = UnsignedChannelUpdate { + chain_hash: ChainHash::using_genesis_block(Network::Testnet), + short_channel_id: i + 42, + timestamp: 2, + message_flags: 1, // Only must_be_one + channel_flags: 0, + cltv_expiry_delta: (42 << 4) | 1, + htlc_minimum_msat: 0, + htlc_maximum_msat: 1_000_000, + fee_base_msat: 1_000, + fee_proportional_millionths: 0, + excess_data: Vec::new(), + }; + update_channel(&gossip_sync, &secp_ctx, &privkeys[7], update_a); + + // Enable channel 16 by providing an update in both directions + let update_b = UnsignedChannelUpdate { + chain_hash: ChainHash::using_genesis_block(Network::Testnet), + short_channel_id: i + 42, + timestamp: 2, + message_flags: 1, // Only must_be_one + channel_flags: 1, + cltv_expiry_delta: (42 << 4) | 1, + htlc_minimum_msat: 0, + htlc_maximum_msat: 10_000_000, + fee_base_msat: u32::MAX, + fee_proportional_millionths: 0, + excess_data: Vec::new(), + }; + update_channel(&gossip_sync, &secp_ctx, &privkeys[2], update_b); + } + + // Ensure that we can build a route for 3M msat across the three paths to node 2. + let config = UserConfig::default(); + let mut payment_params = PaymentParameters::from_node_id(nodes[2], 42) + .with_bolt11_features(channelmanager::provided_bolt11_invoice_features(&config)) + .unwrap(); + payment_params.max_channel_saturation_power_of_half = 0; + let route_params = + RouteParameters::from_payment_params_and_value(payment_params, 3_000_000); + let route = get_route( + &our_id, + &route_params, + &network_graph.read_only(), + None, + Arc::clone(&logger), + &scorer, + &Default::default(), + &random_seed_bytes, + ) + .unwrap(); + assert_eq!(route.paths.len(), 3); + for path in route.paths { + assert_eq!(path.hops.len(), 2); + } + + // Finally, create a single channel with fee of 2 sat from node 1 to node 2 which allows + // for a larger payment. 
+ let features_16 = ChannelFeatures::from_le_bytes(id_to_feature_flags(16)); + add_channel(&gossip_sync, &secp_ctx, &privkeys[1], &privkeys[2], features_16, 16); + + // Set the fee on channel 16 to 2 sats, max HTLC to 3M msat + let update_16_a = UnsignedChannelUpdate { + chain_hash: ChainHash::using_genesis_block(Network::Testnet), + short_channel_id: 16, + timestamp: 2, + message_flags: 1, // Only must_be_one + channel_flags: 0, + cltv_expiry_delta: (16 << 4) | 1, + htlc_minimum_msat: 0, + htlc_maximum_msat: 3_000_000, + fee_base_msat: 2_000, + fee_proportional_millionths: 0, + excess_data: Vec::new(), + }; + update_channel(&gossip_sync, &secp_ctx, &privkeys[1], update_16_a); + + // Enable channel 16 by providing an update in both directions + let update_16_b = UnsignedChannelUpdate { + chain_hash: ChainHash::using_genesis_block(Network::Testnet), + short_channel_id: 16, + timestamp: 2, + message_flags: 1, // Only must_be_one + channel_flags: 1, + cltv_expiry_delta: (16 << 4) | 1, + htlc_minimum_msat: 0, + htlc_maximum_msat: 10_000_000, + fee_base_msat: u32::MAX, + fee_proportional_millionths: 0, + excess_data: Vec::new(), + }; + update_channel(&gossip_sync, &secp_ctx, &privkeys[2], update_16_b); + + // Ensure that we now build a route for 3M msat across just the new path + let route = get_route( + &our_id, + &route_params, + &network_graph.read_only(), + None, + Arc::clone(&logger), + &scorer, + &Default::default(), + &random_seed_bytes, + ) + .unwrap(); + assert_eq!(route.paths.len(), 1); + assert_eq!(route.paths[0].hops.len(), 2); + assert_eq!(route.paths[0].hops[1].short_channel_id, 16); + } } #[cfg(any(test, ldk_bench))] diff --git a/lightning/src/routing/test_utils.rs b/lightning/src/routing/test_utils.rs index 258652b575d..380f4dbe223 100644 --- a/lightning/src/routing/test_utils.rs +++ b/lightning/src/routing/test_utils.rs @@ -110,7 +110,7 @@ pub(crate) fn update_channel( match gossip_sync.handle_channel_update(Some(node_pubkey), &valid_channel_update) { Ok(res) => assert!(res), - Err(_) => panic!() + Err(e) => panic!("{e:?}") }; } From 2fdd07ec07ff2b596144ce4d9745a10c3dc5fd7b Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 2 Jul 2025 22:51:28 +0000 Subject: [PATCH 092/105] Only mark all mon updates complete if there are no blocked updates In `handle_new_monitor_update!`, we correctly check that the channel doesn't have any blocked monitor updates pending before calling `handle_monitor_update_completion!` (which calls `Channel::monitor_updating_restored`, which in turn assumes that all generated `ChannelMonitorUpdate`s, including blocked ones, have completed). We, however, did not do the same check at several other places where we called `handle_monitor_update_completion!`. Specifically, after a monitor update completes during reload (processed via a `BackgroundEvent` or when monitor update completes async, we didn't check if there were any blocked monitor updates before completing). Here we add the missing check, as well as an assertion in `Channel::monitor_updating_restored`. Conflicts resolved in: * lightning/src/ln/chanmon_update_fail_tests.rs due to `rustfmt`-induced changes as well as other tests cleanups. 
* lightning/src/ln/channelmanager.rs due to upstream Channel object refactoring * lightning/src/ln/quiescence_tests.rs which were dropped as they were fixing a test which only exists upstream --- lightning/src/ln/chanmon_update_fail_tests.rs | 78 ++++++++++++++++--- lightning/src/ln/channel.rs | 1 + lightning/src/ln/channelmanager.rs | 12 ++- 3 files changed, 77 insertions(+), 14 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index ad1e6c26b98..657e089d293 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -2931,16 +2931,28 @@ fn test_inbound_reload_without_init_mon() { do_test_inbound_reload_without_init_mon(false, false); } -#[test] -fn test_blocked_chan_preimage_release() { +#[derive(PartialEq, Eq)] +enum BlockedUpdateComplMode { + Async, + AtReload, + Sync, +} + +fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode) { // Test that even if a channel's `ChannelMonitorUpdate` flow is blocked waiting on an event to // be handled HTLC preimage `ChannelMonitorUpdate`s will still go out. let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_mon; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_reload; let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2; let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2; send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5_000_000); @@ -2968,25 +2980,62 @@ fn test_blocked_chan_preimage_release() { expect_payment_claimed!(nodes[0], payment_hash_2, 1_000_000); let as_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + if completion_mode != BlockedUpdateComplMode::Sync { + // We use to incorrectly handle monitor update completion in cases where we completed a + // monitor update async or after reload. We test both based on the `completion_mode`. + chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + } nodes[1].node.handle_update_fulfill_htlc(nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.update_fulfill_htlcs[0]); check_added_monitors(&nodes[1], 1); // We generate only a preimage monitor update assert!(get_monitor!(nodes[1], chan_id_2).get_stored_preimages().contains_key(&payment_hash_2)); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + if completion_mode == BlockedUpdateComplMode::AtReload { + let node_ser = nodes[1].node.encode(); + let chan_mon_0 = get_monitor!(nodes[1], chan_id_1).encode(); + let chan_mon_1 = get_monitor!(nodes[1], chan_id_2).encode(); + + let mons = &[&chan_mon_0[..], &chan_mon_1[..]]; + reload_node!(nodes[1], &node_ser, mons, persister, new_chain_mon, nodes_1_reload); + + nodes[0].node.peer_disconnected(node_b_id); + nodes[2].node.peer_disconnected(node_b_id); + + let mut a_b_reconnect = ReconnectArgs::new(&nodes[0], &nodes[1]); + a_b_reconnect.pending_htlc_claims.1 = 1; + // Note that we will expect no final RAA monitor update in + // `commitment_signed_dance_through_cp_raa` during the reconnect, matching the below case. 
+ reconnect_nodes(a_b_reconnect); + reconnect_nodes(ReconnectArgs::new(&nodes[2], &nodes[1])); + } else if completion_mode == BlockedUpdateComplMode::Async { + let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_2).unwrap().clone(); + nodes[1] + .chain_monitor + .chain_monitor + .channel_monitor_updated(outpoint, latest_update) + .unwrap(); + } // Finish the CS dance between nodes[0] and nodes[1]. Note that until the event handling, the // update_fulfill_htlc + CS is held, even though the preimage is already on disk for the // channel. - nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.commitment_signed); - check_added_monitors(&nodes[1], 1); - let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); - assert!(a.is_none()); + // Note that when completing as a side effect of a reload we completed the CS dance in + // `reconnect_nodes` above. + if completion_mode != BlockedUpdateComplMode::AtReload { + nodes[1].node.handle_commitment_signed( + node_a_id, + &as_htlc_fulfill_updates.commitment_signed, + ); + check_added_monitors(&nodes[1], 1); + let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); + assert!(a.is_none()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &raa); - check_added_monitors(&nodes[1], 0); - assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + nodes[1].node.handle_revoke_and_ack(node_a_id, &raa); + check_added_monitors(&nodes[1], 0); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + } let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 3); + assert_eq!(events.len(), 3, "{events:?}"); if let Event::PaymentSent { .. } = events[0] {} else { panic!(); } if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); } if let Event::PaymentForwarded { .. 
} = events[1] {} else { panic!(); } @@ -3004,6 +3053,13 @@ fn test_blocked_chan_preimage_release() { expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true); } +#[test] +fn test_blocked_chan_preimage_release() { + do_test_blocked_chan_preimage_release(BlockedUpdateComplMode::AtReload); + do_test_blocked_chan_preimage_release(BlockedUpdateComplMode::Sync); + do_test_blocked_chan_preimage_release(BlockedUpdateComplMode::Async); +} + fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_commitment_dance: bool) { // When we forward a payment and receive `update_fulfill_htlc`+`commitment_signed` messages // from the downstream channel, we immediately claim the HTLC on the upstream channel, before diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index cbfb26daa49..d23363229f4 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -5941,6 +5941,7 @@ impl Channel where { assert!(self.context.channel_state.is_monitor_update_in_progress()); self.context.channel_state.clear_monitor_update_in_progress(); + assert_eq!(self.blocked_monitor_updates_pending(), 0); // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to // (re-)broadcast the funding transaction as we may have declined to broadcast it when we diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index b973432056a..a9e14f17f99 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -6347,7 +6347,9 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) { - handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan); + if chan.blocked_monitor_updates_pending() == 0 { + handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan); + } } else { let update_actions = peer_state.monitor_update_blocked_actions .remove(&channel_id).unwrap_or(Vec::new()); @@ -7625,8 +7627,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) { if chan.is_awaiting_monitor_update() { - log_trace!(logger, "Channel is open and awaiting update, resuming it"); - handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan); + if chan.blocked_monitor_updates_pending() == 0 { + log_trace!(logger, "Channel is open and awaiting update, resuming it"); + handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan); + } else { + log_trace!(logger, "Channel is open and awaiting update, leaving it blocked due to a blocked monitor update"); + } } else { log_trace!(logger, "Channel is open but not awaiting update"); } From 445550525a1e1309437f6af590dd92fcee299854 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 10 Jul 2025 00:41:37 +0000 Subject: [PATCH 093/105] Add a test utility to provide nodes with anchor reserves In a number of tests we require available UTXOs to do HTLC anchor claims by bringing our own fees. We previously wrote that out in each test, which is somewhat verbose, so here we simply add a test utility that gives each node a full BTC in a single UTXO. 
Trivial conflicts resolved in: * lightning/src/ln/monitor_tests.rs --- lightning/src/ln/functional_test_utils.rs | 22 ++++ lightning/src/ln/monitor_tests.rs | 148 ++-------------------- 2 files changed, 36 insertions(+), 134 deletions(-) diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 04295073861..420978ad5fc 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -335,6 +335,28 @@ fn do_connect_block_without_consistency_checks<'a, 'b, 'c, 'd>(node: &'a Node<'b } } +pub fn provide_anchor_reserves<'a, 'b, 'c>(nodes: &[Node<'a, 'b, 'c>]) -> Transaction { + let mut output = Vec::with_capacity(nodes.len()); + for node in nodes { + output.push(TxOut { + value: Amount::ONE_BTC, + script_pubkey: node.wallet_source.get_change_script().unwrap(), + }); + } + let tx = Transaction { + version: TxVersion::TWO, + lock_time: LockTime::ZERO, + input: vec![TxIn { ..Default::default() }], + output, + }; + let height = nodes[0].best_block_info().1 + 1; + let block = create_dummy_block(nodes[0].best_block_hash(), height, vec![tx.clone()]); + for node in nodes { + do_connect_block_with_consistency_checks(node, block.clone(), false); + } + tx +} + pub fn disconnect_blocks<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, count: u32) { call_claimable_balances(node); eprintln!("Disconnecting {} blocks using Block Connection Style: {:?}", count, *node.connect_style.borrow()); diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 92b19790be5..ad2b0599598 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -13,7 +13,7 @@ use crate::sign::{ecdsa::EcdsaChannelSigner, OutputSpender, SpendableOutputDescr use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ARCHIVAL_DELAY_BLOCKS,LATENCY_GRACE_PERIOD_BLOCKS, COUNTERPARTY_CLAIMABLE_WITHIN_BLOCKS_PINNABLE, Balance, BalanceSource, ChannelMonitorUpdateStep}; use crate::chain::transaction::OutPoint; use crate::chain::chaininterface::{ConfirmationTarget, LowerBoundedFeeEstimator, compute_feerate_sat_per_1000_weight}; -use crate::events::bump_transaction::{BumpTransactionEvent, WalletSource}; +use crate::events::bump_transaction::BumpTransactionEvent; use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination}; use crate::ln::channel; use crate::ln::types::ChannelId; @@ -462,25 +462,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config), Some(user_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let coinbase_tx = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { ..Default::default() }], - output: vec![ - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }, - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), - }, - ], - }; - if anchors { - nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value); - nodes[1].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 1 }, coinbase_tx.output[1].value); - } + let coinbase_tx = provide_anchor_reserves(&nodes); let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 1_000_000); @@ -865,25 +847,7 @@ fn 
do_test_balances_on_local_commitment_htlcs(anchors: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config), Some(user_config)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let coinbase_tx = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { ..Default::default() }], - output: vec![ - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }, - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), - }, - ], - }; - if anchors { - nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value); - nodes[1].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 1 }, coinbase_tx.output[1].value); - } + let coinbase_tx = provide_anchor_reserves(&nodes); // Create a single channel with two pending HTLCs from nodes[0] to nodes[1], one which nodes[1] // knows the preimage for, one which it does not. @@ -1650,25 +1614,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config), Some(user_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let coinbase_tx = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { ..Default::default() }], - output: vec![ - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }, - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), - }, - ], - }; - if anchors { - nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value); - nodes[1].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 1 }, coinbase_tx.output[1].value); - } + let coinbase_tx = provide_anchor_reserves(&nodes); // Create some initial channels let (_, _, chan_id, funding_tx) = @@ -1951,16 +1897,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config), Some(user_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let coinbase_tx = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { ..Default::default() }], - output: vec![TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }], - }; - nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value); + let coinbase_tx = provide_anchor_reserves(&nodes); let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000); @@ -2241,25 +2178,7 @@ fn do_test_claimable_balance_correct_while_payment_pending(outbound_payment: boo let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(user_config), Some(user_config), Some(user_config)]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); - let coinbase_tx = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { ..Default::default() }], - output: vec![ - TxOut { - value: Amount::ONE_BTC, - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }, - TxOut { - value: Amount::ONE_BTC, - script_pubkey: 
nodes[1].wallet_source.get_change_script().unwrap(), - }, - ], - }; - if anchors { - nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value); - nodes[1].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 1 }, coinbase_tx.output[1].value); - } + provide_anchor_reserves(&nodes); // Create a channel from A -> B let (_, _, chan_ab_id, funding_tx_ab) = @@ -2406,6 +2325,8 @@ fn do_test_monitor_rebroadcast_pending_claims(anchors: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let coinbase_tx = provide_anchor_reserves(&nodes); + let (_, _, _, chan_id, funding_tx) = create_chan_between_nodes_with_value( &nodes[0], &nodes[1], 1_000_000, 500_000_000 ); @@ -2424,17 +2345,6 @@ fn do_test_monitor_rebroadcast_pending_claims(anchors: bool) { false, [nodes[1].node.get_our_node_id()], 1000000); check_added_monitors(&nodes[0], 1); - let coinbase_tx = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { ..Default::default() }], - output: vec![TxOut { // UTXO to attach fees to `htlc_tx` on anchors - value: Amount::ONE_BTC, - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }], - }; - nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value); - // Set up a helper closure we'll use throughout our test. We should only expect retries without // bumps if fees have not increased after a block has been connected (assuming the height timer // re-evaluates at every block) or after `ChainMonitor::rebroadcast_pending_claims` is called. @@ -2538,6 +2448,8 @@ fn do_test_yield_anchors_events(have_htlcs: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config), Some(anchors_config)]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let coinbase_tx = provide_anchor_reserves(&nodes); + let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value( &nodes, 0, 1, 1_000_000, 500_000_000 ); @@ -2613,16 +2525,6 @@ fn do_test_yield_anchors_events(have_htlcs: bool) { assert_eq!(holder_events.len(), 1); let (commitment_tx, anchor_tx) = match holder_events.pop().unwrap() { Event::BumpTransaction(event) => { - let coinbase_tx = Transaction { - version: Version::TWO, - lock_time: LockTime::ZERO, - input: vec![TxIn { ..Default::default() }], - output: vec![TxOut { // UTXO to attach fees to `anchor_tx` - value: Amount::ONE_BTC, - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }], - }; - nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value); nodes[0].bump_tx_handler.handle_event(&event); let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast(); assert_eq!(txn.len(), 2); @@ -2738,6 +2640,8 @@ fn test_anchors_aggregated_revoked_htlc_tx() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let coinbase_tx = provide_anchor_reserves(&nodes); + let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000); let chan_b = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000); @@ -2796,18 +2700,7 @@ fn test_anchors_aggregated_revoked_htlc_tx() { assert_eq!(events.len(), 2); let mut revoked_commitment_txs = Vec::with_capacity(events.len()); let mut anchor_txs = 
Vec::with_capacity(events.len());
-	for (idx, event) in events.into_iter().enumerate() {
-		let utxo_value = Amount::ONE_BTC * (idx + 1) as u64;
-		let coinbase_tx = Transaction {
-			version: Version::TWO,
-			lock_time: LockTime::ZERO,
-			input: vec![TxIn { ..Default::default() }],
-			output: vec![TxOut { // UTXO to attach fees to `anchor_tx`
-				value: utxo_value,
-				script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(),
-			}],
-		};
-		nodes[1].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, utxo_value);
+	for event in events {
 		match event {
 			Event::BumpTransaction(event) => nodes[1].bump_tx_handler.handle_event(&event),
 			_ => panic!("Unexpected event"),
@@ -3125,20 +3018,7 @@ fn do_test_monitor_claims_with_random_signatures(anchors: bool, confirm_counterp
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(user_config), Some(user_config)]);
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

-	let coinbase_tx = Transaction {
-		version: Version::TWO,
-		lock_time: LockTime::ZERO,
-		input: vec![TxIn { ..Default::default() }],
-		output: vec![
-			TxOut {
-				value: Amount::ONE_BTC,
-				script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(),
-			},
-		],
-	};
-	if anchors {
-		nodes[0].wallet_source.add_utxo(bitcoin::OutPoint { txid: coinbase_tx.compute_txid(), vout: 0 }, coinbase_tx.output[0].value);
-	}
+	let coinbase_tx = provide_anchor_reserves(&nodes);

 	// Open a channel and route a payment. We'll let it timeout to claim it.
 	let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);

From 56a9bf5a086991a1f4a6526f184630f3bf7263fd Mon Sep 17 00:00:00 2001
From: Fuyin
Date: Fri, 11 Jul 2025 01:13:32 +0800
Subject: [PATCH 094/105] Prune locktimed packages when inputs are spent

We have to prune locktimed packages when their inputs are spent,
otherwise notifications for the watched outputs might be missed. This
can lead to locktimed packages with spent inputs being added back to
the pending claim requests in the future, and they are never cleaned
up until node restart.

Resolves: #3859

Conflicts resolved in:
 * lightning/src/ln/functional_tests.rs due to upstream changes of
   removed code
 * lightning/src/ln/monitor_tests.rs due to trivial upstream changes
---
 lightning/src/chain/onchaintx.rs     | 21 ++++++++++++++
 lightning/src/ln/functional_tests.rs | 42 ++++++++++------------------
 lightning/src/ln/monitor_tests.rs    | 15 +++-------
 3 files changed, 39 insertions(+), 39 deletions(-)

diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs
index 2a43b006920..9253975de42 100644
--- a/lightning/src/chain/onchaintx.rs
+++ b/lightning/src/chain/onchaintx.rs
@@ -278,6 +278,9 @@ pub struct OnchainTxHandler<ChannelSigner: EcdsaChannelSigner> {
 	#[cfg(not(test))]
 	claimable_outpoints: HashMap<BitcoinOutPoint, (ClaimId, u32)>,

+	#[cfg(any(test, feature = "_test_utils"))]
+	pub(crate) locktimed_packages: BTreeMap<u32, Vec<PackageTemplate>>,
+	#[cfg(not(any(test, feature = "_test_utils")))]
 	locktimed_packages: BTreeMap<u32, Vec<PackageTemplate>>,

 	onchain_events_awaiting_threshold_conf: Vec<OnchainEventEntry>,
@@ -969,6 +972,17 @@ impl<ChannelSigner: EcdsaChannelSigner> OnchainTxHandler<ChannelSigner> {
 					panic!("Inconsistencies between pending_claim_requests map and claimable_outpoints map");
 				}
 			}
+
+			// Also remove/split any locktimed packages whose inputs have been spent by this transaction.
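+			// If a locktimed package also spends `inp.previous_output`, split that input out
+			// of the package (tracking it in `claimed_outputs_material` so it can be restored
+			// if this spend is later reorged out) and drop any packages or locktime buckets
+			// which are left empty.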
+ self.locktimed_packages.retain(|_locktime, packages|{ + packages.retain_mut(|package| { + if let Some(p) = package.split_package(&inp.previous_output) { + claimed_outputs_material.push(p); + } + !package.outpoints().is_empty() + }); + !packages.is_empty() + }); } for package in claimed_outputs_material.drain(..) { let entry = OnchainEventEntry { @@ -1104,6 +1118,13 @@ impl OnchainTxHandler { //- resurect outpoint back in its claimable set and regenerate tx match entry.event { OnchainEvent::ContentiousOutpoint { package } => { + // We pass 0 to `package_locktime` to get the actual required locktime. + let package_locktime = package.package_locktime(0); + if package_locktime >= height { + self.locktimed_packages.entry(package_locktime).or_default().push(package); + continue; + } + if let Some(pending_claim) = self.claimable_outpoints.get(package.outpoints()[0]) { if let Some(request) = self.pending_claim_requests.get_mut(&pending_claim.0) { assert!(request.merge_package(package, height).is_ok()); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index fc8bf38519d..2535a756c9c 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -3329,33 +3329,9 @@ fn test_htlc_on_chain_success() { _ => panic!("Unexpected event"), } - macro_rules! check_tx_local_broadcast { - ($node: expr, $htlc_offered: expr, $commitment_tx: expr) => { { - let mut node_txn = $node.tx_broadcaster.txn_broadcasted.lock().unwrap(); - // HTLC timeout claims for non-anchor channels are only aggregated when claimed from the - // remote commitment transaction. - if $htlc_offered { - assert_eq!(node_txn.len(), 2); - for tx in node_txn.iter() { - check_spends!(tx, $commitment_tx); - assert_ne!(tx.lock_time, LockTime::ZERO); - assert_eq!(tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); - assert!(tx.output[0].script_pubkey.is_p2wsh()); // revokeable output - } - assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output); - } else { - assert_eq!(node_txn.len(), 1); - check_spends!(node_txn[0], $commitment_tx); - assert_ne!(node_txn[0].lock_time, LockTime::ZERO); - assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); - assert!(node_txn[0].output[0].script_pubkey.is_p2wpkh()); // direct payment - assert_ne!(node_txn[0].input[0].previous_output, node_txn[0].input[1].previous_output); - } - node_txn.clear(); - } } - } - // nodes[1] now broadcasts its own timeout-claim of the output that nodes[2] just claimed via success. - check_tx_local_broadcast!(nodes[1], false, commitment_tx[0]); + // nodes[1] does not broadcast its own timeout-claim of the output as nodes[2] just claimed it + // via success. + assert!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().is_empty()); // Broadcast legit commitment tx from A on B's chain // Broadcast preimage tx by B on offered output from A commitment tx on A's chain @@ -3416,7 +3392,17 @@ fn test_htlc_on_chain_success() { _ => panic!("Unexpected event"), } } - check_tx_local_broadcast!(nodes[0], true, node_a_commitment_tx[0]); + // HTLC timeout claims for non-anchor channels are only aggregated when claimed from the + // remote commitment transaction. 
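+ // Here the claims spend node A's own commitment transaction, so we expect two separate,
+ // un-aggregated timeout claims.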
+ let mut node_txn = nodes[0].tx_broadcaster.txn_broadcast(); + assert_eq!(node_txn.len(), 2); + for tx in node_txn.iter() { + check_spends!(tx, node_a_commitment_tx[0]); + assert_ne!(tx.lock_time, LockTime::ZERO); + assert_eq!(tx.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); + assert!(tx.output[0].script_pubkey.is_p2wsh()); // revokeable output + } + assert_ne!(node_txn[0].input[0].previous_output, node_txn[1].input[0].previous_output); } fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index ad2b0599598..d105d69edd2 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -711,8 +711,9 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { test_spendable_output(&nodes[0], &remote_txn[0], false); assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); - // After broadcasting the HTLC claim transaction, node A will still consider the HTLC - // possibly-claimable up to ANTI_REORG_DELAY, at which point it will drop it. + // After confirming the HTLC claim transaction, node A will no longer attempt to claim said + // HTLC, unless the transaction is reorged. However, we'll still report a + // `MaybeTimeoutClaimableHTLC` balance for it until we reach `ANTI_REORG_DELAY` confirmations. mine_transaction(&nodes[0], &b_broadcast_txn[0]); if prev_commitment_tx { expect_payment_path_successful!(nodes[0]); @@ -728,18 +729,10 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { // When the HTLC timeout output is spendable in the next block, A should broadcast it connect_blocks(&nodes[0], htlc_cltv_timeout - nodes[0].best_block_info().1); let a_broadcast_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); - // Aggregated claim transaction. assert_eq!(a_broadcast_txn.len(), 1); check_spends!(a_broadcast_txn[0], remote_txn[0]); - assert_eq!(a_broadcast_txn[0].input.len(), 2); - assert_ne!(a_broadcast_txn[0].input[0].previous_output.vout, a_broadcast_txn[0].input[1].previous_output.vout); - // a_broadcast_txn [0] and [1] should spend the HTLC outputs of the commitment tx - assert!(a_broadcast_txn[0].input.iter().any(|input| remote_txn[0].output[input.previous_output.vout as usize].value.to_sat() == 3_000)); + assert_eq!(a_broadcast_txn[0].input.len(), 1); assert!(a_broadcast_txn[0].input.iter().any(|input| remote_txn[0].output[input.previous_output.vout as usize].value.to_sat() == 4_000)); - - // Confirm node B's claim for node A to remove that claim from the aggregated claim transaction. - mine_transaction(&nodes[0], &b_broadcast_txn[0]); - let a_broadcast_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); let a_htlc_timeout_tx = a_broadcast_txn.into_iter().last().unwrap(); // Once the HTLC-Timeout transaction confirms, A will no longer consider the HTLC From 41c2b510699badff62590d74aeeacca50a01712b Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 10 Jul 2025 17:03:12 +0000 Subject: [PATCH 095/105] Track outpoint creation height in `PackageSolvingData` When we have an outpoint to claim which is lock-timed and the locktime is reached, we add it to `OnchainTxHandler::claimable_outpoints` to indicate the outpoint is now being claimed. However, `claimable_outpoints` is supposed to track when the outpoint first appeared on chain so that we can remove the claim if the outpoint is reorged out. 
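As a rough sketch of the intended semantics (the field as declared in
`OnchainTxHandler`; the inline comment is ours):

    claimable_outpoints: HashMap<BitcoinOutPoint, (ClaimId, u32 /* height the outpoint confirmed */)>,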
Sadly, in the handling for lock-timed packages, we incorrectly stored the current height in `claimable_outpoints`, causing such claims to be removed in case of a reorg right after they were generated, even if the output we intend to claim isn't removed at all. Here we start tracking when the outpoint we're spending was created in `PackageSolvingData`'s constituent types. While we could have tracked this information in `PackageTemplate`, it would preclude later merging packages that are spending outpoints included in different blocks, which we don't necessarily want to do. Conflicts resolved in: * lightning/src/chain/channelmonitor.rs, * lightning/src/chain/onchaintx.rs, and * lightning/src/chain/package.rs due to upstream changes to package struct fields. --- lightning/src/chain/channelmonitor.rs | 47 ++++++++------- lightning/src/chain/onchaintx.rs | 1 + lightning/src/chain/package.rs | 84 ++++++++++++++++++--------- 3 files changed, 83 insertions(+), 49 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 4c195b20a78..9522d06c638 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -3008,23 +3008,26 @@ impl ChannelMonitorImpl { (payment_preimage.clone(), payment_info.clone().into_iter().collect()) }); - let confirmed_spend_txid = self.funding_spend_confirmed.or_else(|| { - self.onchain_events_awaiting_threshold_conf.iter().find_map(|event| match event.event { - OnchainEvent::FundingSpendConfirmation { .. } => Some(event.txid), - _ => None, - }) - }); - let confirmed_spend_txid = if let Some(txid) = confirmed_spend_txid { - txid - } else { - return; - }; + let confirmed_spend_info = self.funding_spend_confirmed + .map(|txid| (txid, None)) + .or_else(|| { + self.onchain_events_awaiting_threshold_conf.iter().find_map(|event| match event.event { + OnchainEvent::FundingSpendConfirmation { .. } => Some((event.txid, Some(event.height))), + _ => None, + }) + }); + let (confirmed_spend_txid, confirmed_spend_height) = + if let Some((txid, height)) = confirmed_spend_info { + (txid, height) + } else { + return; + }; // If the channel is force closed, try to claim the output from this preimage. // First check if a counterparty commitment transaction has been broadcasted: macro_rules! 
claim_htlcs { ($commitment_number: expr, $txid: expr, $htlcs: expr) => { - let (htlc_claim_reqs, _) = self.get_counterparty_output_claim_info($commitment_number, $txid, None, $htlcs); + let (htlc_claim_reqs, _) = self.get_counterparty_output_claim_info($commitment_number, $txid, None, $htlcs, confirmed_spend_height); let conf_target = self.closure_conf_target(); self.onchain_tx_handler.update_claims_view_from_requests(htlc_claim_reqs, self.best_block.height, self.best_block.height, broadcaster, conf_target, fee_estimator, logger); } @@ -3542,7 +3545,7 @@ impl ChannelMonitorImpl { // First, process non-htlc outputs (to_holder & to_counterparty) for (idx, outp) in tx.output.iter().enumerate() { if outp.script_pubkey == revokeable_p2wsh { - let revk_outp = RevokedOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, outp.value, self.counterparty_commitment_params.on_counterparty_tx_csv, self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx()); + let revk_outp = RevokedOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, outp.value, self.counterparty_commitment_params.on_counterparty_tx_csv, self.onchain_tx_handler.channel_type_features().supports_anchors_zero_fee_htlc_tx(), height); let justice_package = PackageTemplate::build_package( commitment_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp), @@ -3563,7 +3566,7 @@ impl ChannelMonitorImpl { // per_commitment_data is corrupt or our commitment signing key leaked! return (claimable_outpoints, to_counterparty_output_info); } - let revk_htlc_outp = RevokedHTLCOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, htlc.amount_msat / 1000, htlc.clone(), &self.onchain_tx_handler.channel_transaction_parameters.channel_type_features); + let revk_htlc_outp = RevokedHTLCOutput::build(per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, htlc.amount_msat / 1000, htlc.clone(), &self.onchain_tx_handler.channel_transaction_parameters.channel_type_features, height); let counterparty_spendable_height = if htlc.offered { htlc.cltv_expiry } else { @@ -3617,7 +3620,7 @@ impl ChannelMonitorImpl { (htlc, htlc_source.as_ref().map(|htlc_source| htlc_source.as_ref())) ), logger); let (htlc_claim_reqs, counterparty_output_info) = - self.get_counterparty_output_claim_info(commitment_number, commitment_txid, Some(tx), per_commitment_option); + self.get_counterparty_output_claim_info(commitment_number, commitment_txid, Some(tx), per_commitment_option, Some(height)); to_counterparty_output_info = counterparty_output_info; for req in htlc_claim_reqs { claimable_outpoints.push(req); @@ -3628,7 +3631,7 @@ impl ChannelMonitorImpl { } /// Returns the HTLC claim package templates and the counterparty output info - fn get_counterparty_output_claim_info(&self, commitment_number: u64, commitment_txid: Txid, tx: Option<&Transaction>, per_commitment_option: Option<&Vec<(HTLCOutputInCommitment, Option>)>>) + fn get_counterparty_output_claim_info(&self, commitment_number: u64, commitment_txid: Txid, tx: 
Option<&Transaction>, per_commitment_option: Option<&Vec<(HTLCOutputInCommitment, Option>)>>, confirmation_height: Option) -> (Vec, CommitmentTxCounterpartyOutputInfo) { let mut claimable_outpoints = Vec::new(); let mut to_counterparty_output_info: CommitmentTxCounterpartyOutputInfo = None; @@ -3688,13 +3691,15 @@ impl ChannelMonitorImpl { CounterpartyOfferedHTLCOutput::build(*per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, - preimage.unwrap(), htlc.clone(), self.onchain_tx_handler.channel_type_features().clone())) + preimage.unwrap(), htlc.clone(), self.onchain_tx_handler.channel_type_features().clone(), + confirmation_height)) } else { PackageSolvingData::CounterpartyReceivedHTLCOutput( CounterpartyReceivedHTLCOutput::build(*per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, - htlc.clone(), self.onchain_tx_handler.channel_type_features().clone())) + htlc.clone(), self.onchain_tx_handler.channel_type_features().clone(), + confirmation_height)) }; let counterparty_package = PackageTemplate::build_package(commitment_txid, transaction_output_index, counterparty_htlc_outp, htlc.cltv_expiry); claimable_outpoints.push(counterparty_package); @@ -3736,7 +3741,7 @@ impl ChannelMonitorImpl { per_commitment_point, self.counterparty_commitment_params.counterparty_delayed_payment_base_key, self.counterparty_commitment_params.counterparty_htlc_base_key, per_commitment_key, tx.output[idx].value, self.counterparty_commitment_params.on_counterparty_tx_csv, - false + false, height, ); let justice_package = PackageTemplate::build_package( htlc_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp), @@ -3765,7 +3770,7 @@ impl ChannelMonitorImpl { if let Some(transaction_output_index) = htlc.transaction_output_index { let (htlc_output, counterparty_spendable_height) = if htlc.offered { let htlc_output = HolderHTLCOutput::build_offered( - htlc.amount_msat, htlc.cltv_expiry, self.onchain_tx_handler.channel_type_features().clone() + htlc.amount_msat, htlc.cltv_expiry, self.onchain_tx_handler.channel_type_features().clone(), conf_height ); (htlc_output, conf_height) } else { @@ -3776,7 +3781,7 @@ impl ChannelMonitorImpl { continue; }; let htlc_output = HolderHTLCOutput::build_accepted( - payment_preimage, htlc.amount_msat, self.onchain_tx_handler.channel_type_features().clone() + payment_preimage, htlc.amount_msat, self.onchain_tx_handler.channel_type_features().clone(), conf_height ); (htlc_output, htlc.cltv_expiry) }; diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 9253975de42..38385e6f4fe 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -1429,6 +1429,7 @@ mod tests { htlc.amount_msat, htlc.cltv_expiry, ChannelTypeFeatures::only_static_remote_key(), + 0, )), 0, )); diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index bd6912c21f8..90386ba194e 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -122,7 +122,7 @@ const HIGH_FREQUENCY_BUMP_INTERVAL: u32 = 1; /// /// CSV and pubkeys are used as part of a witnessScript redeeming a balance output, amount is used /// as part of the signature hash and revocation secret to generate a satisfying witness. 
-#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct RevokedOutput { per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, @@ -132,10 +132,12 @@ pub(crate) struct RevokedOutput { amount: Amount, on_counterparty_tx_csv: u16, is_counterparty_balance_on_anchors: Option<()>, + // Added in LDK 0.1.4/0.2 and always set since. + outpoint_confirmation_height: Option, } impl RevokedOutput { - pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, per_commitment_key: SecretKey, amount: Amount, on_counterparty_tx_csv: u16, is_counterparty_balance_on_anchors: bool) -> Self { + pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, per_commitment_key: SecretKey, amount: Amount, on_counterparty_tx_csv: u16, is_counterparty_balance_on_anchors: bool, outpoint_confirmation_height: u32) -> Self { RevokedOutput { per_commitment_point, counterparty_delayed_payment_base_key, @@ -144,13 +146,15 @@ impl RevokedOutput { weight: WEIGHT_REVOKED_OUTPUT, amount, on_counterparty_tx_csv, - is_counterparty_balance_on_anchors: if is_counterparty_balance_on_anchors { Some(()) } else { None } + is_counterparty_balance_on_anchors: if is_counterparty_balance_on_anchors { Some(()) } else { None }, + outpoint_confirmation_height: Some(outpoint_confirmation_height), } } } impl_writeable_tlv_based!(RevokedOutput, { (0, per_commitment_point, required), + (1, outpoint_confirmation_height, option), // Added in 0.1.4/0.2 and always set (2, counterparty_delayed_payment_base_key, required), (4, counterparty_htlc_base_key, required), (6, per_commitment_key, required), @@ -168,7 +172,7 @@ impl_writeable_tlv_based!(RevokedOutput, { /// /// CSV is used as part of a witnessScript redeeming a balance output, amount is used as part /// of the signature hash and revocation secret to generate a satisfying witness. -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct RevokedHTLCOutput { per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, @@ -177,10 +181,12 @@ pub(crate) struct RevokedHTLCOutput { weight: u64, amount: u64, htlc: HTLCOutputInCommitment, + // Added in LDK 0.1.4/0.2 and always set since. 
+ outpoint_confirmation_height: Option, } impl RevokedHTLCOutput { - pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, per_commitment_key: SecretKey, amount: u64, htlc: HTLCOutputInCommitment, channel_type_features: &ChannelTypeFeatures) -> Self { + pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, per_commitment_key: SecretKey, amount: u64, htlc: HTLCOutputInCommitment, channel_type_features: &ChannelTypeFeatures, outpoint_confirmation_height: u32) -> Self { let weight = if htlc.offered { weight_revoked_offered_htlc(channel_type_features) } else { weight_revoked_received_htlc(channel_type_features) }; RevokedHTLCOutput { per_commitment_point, @@ -189,13 +195,15 @@ impl RevokedHTLCOutput { per_commitment_key, weight, amount, - htlc + htlc, + outpoint_confirmation_height: Some(outpoint_confirmation_height), } } } impl_writeable_tlv_based!(RevokedHTLCOutput, { (0, per_commitment_point, required), + (1, outpoint_confirmation_height, option), // Added in 0.1.4/0.2 and always set (2, counterparty_delayed_payment_base_key, required), (4, counterparty_htlc_base_key, required), (6, per_commitment_key, required), @@ -212,7 +220,7 @@ impl_writeable_tlv_based!(RevokedHTLCOutput, { /// The preimage is used as part of the witness. /// /// Note that on upgrades, some features of existing outputs may be missed. -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct CounterpartyOfferedHTLCOutput { per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, @@ -220,10 +228,12 @@ pub(crate) struct CounterpartyOfferedHTLCOutput { preimage: PaymentPreimage, htlc: HTLCOutputInCommitment, channel_type_features: ChannelTypeFeatures, + // Added in LDK 0.1.4/0.2 and always set since. 
+ outpoint_confirmation_height: Option, } impl CounterpartyOfferedHTLCOutput { - pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, preimage: PaymentPreimage, htlc: HTLCOutputInCommitment, channel_type_features: ChannelTypeFeatures) -> Self { + pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, preimage: PaymentPreimage, htlc: HTLCOutputInCommitment, channel_type_features: ChannelTypeFeatures, outpoint_confirmation_height: Option) -> Self { CounterpartyOfferedHTLCOutput { per_commitment_point, counterparty_delayed_payment_base_key, @@ -231,6 +241,7 @@ impl CounterpartyOfferedHTLCOutput { preimage, htlc, channel_type_features, + outpoint_confirmation_height, } } } @@ -240,6 +251,7 @@ impl Writeable for CounterpartyOfferedHTLCOutput { let legacy_deserialization_prevention_marker = chan_utils::legacy_deserialization_prevention_marker_for_channel_type_features(&self.channel_type_features); write_tlv_fields!(writer, { (0, self.per_commitment_point, required), + (1, self.outpoint_confirmation_height, option), // Added in 0.1.4/0.2, not always set (2, self.counterparty_delayed_payment_base_key, required), (4, self.counterparty_htlc_base_key, required), (6, self.preimage, required), @@ -260,9 +272,11 @@ impl Readable for CounterpartyOfferedHTLCOutput { let mut htlc = RequiredWrapper(None); let mut _legacy_deserialization_prevention_marker: Option<()> = None; let mut channel_type_features = None; + let mut outpoint_confirmation_height = None; read_tlv_fields!(reader, { (0, per_commitment_point, required), + (1, outpoint_confirmation_height, option), // Added in 0.1.4/0.2, not always set (2, counterparty_delayed_payment_base_key, required), (4, counterparty_htlc_base_key, required), (6, preimage, required), @@ -279,7 +293,8 @@ impl Readable for CounterpartyOfferedHTLCOutput { counterparty_htlc_base_key: counterparty_htlc_base_key.0.unwrap(), preimage: preimage.0.unwrap(), htlc: htlc.0.unwrap(), - channel_type_features: channel_type_features.unwrap_or(ChannelTypeFeatures::only_static_remote_key()) + channel_type_features: channel_type_features.unwrap_or(ChannelTypeFeatures::only_static_remote_key()), + outpoint_confirmation_height, }) } } @@ -290,23 +305,25 @@ impl Readable for CounterpartyOfferedHTLCOutput { /// witnessScript. /// /// Note that on upgrades, some features of existing outputs may be missed. 
-#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct CounterpartyReceivedHTLCOutput { per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, htlc: HTLCOutputInCommitment, channel_type_features: ChannelTypeFeatures, + outpoint_confirmation_height: Option, } impl CounterpartyReceivedHTLCOutput { - pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, htlc: HTLCOutputInCommitment, channel_type_features: ChannelTypeFeatures) -> Self { + pub(crate) fn build(per_commitment_point: PublicKey, counterparty_delayed_payment_base_key: DelayedPaymentBasepoint, counterparty_htlc_base_key: HtlcBasepoint, htlc: HTLCOutputInCommitment, channel_type_features: ChannelTypeFeatures, outpoint_confirmation_height: Option) -> Self { CounterpartyReceivedHTLCOutput { per_commitment_point, counterparty_delayed_payment_base_key, counterparty_htlc_base_key, htlc, - channel_type_features + channel_type_features, + outpoint_confirmation_height, } } } @@ -316,6 +333,7 @@ impl Writeable for CounterpartyReceivedHTLCOutput { let legacy_deserialization_prevention_marker = chan_utils::legacy_deserialization_prevention_marker_for_channel_type_features(&self.channel_type_features); write_tlv_fields!(writer, { (0, self.per_commitment_point, required), + (1, self.outpoint_confirmation_height, option), // Added in 0.1.4/0.2, not always set (2, self.counterparty_delayed_payment_base_key, required), (4, self.counterparty_htlc_base_key, required), (6, self.htlc, required), @@ -334,9 +352,11 @@ impl Readable for CounterpartyReceivedHTLCOutput { let mut htlc = RequiredWrapper(None); let mut _legacy_deserialization_prevention_marker: Option<()> = None; let mut channel_type_features = None; + let mut outpoint_confirmation_height = None; read_tlv_fields!(reader, { (0, per_commitment_point, required), + (1, outpoint_confirmation_height, option), // Added in 0.1.4/0.2, not always set (2, counterparty_delayed_payment_base_key, required), (4, counterparty_htlc_base_key, required), (6, htlc, required), @@ -351,7 +371,8 @@ impl Readable for CounterpartyReceivedHTLCOutput { counterparty_delayed_payment_base_key: counterparty_delayed_payment_base_key.0.unwrap(), counterparty_htlc_base_key: counterparty_htlc_base_key.0.unwrap(), htlc: htlc.0.unwrap(), - channel_type_features: channel_type_features.unwrap_or(ChannelTypeFeatures::only_static_remote_key()) + channel_type_features: channel_type_features.unwrap_or(ChannelTypeFeatures::only_static_remote_key()), + outpoint_confirmation_height, }) } } @@ -362,31 +383,34 @@ impl Readable for CounterpartyReceivedHTLCOutput { /// Preimage is only included as part of the witness in former case. /// /// Note that on upgrades, some features of existing outputs may be missed. 
-#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct HolderHTLCOutput { preimage: Option, amount_msat: u64, /// Defaults to 0 for HTLC-Success transactions, which have no expiry cltv_expiry: u32, channel_type_features: ChannelTypeFeatures, + outpoint_confirmation_height: Option, } impl HolderHTLCOutput { - pub(crate) fn build_offered(amount_msat: u64, cltv_expiry: u32, channel_type_features: ChannelTypeFeatures) -> Self { + pub(crate) fn build_offered(amount_msat: u64, cltv_expiry: u32, channel_type_features: ChannelTypeFeatures, outpoint_confirmation_height: u32) -> Self { HolderHTLCOutput { preimage: None, amount_msat, cltv_expiry, channel_type_features, + outpoint_confirmation_height: Some(outpoint_confirmation_height), } } - pub(crate) fn build_accepted(preimage: PaymentPreimage, amount_msat: u64, channel_type_features: ChannelTypeFeatures) -> Self { + pub(crate) fn build_accepted(preimage: PaymentPreimage, amount_msat: u64, channel_type_features: ChannelTypeFeatures, outpoint_confirmation_height: u32) -> Self { HolderHTLCOutput { preimage: Some(preimage), amount_msat, cltv_expiry: 0, channel_type_features, + outpoint_confirmation_height: Some(outpoint_confirmation_height), } } } @@ -396,6 +420,7 @@ impl Writeable for HolderHTLCOutput { let legacy_deserialization_prevention_marker = chan_utils::legacy_deserialization_prevention_marker_for_channel_type_features(&self.channel_type_features); write_tlv_fields!(writer, { (0, self.amount_msat, required), + (1, self.outpoint_confirmation_height, option), // Added in 0.1.4/0.2 and always set (2, self.cltv_expiry, required), (4, self.preimage, option), (6, legacy_deserialization_prevention_marker, option), @@ -412,9 +437,11 @@ impl Readable for HolderHTLCOutput { let mut preimage = None; let mut _legacy_deserialization_prevention_marker: Option<()> = None; let mut channel_type_features = None; + let mut outpoint_confirmation_height = None; read_tlv_fields!(reader, { (0, amount_msat, required), + (1, outpoint_confirmation_height, option), // Added in 0.1.4/0.2 and always set (2, cltv_expiry, required), (4, preimage, option), (6, _legacy_deserialization_prevention_marker, option), @@ -427,7 +454,8 @@ impl Readable for HolderHTLCOutput { amount_msat: amount_msat.0.unwrap(), cltv_expiry: cltv_expiry.0.unwrap(), preimage, - channel_type_features: channel_type_features.unwrap_or(ChannelTypeFeatures::only_static_remote_key()) + channel_type_features: channel_type_features.unwrap_or(ChannelTypeFeatures::only_static_remote_key()), + outpoint_confirmation_height, }) } } @@ -437,7 +465,7 @@ impl Readable for HolderHTLCOutput { /// witnessScript is used as part of the witness redeeming the funding utxo. /// /// Note that on upgrades, some features of existing outputs may be missed. -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct HolderFundingOutput { funding_redeemscript: ScriptBuf, pub(crate) funding_amount: Option, @@ -496,7 +524,7 @@ impl Readable for HolderFundingOutput { /// /// The generic API offers access to an outputs common attributes or allow transformation such as /// finalizing an input claiming the output. 
-#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub(crate) enum PackageSolvingData { RevokedOutput(RevokedOutput), RevokedHTLCOutput(RevokedHTLCOutput), @@ -737,7 +765,7 @@ impl_writeable_tlv_based_enum_legacy!(PackageSolvingData, ; /// That way we avoid claiming in too many discrete transactions while also avoiding /// unnecessarily exposing ourselves to pinning attacks or delaying claims when we could have /// claimed at least part of the available outputs quickly and without risk. -#[derive(Copy, Clone, PartialEq, Eq)] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] enum AggregationCluster { /// Our counterparty can potentially claim this output. Pinnable, @@ -748,7 +776,7 @@ enum AggregationCluster { /// A malleable package might be aggregated with other packages to save on fees. /// A untractable package has been counter-signed and aggregable will break cached counterparty signatures. -#[derive(Copy, Clone, PartialEq, Eq)] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] enum PackageMalleability { Malleable(AggregationCluster), Untractable, @@ -763,7 +791,7 @@ enum PackageMalleability { /// /// As packages are time-sensitive, we fee-bump and rebroadcast them at scheduled intervals. /// Failing to confirm a package translate as a loss of funds for the user. -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct PackageTemplate { // List of onchain outputs and solving data to generate satisfying witnesses. inputs: Vec<(BitcoinOutPoint, PackageSolvingData)>, @@ -1394,7 +1422,7 @@ mod tests { let secp_ctx = Secp256k1::new(); let dumb_scalar = SecretKey::from_slice(&>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap(); let dumb_point = PublicKey::from_secret_key(&secp_ctx, &dumb_scalar); - PackageSolvingData::RevokedOutput(RevokedOutput::build(dumb_point, DelayedPaymentBasepoint::from(dumb_point), HtlcBasepoint::from(dumb_point), dumb_scalar, Amount::ZERO, 0, $is_counterparty_balance_on_anchors)) + PackageSolvingData::RevokedOutput(RevokedOutput::build(dumb_point, DelayedPaymentBasepoint::from(dumb_point), HtlcBasepoint::from(dumb_point), dumb_scalar, Amount::ZERO, 0, $is_counterparty_balance_on_anchors, 0)) } } } @@ -1407,7 +1435,7 @@ mod tests { let dumb_point = PublicKey::from_secret_key(&secp_ctx, &dumb_scalar); let hash = PaymentHash([1; 32]); let htlc = HTLCOutputInCommitment { offered: false, amount_msat: 1_000_000, cltv_expiry: 0, payment_hash: hash, transaction_output_index: None }; - PackageSolvingData::RevokedHTLCOutput(RevokedHTLCOutput::build(dumb_point, DelayedPaymentBasepoint::from(dumb_point), HtlcBasepoint::from(dumb_point), dumb_scalar, 1_000_000 / 1_000, htlc, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies())) + PackageSolvingData::RevokedHTLCOutput(RevokedHTLCOutput::build(dumb_point, DelayedPaymentBasepoint::from(dumb_point), HtlcBasepoint::from(dumb_point), dumb_scalar, 1_000_000 / 1_000, htlc, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), 0)) } } } @@ -1420,7 +1448,7 @@ mod tests { let dumb_point = PublicKey::from_secret_key(&secp_ctx, &dumb_scalar); let hash = PaymentHash([1; 32]); let htlc = HTLCOutputInCommitment { offered: true, amount_msat: $amt, cltv_expiry: $expiry, payment_hash: hash, transaction_output_index: None }; - PackageSolvingData::CounterpartyReceivedHTLCOutput(CounterpartyReceivedHTLCOutput::build(dumb_point, DelayedPaymentBasepoint::from(dumb_point), HtlcBasepoint::from(dumb_point), htlc, $features)) + 
PackageSolvingData::CounterpartyReceivedHTLCOutput(CounterpartyReceivedHTLCOutput::build(dumb_point, DelayedPaymentBasepoint::from(dumb_point), HtlcBasepoint::from(dumb_point), htlc, $features, None)) } } } @@ -1434,7 +1462,7 @@ mod tests { let hash = PaymentHash([1; 32]); let preimage = PaymentPreimage([2;32]); let htlc = HTLCOutputInCommitment { offered: false, amount_msat: $amt, cltv_expiry: 0, payment_hash: hash, transaction_output_index: None }; - PackageSolvingData::CounterpartyOfferedHTLCOutput(CounterpartyOfferedHTLCOutput::build(dumb_point, DelayedPaymentBasepoint::from(dumb_point), HtlcBasepoint::from(dumb_point), preimage, htlc, $features)) + PackageSolvingData::CounterpartyOfferedHTLCOutput(CounterpartyOfferedHTLCOutput::build(dumb_point, DelayedPaymentBasepoint::from(dumb_point), HtlcBasepoint::from(dumb_point), preimage, htlc, $features, None)) } } } @@ -1443,7 +1471,7 @@ mod tests { ($features: expr) => { { let preimage = PaymentPreimage([2;32]); - PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build_accepted(preimage, 0, $features)) + PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build_accepted(preimage, 0, $features, 0)) } } } @@ -1451,7 +1479,7 @@ mod tests { macro_rules! dumb_offered_htlc_output { ($cltv_expiry: expr, $features: expr) => { { - PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build_offered(0, $cltv_expiry, $features)) + PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build_offered(0, $cltv_expiry, $features, 0)) } } } From 3463a0cd124794de8fd55c38fabbfd9bc4fa1c58 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 10 Jul 2025 17:04:07 +0000 Subject: [PATCH 096/105] Use outpoint creation height when restoring locktimed packages When we have an outpoint to claim which is lock-timed and the locktime is reached, we add it to `OnchainTxHandler::claimable_outpoints` to indicate the outpoint is now being claimed. However, `claimable_outpoints` is supposed to track when the outpoint first appeared on chain so that we can remove the claim if the outpoint is reorged out. Sadly, in the handling for lock-timed packages, we incorrectly stored the current height in `claimable_outpoints`, causing such claims to be removed in case of a reorg right after they were generated, even if the output we intend to claim isn't removed at all. Here we use the creation-height tracking added in the previous commit to actually address the issue, using the tracked height when adding a claim to `OnchainTxHandler::claimable_outpoints`. In cases where we have no information, we continue to use the current height, retaining the issue for locktimed packages on upgrades, but this simplifies cases where we actually don't have the information available anyway. Trivial conflicts resolved in: * lightning/src/chain/package.rs --- lightning/src/chain/onchaintx.rs | 7 ++++--- lightning/src/chain/package.rs | 35 ++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 3 deletions(-) diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 38385e6f4fe..ae221c1c61d 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -865,9 +865,10 @@ impl OnchainTxHandler { // Because fuzzing can cause hash collisions, we can end up with conflicting claim // ids here, so we only assert when not fuzzing. 
 			debug_assert!(cfg!(fuzzing) || self.pending_claim_requests.get(&claim_id).is_none());
-			for k in req.outpoints() {
-				log_info!(logger, "Registering claiming request for {}:{}", k.txid, k.vout);
-				self.claimable_outpoints.insert(k.clone(), (claim_id, conf_height));
+			for (k, outpoint_confirmation_height) in req.outpoints_and_creation_heights() {
+				let creation_height = outpoint_confirmation_height.unwrap_or(conf_height);
+				log_info!(logger, "Registering claiming request for {}:{}, which exists as of height {creation_height}", k.txid, k.vout);
+				self.claimable_outpoints.insert(k.clone(), (claim_id, creation_height));
 			}
 			self.pending_claim_requests.insert(claim_id, req);
 		}
diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs
index 90386ba194e..9fe16915be4 100644
--- a/lightning/src/chain/package.rs
+++ b/lightning/src/chain/package.rs
@@ -603,6 +603,35 @@ impl PackageSolvingData {
 		}
 	}

+	fn input_confirmation_height(&self) -> Option<u32> {
+		match self {
+			PackageSolvingData::RevokedOutput(RevokedOutput {
+				outpoint_confirmation_height,
+				..
+			})
+			| PackageSolvingData::RevokedHTLCOutput(RevokedHTLCOutput {
+				outpoint_confirmation_height,
+				..
+			})
+			| PackageSolvingData::CounterpartyOfferedHTLCOutput(CounterpartyOfferedHTLCOutput {
+				outpoint_confirmation_height,
+				..
+			})
+			| PackageSolvingData::CounterpartyReceivedHTLCOutput(
+				CounterpartyReceivedHTLCOutput { outpoint_confirmation_height, .. },
+			)
+			| PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput {
+				outpoint_confirmation_height,
+				..
+			}) => *outpoint_confirmation_height,
+			// We don't bother to track `HolderFundingOutput`'s creation height as it's the funding
+			// transaction itself and we build `HolderFundingOutput`s before we actually get the
+			// commitment transaction confirmed.
+			PackageSolvingData::HolderFundingOutput(_) => None,
+		}
+	}
+
+	#[rustfmt::skip]
 	fn as_tx_input(&self, previous_output: BitcoinOutPoint) -> TxIn {
 		let sequence = match self {
 			PackageSolvingData::RevokedOutput(_) => Sequence::ENABLE_RBF_NO_LOCKTIME,
@@ -905,6 +934,12 @@ impl PackageTemplate {
 	pub(crate) fn outpoints(&self) -> Vec<&BitcoinOutPoint> { self.inputs.iter().map(|(o, _)| o).collect() }
+	pub(crate) fn outpoints_and_creation_heights(
+		&self,
+	) -> impl Iterator<Item = (&BitcoinOutPoint, Option<u32>)> {
+		self.inputs.iter().map(|(o, p)| (o, p.input_confirmation_height()))
+	}
+
 	pub(crate) fn inputs(&self) -> impl ExactSizeIterator<Item = &PackageSolvingData> {
 		self.inputs.iter().map(|(_, i)| i)
 	}

From a9597aa88031239a830776ee19ada45f5c00baad Mon Sep 17 00:00:00 2001
From: Matt Corallo
Date: Thu, 10 Jul 2025 00:51:51 +0000
Subject: [PATCH 097/105] Add a test case for the issues fixed in the previous few commits

This adds a single test which exercises both the ability to prune
locktimed packages when inputs are spent and the creation-height
tracking for locktimed packages.

Trivial conflicts resolved in:
 * lightning/src/ln/reorg_tests.rs
---
 lightning/src/ln/reorg_tests.rs | 226 +++++++++++++++++++++++++++++++-
 1 file changed, 225 insertions(+), 1 deletion(-)

diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs
index b1b4f77c590..56760c510a3 100644
--- a/lightning/src/ln/reorg_tests.rs
+++ b/lightning/src/ln/reorg_tests.rs
@@ -10,13 +10,14 @@
 //! Further functional tests which test blockchain reorganizations.
use crate::chain::chaininterface::LowerBoundedFeeEstimator; -use crate::chain::channelmonitor::{ANTI_REORG_DELAY, LATENCY_GRACE_PERIOD_BLOCKS}; +use crate::chain::channelmonitor::{ANTI_REORG_DELAY, Balance, LATENCY_GRACE_PERIOD_BLOCKS}; use crate::chain::transaction::OutPoint; use crate::chain::Confirm; use crate::events::{Event, MessageSendEventsProvider, ClosureReason, HTLCDestination, MessageSendEvent}; use crate::ln::msgs::{ChannelMessageHandler, Init}; use crate::ln::types::ChannelId; use crate::sign::OutputSpender; +use crate::types::payment::PaymentHash; use crate::util::ser::Writeable; use crate::util::string::UntrustedString; @@ -897,3 +898,226 @@ fn test_retries_own_commitment_broadcast_after_reorg() { do_test_retries_own_commitment_broadcast_after_reorg(true, false); do_test_retries_own_commitment_broadcast_after_reorg(true, true); } + +fn do_test_split_htlc_expiry_tracking(use_third_htlc: bool, reorg_out: bool) { + // Previously, we had a bug where if there were two HTLCs which expired at different heights, + // and a counterparty commitment transaction confirmed spending both of them, we'd continually + // rebroadcast attempted HTLC claims against the higher-expiry HTLC forever. + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + + // This test relies on being able to consolidate HTLC claims into a single transaction, which + // requires anchors: + let mut config = test_default_channel_config(); + config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + config.manually_accept_inbound_channels = true; + + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let coinbase_tx = provide_anchor_reserves(&nodes); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let (_, _, chan_id, funding_tx) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0); + + // Route two non-dust HTLCs with different expiry, with a third having the same expiry as the + // second if `use_third_htlc` is set. + let (preimage_a, payment_hash_a, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000_000); + connect_blocks(&nodes[0], 2); + connect_blocks(&nodes[1], 2); + let (preimage_b, payment_hash_b, ..) = route_payment(&nodes[0], &[&nodes[1]], 100_000_000); + let payment_hash_c = if use_third_htlc { + route_payment(&nodes[0], &[&nodes[1]], 100_000_000).1 + } else { + PaymentHash([0; 32]) + }; + + // First disconnect peers so that we don't have to deal with messages: + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); + + // Give node B preimages so that it will claim the first two HTLCs on-chain. + nodes[1].node.claim_funds(preimage_a); + expect_payment_claimed!(nodes[1], payment_hash_a, 100_000_000); + nodes[1].node.claim_funds(preimage_b); + expect_payment_claimed!(nodes[1], payment_hash_b, 100_000_000); + check_added_monitors(&nodes[1], 2); + + let err = "Channel force-closed".to_string(); + + // Force-close and fetch node B's commitment transaction and the transaction claiming the first + // two HTLCs. 
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan_id, &node_a_id, err).unwrap(); + check_closed_broadcast(&nodes[1], 1, true); + check_added_monitors(&nodes[1], 1); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 10_000_000); + + let mut txn = nodes[1].tx_broadcaster.txn_broadcast(); + assert_eq!(txn.len(), 1); + let commitment_tx = txn.pop().unwrap(); + check_spends!(commitment_tx, funding_tx); + + mine_transaction(&nodes[0], &commitment_tx); + check_closed_broadcast(&nodes[0], 1, true); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 10_000_000); + check_added_monitors(&nodes[0], 1); + + mine_transaction(&nodes[1], &commitment_tx); + let mut bump_events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events(); + assert_eq!(bump_events.len(), 1); + match bump_events.pop().unwrap() { + Event::BumpTransaction(bump_event) => { + nodes[1].bump_tx_handler.handle_event(&bump_event); + }, + ev => panic!("Unexpected event {ev:?}"), + } + + let mut txn = nodes[1].tx_broadcaster.txn_broadcast(); + if nodes[1].connect_style.borrow().updates_best_block_first() { + assert_eq!(txn.len(), 2, "{txn:?}"); + check_spends!(txn[0], funding_tx); + } else { + assert_eq!(txn.len(), 1, "{txn:?}"); + } + let bs_htlc_spend_tx = txn.pop().unwrap(); + check_spends!(bs_htlc_spend_tx, commitment_tx, coinbase_tx); + + // Now connect blocks until the first HTLC expires + assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0); + connect_blocks(&nodes[0], TEST_FINAL_CLTV - 2); + let mut txn = nodes[0].tx_broadcaster.txn_broadcast(); + assert_eq!(txn.len(), 1); + let as_first_htlc_spend_tx = txn.pop().unwrap(); + check_spends!(as_first_htlc_spend_tx, commitment_tx); + + // But confirm B's dual-HTLC-claim transaction instead. A should now have nothing to broadcast + // as the third HTLC (if there is one) won't expire for another block. + mine_transaction(&nodes[0], &bs_htlc_spend_tx); + let mut txn = nodes[0].tx_broadcaster.txn_broadcast(); + assert_eq!(txn.len(), 0); + + let sent_events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(sent_events.len(), 4, "{sent_events:?}"); + let mut found_expected_events = [false, false, false, false]; + for event in sent_events { + match event { + Event::PaymentSent { payment_hash, .. }|Event::PaymentPathSuccessful { payment_hash: Some(payment_hash), .. } => { + let path_success = matches!(event, Event::PaymentPathSuccessful { .. 
}); + if payment_hash == payment_hash_a { + found_expected_events[0 + if path_success { 1 } else { 0 }] = true; + } else if payment_hash == payment_hash_b { + found_expected_events[2 + if path_success { 1 } else { 0 }] = true; + } else { + panic!("Wrong payment hash {event:?}"); + } + }, + _ => panic!("Wrong event {event:?}"), + } + } + assert_eq!(found_expected_events, [true, true, true, true]); + + // However if we connect one more block the third HTLC will time out and A should claim it + connect_blocks(&nodes[0], 1); + let mut txn = nodes[0].tx_broadcaster.txn_broadcast(); + if use_third_htlc { + assert_eq!(txn.len(), 1); + let as_third_htlc_spend_tx = txn.pop().unwrap(); + check_spends!(as_third_htlc_spend_tx, commitment_tx); + // Previously, node A would generate a bogus claim here, trying to claim both HTLCs B and C in + // one transaction, so we check that the single input being spent was not already spent in node + // B's HTLC claim transaction. + assert_eq!(as_third_htlc_spend_tx.input.len(), 1, "{as_third_htlc_spend_tx:?}"); + for spent_input in bs_htlc_spend_tx.input.iter() { + let third_htlc_vout = as_third_htlc_spend_tx.input[0].previous_output.vout; + assert_ne!(third_htlc_vout, spent_input.previous_output.vout); + } + + mine_transaction(&nodes[0], &as_third_htlc_spend_tx); + + assert_eq!(&nodes[0].node.get_and_clear_pending_events(), &[]); + } else { + assert_eq!(txn.len(), 0); + // Connect a block so that both cases end with the same height + connect_blocks(&nodes[0], 1); + } + + // At this point all HTLCs have been resolved and no further transactions should be generated. + // We connect blocks until one block before `bs_htlc_spend_tx` reaches `ANTI_REORG_DELAY` + // confirmations. + connect_blocks(&nodes[0], ANTI_REORG_DELAY - 4); + let mut txn = nodes[0].tx_broadcaster.txn_broadcast(); + assert_eq!(txn.len(), 0); + assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); + + if reorg_out { + // Reorg out bs_htlc_spend_tx, letting node A claim all the HTLCs instead. + disconnect_blocks(&nodes[0], ANTI_REORG_DELAY - 2); + assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0); + + // As soon as bs_htlc_spend_tx is disconnected, node A should consider all HTLCs + // claimable-on-timeout. + disconnect_blocks(&nodes[0], 1); + let balances = nodes[0].chain_monitor.chain_monitor.get_claimable_balances(&[]); + assert_eq!(balances.len(), if use_third_htlc { 3 } else { 2 }); + for balance in balances { + if let Balance::MaybeTimeoutClaimableHTLC { .. } = balance { + } else { + panic!("Unexpected balance {balance:?}"); + } + } + + connect_blocks(&nodes[0], 100); + let txn = nodes[0].tx_broadcaster.txn_broadcast(); + let mut claiming_outpoints = new_hash_set(); + for tx in txn.iter() { + for input in tx.input.iter() { + claiming_outpoints.insert(input.previous_output); + } + } + assert_eq!(claiming_outpoints.len(), if use_third_htlc { 3 } else { 2 }); + } else { + // Connect a final block, which puts `bs_htlc_spend_tx` at `ANTI_REORG_DELAY` and we wipe + // the claimable balances for the first two HTLCs. + connect_blocks(&nodes[0], 1); + let balances = nodes[0].chain_monitor.chain_monitor.get_claimable_balances(&[]); + assert_eq!(balances.len(), if use_third_htlc { 1 } else { 0 }); + + // Connect two more blocks to get `as_third_htlc_spend_tx` to `ANTI_REORG_DELAY` confs. 
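+		// (In LDK, `ANTI_REORG_DELAY` is 6: the number of confirmations after which
+		// an on-chain resolution is considered irreversible and the corresponding
+		// payment events are generated.)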
+ connect_blocks(&nodes[0], 2); + if use_third_htlc { + let failed_events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(failed_events.len(), 2); + let mut found_expected_events = [false, false]; + for event in failed_events { + match event { + Event::PaymentFailed { payment_hash: Some(payment_hash), .. }|Event::PaymentPathFailed { payment_hash, .. } => { + let path_failed = matches!(event, Event::PaymentPathFailed { .. }); + if payment_hash == payment_hash_c { + found_expected_events[if path_failed { 1 } else { 0 }] = true; + } else { + panic!("Wrong payment hash {event:?}"); + } + }, + _ => panic!("Wrong event {event:?}"), + } + } + assert_eq!(found_expected_events, [true, true]); + } + + // Further, there should be no spendable balances. + assert!(nodes[0].chain_monitor.chain_monitor.get_claimable_balances(&[]).is_empty()); + } +} + +#[test] +fn test_split_htlc_expiry_tracking() { + do_test_split_htlc_expiry_tracking(true, true); + do_test_split_htlc_expiry_tracking(false, true); + do_test_split_htlc_expiry_tracking(true, false); + do_test_split_htlc_expiry_tracking(false, false); +} From 382e71b1d2c659cb569554a630c30d20cf598a57 Mon Sep 17 00:00:00 2001 From: Leo Nash Date: Tue, 15 Jul 2025 12:40:33 -0700 Subject: [PATCH 098/105] Correct non-dust HTLC accounting in `next_remote_commit_tx_fee_msat` `next_remote_commit_tx_fee_msat` previously mistakenly classified HTLCs with values equal to the dust limit as dust. This did not cause any force closes because the code that builds commitment transactions for signing correctly trims dust HTLCs. Nonetheless, this can cause `next_remote_commit_tx_fee_msat` to predict a weight for the next remote commitment transaction that is significantly lower than the eventual weight. This allows a malicious channel funder to create an unbroadcastable commitment for the channel fundee by adding HTLCs with values equal to the dust limit to the commitment transaction; according to the fundee, the funder has not exhausted their reserve because all the added HTLCs are dust, while in reality all the HTLCs are non-dust, and the funder does not have the funds to pay the minimum feerate to enter the mempool. Conflicts resolved in: * lightning/src/ln/htlc_reserve_unit_tests.rs which is a new file upstream. The new test was instead moved to lightning/src/ln/functional_tests.rs and rewritten where the upstream API has changed (in some cases nontrivially). --- lightning/src/ln/channel.rs | 6 +- lightning/src/ln/functional_tests.rs | 290 +++++++++++++++++++++++++++ 2 files changed, 293 insertions(+), 3 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index d23363229f4..16803a45bfd 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -3216,7 +3216,7 @@ impl ChannelContext where SP::Target: SignerProvider { /// Creates a set of keys for build_commitment_transaction to generate a transaction which we /// will sign and send to our counterparty. /// If an Err is returned, it is a ChannelError::Close (for get_funding_created) - fn build_remote_transaction_keys(&self) -> TxCreationKeys { + pub fn build_remote_transaction_keys(&self) -> TxCreationKeys { let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint; let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint; let counterparty_pubkeys = self.get_counterparty_pubkeys(); @@ -3774,14 +3774,14 @@ impl ChannelContext where SP::Target: SignerProvider { // committed outbound HTLCs, see below. 
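 	// Note that an HTLC is only dust if its value is *strictly below* the dust
 	// limit; an HTLC with a value exactly equal to the limit still creates an
 	// output on the commitment transaction, so the checks below must skip HTLCs
 	// using `<` rather than `<=`.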
 	let mut included_htlcs = 0;
 	for ref htlc in context.pending_inbound_htlcs.iter() {
-		if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
+		if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
 			continue
 		}
 		included_htlcs += 1;
 	}

 	for ref htlc in context.pending_outbound_htlcs.iter() {
-		if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
+		if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
 			continue
 		}
 		// We only include outbound HTLCs if it will not be included in their next commitment_signed,
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 2535a756c9c..2cbf04a40ff 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -11684,3 +11684,293 @@ fn test_funding_signed_event() {
 	nodes[1].node.get_and_clear_pending_msg_events();
 }
+#[test]
+pub fn test_dust_limit_fee_accounting() {
+	do_test_dust_limit_fee_accounting(false);
+	do_test_dust_limit_fee_accounting(true);
+}
+
+pub fn do_test_dust_limit_fee_accounting(can_afford: bool) {
+	// Test that when a channel funder sends HTLCs exactly on the dust limit
+	// of the funder, the fundee correctly accounts for the additional fee on the
+	// funder's commitment transaction due to those additional non-dust HTLCs when
+	// checking for any infringements of the funder's reserve.
+
+	let channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
+
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+
+	let mut default_config = test_default_channel_config();
+	default_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
+	default_config.manually_accept_inbound_channels = true;
+
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs =
+		create_node_chanmgrs(2, &node_cfgs, &[Some(default_config.clone()), Some(default_config)]);
+
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let node_a_id = nodes[0].node.get_our_node_id();
+
+	// Set an HTLC amount that is equal to the dust limit of the funder
+	const HTLC_AMT_SAT: u64 = 354;
+
+	const CHANNEL_VALUE_SAT: u64 = 100_000;
+
+	const FEERATE_PER_KW: u32 = 253;
+
+	let commit_tx_fee_sat =
+		chan_utils::commit_tx_fee_sat(FEERATE_PER_KW, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
+
+	// By default the reserve is set to 1% or 1000sat, whichever is higher
+	let channel_reserve_satoshis = 1_000;
+
+	// Set node 0's balance to pay for exactly MIN_AFFORDABLE_HTLC_COUNT non-dust HTLCs on the channel, minus some offset
+	let node_0_balance_sat = commit_tx_fee_sat
+		+ channel_reserve_satoshis
+		+ 2 * crate::ln::channel::ANCHOR_OUTPUT_VALUE_SATOSHI
+		+ MIN_AFFORDABLE_HTLC_COUNT as u64 * HTLC_AMT_SAT
+		- if can_afford { 0 } else { 1 };
+	let mut node_1_balance_sat = CHANNEL_VALUE_SAT - node_0_balance_sat;
+
+	let chan_id = create_chan_between_nodes_with_value(
+		&nodes[0],
+		&nodes[1],
+		CHANNEL_VALUE_SAT,
+		node_1_balance_sat * 1000,
+	)
+	.3;
+
+	{
+		// Double check the reserve that node 0 has to maintain here
+		let per_peer_state_lock;
+		let mut peer_state_lock;
+		let chan =
+			get_channel_ref!(nodes[1], nodes[0], per_peer_state_lock, peer_state_lock, chan_id);
+		assert_eq!(
+			chan.context().holder_selected_channel_reserve_satoshis,
+			channel_reserve_satoshis
+		);
+	}
+	{
+		// Double check the dust limit on node 0's commitment transactions; when node 0
+		// adds an HTLC, node 1 will check that the fee on node 0's commitment transaction
+		// does not dip under the node 1 selected reserve.
+ let per_peer_state_lock; + let mut peer_state_lock; + let chan = + get_channel_ref!(nodes[0], nodes[1], per_peer_state_lock, peer_state_lock, chan_id); + assert_eq!(chan.context().holder_dust_limit_satoshis, HTLC_AMT_SAT); + } + + // Precompute the route to skip any router complaints when sending the last HTLC + let (route_0_1, payment_hash_0_1, _, payment_secret_0_1) = + get_route_and_payment_hash!(nodes[0], nodes[1], HTLC_AMT_SAT * 1000); + + let mut htlcs = Vec::new(); + for _ in 0..MIN_AFFORDABLE_HTLC_COUNT - 1 { + let (_payment_preimage, payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[1]], HTLC_AMT_SAT * 1000); + // Grab a snapshot of these HTLCs to manually build the commitment transaction later... + let accepted_htlc = chan_utils::HTLCOutputInCommitment { + offered: false, + amount_msat: HTLC_AMT_SAT * 1000, + // Hard-coded to match the expected value + cltv_expiry: 81, + payment_hash, + transaction_output_index: None, + }; + htlcs.push((accepted_htlc, ())); + } + + // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() + let secp_ctx = Secp256k1::new(); + let session_priv = SecretKey::from_slice(&[42; 32]).expect("RNG is bad!"); + + let cur_height = nodes[1].node.best_block.read().unwrap().height + 1; + + let onion_keys = + onion_utils::construct_onion_keys(&secp_ctx, &route_0_1.paths[0], &session_priv).unwrap(); + let recipient_onion_fields = RecipientOnionFields::secret_only(payment_secret_0_1); + let (onion_payloads, amount_msat, cltv_expiry) = onion_utils::build_onion_payloads( + &route_0_1.paths[0], + HTLC_AMT_SAT * 1000, + &recipient_onion_fields, + cur_height, + &None, + None, + ) + .unwrap(); + let onion_routing_packet = + onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash_0_1) + .unwrap(); + // Double check the hard-coded value + assert_eq!(cltv_expiry, 81); + let msg = msgs::UpdateAddHTLC { + channel_id: chan_id, + htlc_id: MIN_AFFORDABLE_HTLC_COUNT as u64 - 1, + amount_msat, + payment_hash: payment_hash_0_1, + cltv_expiry, + onion_routing_packet, + skimmed_fee_msat: None, + blinding_point: None, + }; + + nodes[1].node.handle_update_add_htlc(node_a_id, &msg); + + if !can_afford { + let err = "Remote HTLC add would put them under remote reserve value".to_string(); + nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", &err, 3); + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + let reason = ClosureReason::ProcessingError { err }; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], CHANNEL_VALUE_SAT); + check_added_monitors(&nodes[1], 1); + } else { + // Now manually create the commitment_signed message corresponding to the update_add + // nodes[0] just sent. In the code for construction of this message, "local" refers + // to the sender of the message, and "remote" refers to the receiver. 
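+
+		// LDK tracks commitment numbers counting down from 2^48 - 1 (BOLT 3 allots
+		// 48 bits to the commitment number), so the constant below numbers the very
+		// first commitment transaction and each subsequent commitment decrements it.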
+ + const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; + + let (local_secret, next_local_point) = { + let per_peer_lock; + let mut peer_state_lock; + + let channel = + get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_id); + let local_chan = if let ChannelPhase::Funded(chan) = &*channel { + chan + } else { + panic!(); + }; + let chan_signer = local_chan.get_signer(); + // Make the signer believe we validated another commitment, so we can release the secret + chan_signer.as_ecdsa().unwrap().get_enforcement_state().last_holder_commitment -= 1; + + ( + chan_signer + .as_ref() + .release_commitment_secret( + INITIAL_COMMITMENT_NUMBER - MIN_AFFORDABLE_HTLC_COUNT as u64 + 1, + ) + .unwrap(), + chan_signer + .as_ref() + .get_per_commitment_point( + INITIAL_COMMITMENT_NUMBER - MIN_AFFORDABLE_HTLC_COUNT as u64, + &secp_ctx, + ) + .unwrap(), + ) + }; + + // Build the remote commitment transaction so we can sign it, and then later use the + // signature for the commitment_signed message. + let local_chan_balance = node_0_balance_sat + - HTLC_AMT_SAT * MIN_AFFORDABLE_HTLC_COUNT as u64 + - 2 * crate::ln::channel::ANCHOR_OUTPUT_VALUE_SATOSHI + - chan_utils::commit_tx_fee_sat( + FEERATE_PER_KW, + MIN_AFFORDABLE_HTLC_COUNT, + &channel_type, + ); + + let accepted_htlc_info = chan_utils::HTLCOutputInCommitment { + offered: false, + amount_msat: HTLC_AMT_SAT * 1000, + cltv_expiry, + payment_hash: payment_hash_0_1, + transaction_output_index: None, + }; + htlcs.push((accepted_htlc_info, ())); + + let commitment_number = INITIAL_COMMITMENT_NUMBER - MIN_AFFORDABLE_HTLC_COUNT as u64; + + let res = { + let per_peer_lock; + let mut peer_state_lock; + + let channel = + get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan_id); + let chan_signer = if let ChannelPhase::Funded(chan) = &*channel { + chan.get_signer() + } else { + panic!(); + }; + + let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data( + commitment_number, + node_1_balance_sat, + local_chan_balance, + channel.context().channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys.funding_pubkey, + channel.context().channel_transaction_parameters.holder_pubkeys.funding_pubkey, + channel.context().build_remote_transaction_keys(), + FEERATE_PER_KW, + &mut htlcs, + &channel.context().channel_transaction_parameters.as_counterparty_broadcastable(), + ); + chan_signer + .as_ecdsa() + .unwrap() + .sign_counterparty_commitment( + &commitment_tx, + Vec::new(), + Vec::new(), + &secp_ctx, + ) + .unwrap() + }; + + let commit_signed_msg = msgs::CommitmentSigned { + channel_id: chan_id, + signature: res.0, + htlc_signatures: res.1, + batch: None, + #[cfg(taproot)] + partial_signature_with_nonce: None, + }; + + // Send the commitment_signed message to the nodes[1]. + nodes[1].node.handle_commitment_signed(node_a_id, &commit_signed_msg); + let _ = nodes[1].node.get_and_clear_pending_msg_events(); + + // Send the RAA to nodes[1]. + let raa_msg = msgs::RevokeAndACK { + channel_id: chan_id, + per_commitment_secret: local_secret, + next_per_commitment_point: next_local_point, + #[cfg(taproot)] + next_local_nonce: None, + }; + nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_msg); + + // The HTLC actually fails here in `fn validate_commitment_signed` due to a fee spike buffer + // violation. It nonetheless passed all checks in `fn validate_update_add_htlc`. 
+ + //expect_pending_htlcs_forwardable!(nodes[1]); + expect_htlc_handling_failed_destinations!( + nodes[1].node.get_and_clear_pending_events(), + &[HTLCDestination::FailedPayment { payment_hash: payment_hash_0_1 }] + ); + + let events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + // Make sure the HTLC failed in the way we expect. + match events[0] { + MessageSendEvent::UpdateHTLCs { + updates: msgs::CommitmentUpdate { ref update_fail_htlcs, .. }, + .. + } => { + assert_eq!(update_fail_htlcs.len(), 1); + update_fail_htlcs[0].clone() + }, + _ => panic!("Unexpected event"), + }; + nodes[1].logger.assert_log("lightning::ln::channel", + format!("Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", raa_msg.channel_id), 1); + + check_added_monitors(&nodes[1], 2); + } +} From b6a8fbc97a32e2e9c690fa0b7406d5937755483c Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 15 Jul 2025 18:38:47 +0000 Subject: [PATCH 099/105] Add CHANGELOG entry for 0.1.5 --- CHANGELOG.md | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7c68aba4c31..e5edcd8eab6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,35 @@ +# 0.1.5 - Jul XXX, 2025 - "Async Path Reduction" + +## Performance Improvements + * `NetworkGraph`'s expensive internal consistency checks have now been + disabled in debug builds in addition to release builds (#3687). + +## Bug Fixes + * Pathfinding which results in a multi-path payment is now substantially + smarter, using fewer paths and better optimizing fees and successes (#3890). + * A counterparty delaying claiming multiple HTLCs with different expiries can + no longer cause our `ChannelMonitor` to continuously rebroadcast invalid + transactions or RBF bump attempts (#3923). + * Reorgs can no longer cause us to fail to claim HTLCs after a counterparty + delayed claiming multiple HTLCs with different expiries (#3923). + * Force-closing a channel while it is blocked on another channel's async + `ChannelMonitorUpdate` can no longer lead to a panic (#3858). + * `ChannelMonitorUpdate`s can no longer be released to storage too early when + doing async updates or on restart. This only impacts async + `ChannelMonitorUpdate` persistence and can lead to loss of funds only in rare + cases with `ChannelMonitorUpdate` persistence order inversions (#3907). + +## Security +0.1.5 fixes a vulnerability which could allow a peer to overdraw their reserve +value, potentially cutting into commitment transaction fees on channels with a +low reserve. + * Due to a bug in checking whether an HTLC is dust during acceptance, near-dust + HTLCs were not counted towards the commitment transaction fee, but did + eventually contribute to it when we built a commitment transaction. This can + be used by a counterparty to overdraw their reserve value, or, for channels + with a low reserve value, cut into the commitment transaction fee (#3933). 
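To make the vulnerability concrete, here is a minimal, self-contained sketch of the dust classification described above — not LDK's actual API; the helper name, the 354-sat limit, and the HTLC values are all illustrative — showing how the buggy `<=` check counts zero non-dust HTLCs for a set of on-the-limit HTLCs, while the corrected check counts every one of them towards the commitment transaction fee:

```rust
/// Counts the HTLCs that appear as real outputs on a commitment transaction
/// (and thus contribute weight and fee). `buggy` reproduces the old behavior,
/// which treated amounts equal to the dust limit as dust.
fn non_dust_htlc_count(htlc_sats: &[u64], dust_limit_sat: u64, buggy: bool) -> usize {
	htlc_sats
		.iter()
		.filter(|&&amt| if buggy { amt > dust_limit_sat } else { amt >= dust_limit_sat })
		.count()
}

fn main() {
	let dust_limit_sat = 354; // illustrative, matching the test's HTLC_AMT_SAT
	let htlcs = [354, 354, 354]; // three HTLCs sitting exactly on the limit

	// The buggy predicate predicts a commitment transaction with no HTLC
	// outputs, so the reserve check under-estimates the fee the funder owes;
	// the fixed predicate counts all three HTLC outputs.
	assert_eq!(non_dust_htlc_count(&htlcs, dust_limit_sat, true), 0);
	assert_eq!(non_dust_htlc_count(&htlcs, dust_limit_sat, false), 3);
}
```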
+ + # 0.1.4 - May 23, 2025 - "Careful Validation of Bogus States" ## Bug Fixes From 843a69fe624179937bdef62f45238912b3435372 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 15 Jul 2025 18:39:19 +0000 Subject: [PATCH 100/105] Bump the `lightning` crate version to 0.1.5 --- lightning/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightning/Cargo.toml b/lightning/Cargo.toml index d0c7336653a..75835c92edc 100644 --- a/lightning/Cargo.toml +++ b/lightning/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lightning" -version = "0.1.4" +version = "0.1.5" authors = ["Matt Corallo"] license = "MIT OR Apache-2.0" repository = "https://github.com/lightningdevkit/rust-lightning/" From bb4b1ba091e5f6d1b10634ba3406c2458f12be69 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 16 Jul 2025 10:56:57 +0000 Subject: [PATCH 101/105] Add release date for 0.1.5 --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e5edcd8eab6..b6696486d79 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -# 0.1.5 - Jul XXX, 2025 - "Async Path Reduction" +# 0.1.5 - Jul 16, 2025 - "Async Path Reduction" ## Performance Improvements * `NetworkGraph`'s expensive internal consistency checks have now been From 3af95c490244a7d70a8fddfa001c21a0680c4d7d Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 16 Jul 2025 10:58:34 +0000 Subject: [PATCH 102/105] Copy pins from upstream CI tests Various dependencies have had their MSRV bumped since 0.1 was released. Here we copy the MSRV-compliant pins from upstream so that CI passes for 0.1 again. --- ci/ci-tests.sh | 9 +++++++++ ci/ci-tx-sync-tests.sh | 3 +++ 2 files changed, 12 insertions(+) diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh index 3be6afde89a..20cdaf26431 100755 --- a/ci/ci-tests.sh +++ b/ci/ci-tests.sh @@ -27,6 +27,15 @@ PIN_RELEASE_DEPS # pin the release dependencies in our main workspace # proptest 1.3.0 requires rustc 1.64.0 [ "$RUSTC_MINOR_VERSION" -lt 64 ] && cargo update -p proptest --precise "1.2.0" --verbose +# parking_lot 0.12.4 requires rustc 1.64.0 +[ "$RUSTC_MINOR_VERSION" -lt 64 ] && cargo update -p parking_lot --precise "0.12.3" --verbose + +# parking_lot_core 0.9.11 requires rustc 1.64.0 +[ "$RUSTC_MINOR_VERSION" -lt 64 ] && cargo update -p parking_lot_core --precise "0.9.10" --verbose + +# lock_api 0.4.13 requires rustc 1.64.0 +[ "$RUSTC_MINOR_VERSION" -lt 64 ] && cargo update -p lock_api --precise "0.4.12" --verbose + export RUST_BACKTRACE=1 echo -e "\n\nChecking the workspace, except lightning-transaction-sync." diff --git a/ci/ci-tx-sync-tests.sh b/ci/ci-tx-sync-tests.sh index 3ca2fae6725..5f926a8e37b 100755 --- a/ci/ci-tx-sync-tests.sh +++ b/ci/ci-tx-sync-tests.sh @@ -17,6 +17,9 @@ PIN_RELEASE_DEPS # pin the release dependencies # Starting with version 0.5.11, the `home` crate has an MSRV of rustc 1.81.0. [ "$RUSTC_MINOR_VERSION" -lt 81 ] && cargo update -p home --precise "0.5.9" --verbose +# Starting with version 1.2.0, the `idna_adapter` crate has an MSRV of rustc 1.81.0. +[ "$RUSTC_MINOR_VERSION" -lt 81 ] && cargo update -p idna_adapter --precise "1.1.0" --verbose + export RUST_BACKTRACE=1 echo -e "\n\nChecking Transaction Sync Clients with features." 
From 13363533fa4660f25f8f7b3a0b82017355255f76 Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Thu, 24 Jul 2025 10:21:53 +0200
Subject: [PATCH 103/105] Drop `incremental-mutants` CI job

Previously, the `incremental-mutants` CI job was failing on ~every PR
that made actual logic changes, and nobody seemed to really make any
effort to address the failures. The failing CI jobs therefore just
resulted in additional noise, which in turn could have us getting used
to failing CI, introducing some risk of actual failures slipping
through.

Of course, it also took up some (considerable?) time in the CI queue
that might be better spent on other jobs if no contributors are
actually benefitting from the CI job.

Here we therefore drop `incremental-mutants` from our CI for the time
being.
---
 .github/workflows/build.yml | 17 -----------------
 1 file changed, 17 deletions(-)

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 83ae38a1b9e..ebfd8658304 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -291,20 +291,3 @@ jobs:
           rustup component add rustfmt
       - name: Run rustfmt checks
         run: ci/rustfmt.sh
-
-  incremental-mutants:
-    runs-on: ubuntu-latest
-    if: github.ref_name != 'main' # `main` has no diff with itself
-    steps:
-      - uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-      - name: Relative diff
-        run: |
-          git branch -av
-          git diff origin/main.. | tee git.diff
-      - uses: Swatinem/rust-cache@v2
-      - name: Mutants
-        run: |
-          cargo install cargo-mutants
-          cargo mutants --no-shuffle -j 2 -vV --in-diff git.diff

From ddd31efbf82b3f2b82124f6ed09f010be91ee7fd Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Thu, 24 Jul 2025 10:48:40 +0200
Subject: [PATCH 104/105] Fix `check_commit` jobs on `0.1` branch
---
 .github/workflows/build.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index ebfd8658304..f597c29b5a0 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -198,9 +198,9 @@ jobs:
           git fetch upstream
           export GIT_COMMITTER_EMAIL="rl-ci@example.com"
           export GIT_COMMITTER_NAME="RL CI"
-          git rebase upstream/main
+          git rebase upstream/${{ github.base_ref }}
       - name: For each commit, run cargo check (including in fuzz)
-        run: ci/check-each-commit.sh upstream/main
+        run: ci/check-each-commit.sh upstream/${{ github.base_ref }}

From ccb20c724f990aaae91050fbd0de01ab80ffe0ee Mon Sep 17 00:00:00 2001
From: Elias Rohrer
Date: Mon, 28 Jul 2025 15:07:31 +0200
Subject: [PATCH 105/105] Move `check_commits` to a dedicated workflow

Previously, we introduced a change to dynamically determine the base
branch for the check_commits CI job. Unfortunately, it used the
base_ref variable, which is only set for pull_requests, not for
pushes. Here, we hence move `check_commits` to a dedicated workflow
that is only run on PRs.
--- .github/workflows/build.yml | 23 ------------------- .github/workflows/check_commits.yml | 34 +++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 23 deletions(-) create mode 100644 .github/workflows/check_commits.yml diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f597c29b5a0..e943bc06d5d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -179,29 +179,6 @@ jobs: cd bench RUSTFLAGS="--cfg=ldk_bench --cfg=require_route_graph_test" cargo bench - check_commits: - runs-on: ubuntu-latest - env: - TOOLCHAIN: stable - steps: - - name: Checkout source code - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - name: Install Rust ${{ env.TOOLCHAIN }} toolchain - run: | - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ env.TOOLCHAIN }} - rustup override set ${{ env.TOOLCHAIN }} - - name: Fetch full tree and rebase on upstream - run: | - git remote add upstream https://github.com/lightningdevkit/rust-lightning - git fetch upstream - export GIT_COMMITTER_EMAIL="rl-ci@example.com" - export GIT_COMMITTER_NAME="RL CI" - git rebase upstream/${{ github.base_ref }} - - name: For each commit, run cargo check (including in fuzz) - run: ci/check-each-commit.sh upstream/${{ github.base_ref }} - check_release: runs-on: ubuntu-latest env: diff --git a/.github/workflows/check_commits.yml b/.github/workflows/check_commits.yml new file mode 100644 index 00000000000..2fb44f669c6 --- /dev/null +++ b/.github/workflows/check_commits.yml @@ -0,0 +1,34 @@ +name: CI check_commits + +on: + pull_request: + branches-ignore: + - master + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + check_commits: + runs-on: ubuntu-latest + env: + TOOLCHAIN: stable + steps: + - name: Checkout source code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: Install Rust ${{ env.TOOLCHAIN }} toolchain + run: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ env.TOOLCHAIN }} + rustup override set ${{ env.TOOLCHAIN }} + - name: Fetch full tree and rebase on upstream + run: | + git remote add upstream https://github.com/lightningdevkit/rust-lightning + git fetch upstream + export GIT_COMMITTER_EMAIL="rl-ci@example.com" + export GIT_COMMITTER_NAME="RL CI" + git rebase upstream/${{ github.base_ref }} + - name: For each commit, run cargo check (including in fuzz) + run: ci/check-each-commit.sh upstream/${{ github.base_ref }}