diff --git a/.github/workflows/audit.yml b/.github/workflows/audit.yml index d32c55cd5..e2ae378dd 100644 --- a/.github/workflows/audit.yml +++ b/.github/workflows/audit.yml @@ -19,8 +19,8 @@ jobs: runs-on: ${{ matrix.platform }} steps: - name: Checkout source code - uses: actions/checkout@v3 + uses: actions/checkout@v6 - name: Run security audit - uses: rustsec/audit-check@v1.4.1 + uses: rustsec/audit-check@v2 with: token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/benchmarks.yml b/.github/workflows/benchmarks.yml index f5cf79033..6d0056e9a 100644 --- a/.github/workflows/benchmarks.yml +++ b/.github/workflows/benchmarks.yml @@ -13,20 +13,20 @@ jobs: TOOLCHAIN: stable steps: - name: Checkout source code - uses: actions/checkout@v3 + uses: actions/checkout@v6 - name: Install Rust toolchain run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain stable rustup override set stable - name: Enable caching for bitcoind id: cache-bitcoind - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: bin/bitcoind-${{ runner.os }}-${{ runner.arch }} key: bitcoind-29.0-${{ runner.os }}-${{ runner.arch }} - name: Enable caching for electrs id: cache-electrs - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: bin/electrs-${{ runner.os }}-${{ runner.arch }} key: electrs-${{ runner.os }}-${{ runner.arch }} diff --git a/.github/workflows/cln-integration.yml b/.github/workflows/cln-integration.yml index 2becf086a..81eb82250 100644 --- a/.github/workflows/cln-integration.yml +++ b/.github/workflows/cln-integration.yml @@ -11,20 +11,43 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - - name: Install dependencies - run: | - sudo apt-get update -y - sudo apt-get install -y socat + - name: Create temporary directory for CLN data + run: echo "CLN_DATA_DIR=$(mktemp -d)" >> $GITHUB_ENV - name: Start bitcoind, electrs, and lightningd run: docker compose -p ldk-node -f tests/docker/docker-compose-cln.yml up -d + env: + CLN_DATA_DIR: ${{ env.CLN_DATA_DIR }} + + - name: Wait for CLN to be ready + run: | + for i in $(seq 1 30); do + if docker exec ldk-node-cln-1 lightning-cli --regtest getinfo 2>/dev/null | grep -q '"id"'; then + echo "CLN is ready" + break + fi + echo "Waiting for CLN... 
($i/30)" + sleep 2 + done + docker exec ldk-node-cln-1 lightning-cli --regtest getinfo || { + echo "ERROR: CLN not responding" + docker compose -p ldk-node -f tests/docker/docker-compose-cln.yml logs cln + exit 1 + } - - name: Forward lightningd RPC socket + - name: Set permissions for CLN data directory run: | - docker exec ldk-node-cln-1 sh -c "socat -d -d TCP-LISTEN:9937,fork,reuseaddr UNIX-CONNECT:/root/.lightning/regtest/lightning-rpc&" - socat -d -d UNIX-LISTEN:/tmp/lightning-rpc,reuseaddr,fork TCP:127.0.0.1:9937& + sudo chown -R $(id -u):$(id -g) $CLN_DATA_DIR + sudo chmod 755 $CLN_DATA_DIR + sudo find $CLN_DATA_DIR -type d -exec chmod 755 {} + + sudo find $CLN_DATA_DIR -type f -exec chmod 644 {} + + env: + CLN_DATA_DIR: ${{ env.CLN_DATA_DIR }} - name: Run CLN integration tests - run: RUSTFLAGS="--cfg cln_test" cargo test --test integration_tests_cln + run: CLN_SOCKET_PATH=$CLN_DATA_DIR/regtest/lightning-rpc + RUSTFLAGS="--cfg cln_test" cargo test --test integration_tests_cln -- --show-output --test-threads=1 + env: + CLN_DATA_DIR: ${{ env.CLN_DATA_DIR }} diff --git a/.github/workflows/cron-weekly-rustfmt.yml b/.github/workflows/cron-weekly-rustfmt.yml index d6326f03b..9e54ab9f3 100644 --- a/.github/workflows/cron-weekly-rustfmt.yml +++ b/.github/workflows/cron-weekly-rustfmt.yml @@ -13,7 +13,7 @@ jobs: name: Nightly rustfmt runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: dtolnay/rust-toolchain@nightly with: components: rustfmt @@ -23,7 +23,7 @@ jobs: - name: Get the current date run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_ENV - name: Create Pull Request - uses: peter-evans/create-pull-request@v7 + uses: peter-evans/create-pull-request@v8 with: author: Fmt Bot title: Automated nightly rustfmt (${{ env.date }}) diff --git a/.github/workflows/eclair-integration.yml b/.github/workflows/eclair-integration.yml new file mode 100644 index 000000000..56d51b77e --- /dev/null +++ b/.github/workflows/eclair-integration.yml @@ -0,0 +1,56 @@ +name: CI Checks - Eclair Integration Tests + +on: [push, pull_request] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + check-eclair: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Start bitcoind and electrs + run: docker compose -p ldk-node -f tests/docker/docker-compose-eclair.yml up -d bitcoin electrs + + - name: Wait for bitcoind to be healthy + run: | + for i in $(seq 1 30); do + if docker compose -p ldk-node -f tests/docker/docker-compose-eclair.yml exec bitcoin bitcoin-cli -regtest -rpcuser=user -rpcpassword=pass getblockchaininfo > /dev/null 2>&1; then + echo "bitcoind is ready" + exit 0 + fi + echo "Waiting for bitcoind... 
($i/30)" + sleep 2 + done + echo "ERROR: bitcoind not ready" + exit 1 + + - name: Create wallets on bitcoind + run: | + docker compose -p ldk-node -f tests/docker/docker-compose-eclair.yml exec bitcoin bitcoin-cli -regtest -rpcuser=user -rpcpassword=pass createwallet eclair + docker compose -p ldk-node -f tests/docker/docker-compose-eclair.yml exec bitcoin bitcoin-cli -regtest -rpcuser=user -rpcpassword=pass -rpcwallet=eclair getnewaddress + docker compose -p ldk-node -f tests/docker/docker-compose-eclair.yml exec bitcoin bitcoin-cli -regtest -rpcuser=user -rpcpassword=pass createwallet ldk_node_test + + - name: Start Eclair + run: docker compose -p ldk-node -f tests/docker/docker-compose-eclair.yml up -d eclair + + - name: Wait for Eclair to be ready + run: | + for i in $(seq 1 60); do + if curl -sf -u :eclairpassword -X POST http://127.0.0.1:8080/getinfo > /dev/null 2>&1; then + echo "Eclair is ready" + exit 0 + fi + echo "Waiting for Eclair... ($i/60)" + sleep 5 + done + echo "Eclair failed to start" + docker compose -p ldk-node -f tests/docker/docker-compose-eclair.yml logs eclair + exit 1 + + - name: Run Eclair integration tests + run: RUSTFLAGS="--cfg eclair_test" cargo test --test integration_tests_eclair -- --show-output --test-threads=1 diff --git a/.github/workflows/hrn-integration.yml b/.github/workflows/hrn-integration.yml new file mode 100644 index 000000000..f7ded7bc5 --- /dev/null +++ b/.github/workflows/hrn-integration.yml @@ -0,0 +1,45 @@ +name: CI Checks - HRN Integration Tests + +on: [push, pull_request] + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build-and-test: + runs-on: ubuntu-latest + + steps: + - name: Checkout source code + uses: actions/checkout@v3 + - name: Install Rust stable toolchain + run: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain stable + - name: Enable caching for bitcoind + id: cache-bitcoind + uses: actions/cache@v4 + with: + path: bin/bitcoind-${{ runner.os }}-${{ runner.arch }} + key: bitcoind-29.0-${{ runner.os }}-${{ runner.arch }} + - name: Enable caching for electrs + id: cache-electrs + uses: actions/cache@v4 + with: + path: bin/electrs-${{ runner.os }}-${{ runner.arch }} + key: electrs-${{ runner.os }}-${{ runner.arch }} + - name: Download bitcoind/electrs + if: "steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true'" + run: | + source ./scripts/download_bitcoind_electrs.sh + mkdir -p bin + mv "$BITCOIND_EXE" bin/bitcoind-${{ runner.os }}-${{ runner.arch }} + mv "$ELECTRS_EXE" bin/electrs-${{ runner.os }}-${{ runner.arch }} + - name: Set bitcoind/electrs environment variables + run: | + echo "BITCOIND_EXE=$( pwd )/bin/bitcoind-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" + echo "ELECTRS_EXE=$( pwd )/bin/electrs-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" + - name: Run HRN Integration Tests + run: | + RUSTFLAGS="--cfg no_download --cfg hrn_tests $RUSTFLAGS" cargo test --test integration_tests_hrn + RUSTFLAGS="--cfg no_download --cfg hrn_tests $RUSTFLAGS" cargo test --test integration_tests_hrn --features uniffi \ No newline at end of file diff --git a/.github/workflows/kotlin.yml b/.github/workflows/kotlin.yml index 01a840d60..f4d55e3bc 100644 --- a/.github/workflows/kotlin.yml +++ b/.github/workflows/kotlin.yml @@ -16,10 +16,10 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Set up JDK - uses: 
actions/setup-java@v3 + uses: actions/setup-java@v5 with: distribution: temurin java-version: 11 @@ -47,7 +47,7 @@ jobs: run: ./scripts/uniffi_bindgen_generate_kotlin_android.sh - name: Start bitcoind and electrs - run: docker compose up -d + run: docker compose -p ldk-node -f tests/docker/docker-compose.yml up -d - name: Run ldk-node-jvm tests run: | diff --git a/.github/workflows/lnd-integration.yml b/.github/workflows/lnd-integration.yml index 6f71f19d7..caefbdb6b 100644 --- a/.github/workflows/lnd-integration.yml +++ b/.github/workflows/lnd-integration.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Check and install CMake if needed # lnd_grpc_rust (via prost-build v0.10.4) requires CMake >= 3.5 but is incompatible with CMake >= 4.0. @@ -33,7 +33,6 @@ jobs: fi - name: Create temporary directory for LND data - id: create-temp-dir run: echo "LND_DATA_DIR=$(mktemp -d)" >> $GITHUB_ENV - name: Start bitcoind, electrs, and LND @@ -41,16 +40,24 @@ jobs: env: LND_DATA_DIR: ${{ env.LND_DATA_DIR }} - - name: Set permissions for LND data directory - # In PR 4622 (https://github.com/lightningnetwork/lnd/pull/4622), - # LND sets file permissions to 0700, preventing test code from accessing them. - # This step ensures the test suite has the necessary permissions. - run: sudo chmod -R 755 $LND_DATA_DIR + - name: Wait for LND macaroon and set permissions + run: | + for i in $(seq 1 30); do + if docker exec ldk-node-lnd test -f /root/.lnd/data/chain/bitcoin/regtest/admin.macaroon 2>/dev/null; then + echo "LND macaroon found" + break + fi + echo "Waiting for LND macaroon... ($i/30)" + sleep 2 + done + sudo chmod 755 $LND_DATA_DIR + sudo find $LND_DATA_DIR -type d -exec chmod 755 {} + + sudo find $LND_DATA_DIR -type f -exec chmod 644 {} + env: LND_DATA_DIR: ${{ env.LND_DATA_DIR }} - name: Run LND integration tests run: LND_CERT_PATH=$LND_DATA_DIR/tls.cert LND_MACAROON_PATH=$LND_DATA_DIR/data/chain/bitcoin/regtest/admin.macaroon - RUSTFLAGS="--cfg lnd_test" cargo test --test integration_tests_lnd -- --exact --show-output + RUSTFLAGS="--cfg lnd_test" cargo test --test integration_tests_lnd -- --show-output --test-threads=1 env: LND_DATA_DIR: ${{ env.LND_DATA_DIR }} diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml index 802f7c3d4..e154faa7e 100644 --- a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -15,7 +15,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Install uv uses: astral-sh/setup-uv@v7 @@ -24,7 +24,7 @@ jobs: run: ./scripts/uniffi_bindgen_generate_python.sh - name: Start bitcoind and electrs - run: docker compose up -d + run: docker compose -p ldk-node -f tests/docker/docker-compose.yml up -d - name: Run Python unit tests env: diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 18589e612..b2575aca1 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -34,7 +34,7 @@ jobs: runs-on: ${{ matrix.platform }} steps: - name: Checkout source code - uses: actions/checkout@v3 + uses: actions/checkout@v6 - name: Install Rust ${{ matrix.toolchain }} toolchain run: | curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ matrix.toolchain }} @@ -50,13 +50,13 @@ jobs: run: echo "RUSTFLAGS=-D warnings" >> "$GITHUB_ENV" - name: Enable caching for bitcoind id: cache-bitcoind - uses: actions/cache@v4 + uses: 
actions/cache@v5 with: path: bin/bitcoind-${{ runner.os }}-${{ runner.arch }} key: bitcoind-29.0-${{ runner.os }}-${{ runner.arch }} - name: Enable caching for electrs id: cache-electrs - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: bin/electrs-${{ runner.os }}-${{ runner.arch }} key: electrs-${{ runner.os }}-${{ runner.arch }} @@ -90,6 +90,21 @@ jobs: run: | RUSTFLAGS="--cfg no_download --cfg cycle_tests" cargo test --features uniffi + linting: + name: Linting + runs-on: ubuntu-latest + steps: + - name: Checkout source code + uses: actions/checkout@v6 + - name: Install Rust and clippy + run: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain stable + rustup component add clippy + - name: Ban `unwrap` in library code + run: | + cargo clippy --lib --verbose --color always -- -A warnings -D clippy::unwrap_used -A clippy::tabs_in_doc_comments + cargo clippy --lib --features uniffi --verbose --color always -- -A warnings -D clippy::unwrap_used -A clippy::tabs_in_doc_comments + doc: name: Documentation runs-on: ubuntu-latest @@ -99,4 +114,4 @@ jobs: - uses: actions/checkout@v6 - uses: dtolnay/rust-toolchain@nightly - uses: dtolnay/install@cargo-docs-rs - - run: cargo docs-rs + - run: cargo docs-rs \ No newline at end of file diff --git a/.github/workflows/semver.yml b/.github/workflows/semver.yml index 2a3b14ef8..0fdfbe213 100644 --- a/.github/workflows/semver.yml +++ b/.github/workflows/semver.yml @@ -6,6 +6,6 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout source code - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Check SemVer uses: obi1kenobi/cargo-semver-checks-action@v2 diff --git a/.github/workflows/swift.yml b/.github/workflows/swift.yml index 3410d09aa..c1e385e2d 100644 --- a/.github/workflows/swift.yml +++ b/.github/workflows/swift.yml @@ -12,7 +12,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@v6 - name: Set default Rust version to stable run: rustup default stable diff --git a/.github/workflows/vss-integration.yml b/.github/workflows/vss-integration.yml index b5c4e9a0b..959175162 100644 --- a/.github/workflows/vss-integration.yml +++ b/.github/workflows/vss-integration.yml @@ -27,11 +27,11 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v6 with: path: ldk-node - name: Checkout VSS - uses: actions/checkout@v3 + uses: actions/checkout@v6 with: repository: lightningdevkit/vss-server path: vss-server diff --git a/.github/workflows/vss-no-auth-integration.yml b/.github/workflows/vss-no-auth-integration.yml index 8a5408092..950ff3e5f 100644 --- a/.github/workflows/vss-no-auth-integration.yml +++ b/.github/workflows/vss-no-auth-integration.yml @@ -27,11 +27,11 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v6 with: path: ldk-node - name: Checkout VSS - uses: actions/checkout@v3 + uses: actions/checkout@v6 with: repository: lightningdevkit/vss-server path: vss-server diff --git a/Cargo.toml b/Cargo.toml index a7daf5438..d34710a6e 100755 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,18 +38,20 @@ default = [] #lightning-transaction-sync = { version = "0.2.0", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } #lightning-liquidity = { version = "0.2.0", features = ["std"] } #lightning-macros = { version = "0.2.0" } - -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "dcf0c203e166da2348bef12b2e5eff4a250cdec7", 
features = ["std"] } -lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "dcf0c203e166da2348bef12b2e5eff4a250cdec7" } -lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "dcf0c203e166da2348bef12b2e5eff4a250cdec7", features = ["std"] } -lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "dcf0c203e166da2348bef12b2e5eff4a250cdec7" } -lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "dcf0c203e166da2348bef12b2e5eff4a250cdec7", features = ["tokio"] } -lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "dcf0c203e166da2348bef12b2e5eff4a250cdec7" } -lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "dcf0c203e166da2348bef12b2e5eff4a250cdec7" } -lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "dcf0c203e166da2348bef12b2e5eff4a250cdec7", features = ["rest-client", "rpc-client", "tokio"] } -lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "dcf0c203e166da2348bef12b2e5eff4a250cdec7", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } -lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "dcf0c203e166da2348bef12b2e5eff4a250cdec7", features = ["std"] } -lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "dcf0c203e166da2348bef12b2e5eff4a250cdec7" } +#lightning-dns-resolver = { version = "0.3.0" } + +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "369a2cf9c8ef810deea0cd2b4cf6ed0691b78144", features = ["std"] } +lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "369a2cf9c8ef810deea0cd2b4cf6ed0691b78144" } +lightning-invoice = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "369a2cf9c8ef810deea0cd2b4cf6ed0691b78144", features = ["std"] } +lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "369a2cf9c8ef810deea0cd2b4cf6ed0691b78144" } +lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "369a2cf9c8ef810deea0cd2b4cf6ed0691b78144", features = ["tokio"] } +lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "369a2cf9c8ef810deea0cd2b4cf6ed0691b78144" } +lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "369a2cf9c8ef810deea0cd2b4cf6ed0691b78144" } +lightning-block-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "369a2cf9c8ef810deea0cd2b4cf6ed0691b78144", features = ["rest-client", "rpc-client", "tokio"] } +lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "369a2cf9c8ef810deea0cd2b4cf6ed0691b78144", features = ["esplora-async-https", "time", "electrum-rustls-ring"] } +lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "369a2cf9c8ef810deea0cd2b4cf6ed0691b78144", features = ["std"] } +lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "369a2cf9c8ef810deea0cd2b4cf6ed0691b78144" } +lightning-dns-resolver = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "369a2cf9c8ef810deea0cd2b4cf6ed0691b78144" } bdk_chain = { version = "0.23.0", default-features = false, features = ["std"] } bdk_esplora = { 
version = "0.22.0", default-features = false, features = ["async-https-rustls", "tokio"]} @@ -79,13 +81,13 @@ async-trait = { version = "0.1", default-features = false } vss-client = { package = "vss-client-ng", version = "0.5" } prost = { version = "0.11.6", default-features = false} #bitcoin-payment-instructions = { version = "0.6" } -bitcoin-payment-instructions = { git = "https://github.com/joostjager/bitcoin-payment-instructions", branch = "ldk-dcf0c203e166da2348bef12b2e5eff4a250cdec7" } +bitcoin-payment-instructions = { git = "https://github.com/jkczyz/bitcoin-payment-instructions", rev = "679dac50cc0d81ec4d31da94b93d467e5308f16a" } [target.'cfg(windows)'.dependencies] winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] -lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "dcf0c203e166da2348bef12b2e5eff4a250cdec7", features = ["std", "_test_utils"] } +lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "369a2cf9c8ef810deea0cd2b4cf6ed0691b78144", features = ["std", "_test_utils"] } rand = { version = "0.9.2", default-features = false, features = ["std", "thread_rng", "os_rng"] } proptest = "1.0.0" regex = "1.5.6" @@ -125,7 +127,9 @@ check-cfg = [ "cfg(tokio_unstable)", "cfg(cln_test)", "cfg(lnd_test)", + "cfg(eclair_test)", "cfg(cycle_tests)", + "cfg(hrn_tests)", ] [[bench]] @@ -144,6 +148,7 @@ harness = false #lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync" } #lightning-liquidity = { path = "../rust-lightning/lightning-liquidity" } #lightning-macros = { path = "../rust-lightning/lightning-macros" } +#lightning-dns-resolver = { path = "../rust-lightning/lightning-dns-resolver" } #lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } #lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } @@ -156,6 +161,7 @@ harness = false #lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } #lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } #lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } +#lightning-dns-resolver = { git = "https://github.com/lightningdevkit/rust-lightning", branch = "main" } #lightning = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } #lightning-types = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } @@ -168,6 +174,7 @@ harness = false #lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } #lightning-liquidity = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } #lightning-macros = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } +#lightning-dns-resolver = { git = "https://github.com/lightningdevkit/rust-lightning", rev = "21e9a9c0ef80021d0669f2a366f55d08ba8d9b03" } #vss-client-ng = { path = "../vss-client" } #vss-client-ng = { git = "https://github.com/lightningdevkit/vss-client", branch = "main" } @@ -184,3 +191,4 @@ harness = false #lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync" } #lightning-liquidity = { path = "../rust-lightning/lightning-liquidity" } 
#lightning-macros = { path = "../rust-lightning/lightning-macros" }
+#lightning-dns-resolver = { path = "../rust-lightning/lightning-dns-resolver" }
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 000000000..9f84a5cad
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,9 @@
+# Security Policy
+
+## Reporting a Vulnerability
+
+Security vulnerabilities for `ldk-node` are handled under the same policy as
+`rust-lightning` (LDK), on which this library is built.
+
+Please refer to rust-lightning's [SECURITY.md](https://github.com/lightningdevkit/rust-lightning/blob/main/SECURITY.md)
+for instructions on how to responsibly disclose vulnerabilities.
diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl
index 014993690..f87c7b294 100644
--- a/bindings/ldk_node.udl
+++ b/bindings/ldk_node.udl
@@ -113,6 +113,10 @@ interface Node {
	[Throws=NodeError]
	UserChannelId open_announced_channel_with_all(PublicKey node_id, SocketAddress address, u64? push_to_counterparty_msat, ChannelConfig? channel_config);
	[Throws=NodeError]
+	UserChannelId open_0reserve_channel(PublicKey node_id, SocketAddress address, u64 channel_amount_sats, u64? push_to_counterparty_msat, ChannelConfig? channel_config);
+	[Throws=NodeError]
+	UserChannelId open_0reserve_channel_with_all(PublicKey node_id, SocketAddress address, u64? push_to_counterparty_msat, ChannelConfig? channel_config);
+	[Throws=NodeError]
	void splice_in([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id, u64 splice_amount_sats);
	[Throws=NodeError]
	void splice_in_with_all([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id);
@@ -152,8 +156,8 @@ typedef interface OnchainPayment;
interface FeeRate {
	[Name=from_sat_per_kwu]
	constructor(u64 sat_kwu);
-	[Name=from_sat_per_vb_unchecked]
-	constructor(u64 sat_vb);
+	[Name=from_sat_per_vb_u32]
+	constructor(u32 sat_vb);
	u64 to_sat_per_kwu();
	u64 to_sat_per_vb_floor();
	u64 to_sat_per_vb_ceil();
@@ -229,12 +233,6 @@ enum NodeError {

typedef dictionary NodeStatus;

-[Remote]
-dictionary BestBlock {
-	BlockHash block_hash;
-	u32 height;
-};
-
typedef enum BuildError;

[Trait, WithForeign]
@@ -412,3 +410,7 @@ typedef string LSPSDateTime;
typedef string ScriptBuf;

typedef enum Event;
+
+typedef interface HRNResolverConfig;
+
+typedef dictionary HumanReadableNamesConfig;
diff --git a/build.rs b/build.rs
index f011148e7..2e080ddcd 100644
--- a/build.rs
+++ b/build.rs
@@ -7,5 +7,6 @@ fn main() {

	#[cfg(feature = "uniffi")]
-	uniffi::generate_scaffolding("bindings/ldk_node.udl").unwrap();
+	uniffi::generate_scaffolding("bindings/ldk_node.udl")
+		.expect("the checked-in UniFFI UDL should always generate scaffolding");
}
diff --git a/src/balance.rs b/src/balance.rs
index 6c6ad946d..9310354ea 100644
--- a/src/balance.rs
+++ b/src/balance.rs
@@ -231,8 +231,16 @@ impl LightningBalance {
			inbound_claiming_htlc_rounded_msat,
			inbound_htlc_rounded_msat,
		} => {
-			// unwrap safety: confirmed_balance_candidate_index is guaranteed to index into balance_candidates
-			let balance = balance_candidates.get(confirmed_balance_candidate_index).unwrap();
+			// When confirmed_balance_candidate_index is 0, no specific alternative
+			// funding has been confirmed yet, so use the last candidate (most current
+			// splice/RBF attempt), matching LDK's claimable_amount_satoshis behavior.
+ let balance = if confirmed_balance_candidate_index != 0 { + &balance_candidates[confirmed_balance_candidate_index] + } else { + balance_candidates + .last() + .expect("balance_candidates always contains at least the current funding") + }; Self::ClaimableOnChannelClose { channel_id, diff --git a/src/builder.rs b/src/builder.rs index cd8cc184f..8637ae334 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -8,6 +8,7 @@ use std::collections::HashMap; use std::convert::TryInto; use std::default::Default; +use std::net::ToSocketAddrs; use std::path::PathBuf; use std::sync::{Arc, Mutex, Once, RwLock}; use std::time::SystemTime; @@ -18,13 +19,15 @@ use bdk_wallet::{KeychainKind, Wallet as BdkWallet}; use bitcoin::bip32::{ChildNumber, Xpriv}; use bitcoin::key::Secp256k1; use bitcoin::secp256k1::PublicKey; -use bitcoin::{BlockHash, Network}; +use bitcoin::Network; +use bitcoin_payment_instructions::dns_resolver::DNSHrnResolver; use bitcoin_payment_instructions::onion_message_resolver::LDKOnionMessageDNSSECHrnResolver; -use lightning::chain::{chainmonitor, BestBlock}; +use lightning::chain::{chainmonitor, BestBlock as BlockLocator}; use lightning::ln::channelmanager::{self, ChainParameters, ChannelManagerReadArgs}; use lightning::ln::msgs::{RoutingMessageHandler, SocketAddress}; use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler}; use lightning::log_trace; +use lightning::onion_message::dns_resolution::DNSResolverMessageHandler; use lightning::routing::gossip::NodeAlias; use lightning::routing::router::DefaultRouter; use lightning::routing::scoring::{ @@ -39,14 +42,15 @@ use lightning::util::persist::{ }; use lightning::util::ser::ReadableArgs; use lightning::util::sweep::OutputSweeper; +use lightning_dns_resolver::OMDomainResolver; use lightning_persister::fs_store::v1::FilesystemStore; use vss_client::headers::VssHeaderProvider; use crate::chain::ChainSource; use crate::config::{ default_user_config, may_announce_channel, AnnounceError, AsyncPaymentsRole, - BitcoindRestClientConfig, Config, ElectrumSyncConfig, EsploraSyncConfig, TorConfig, - DEFAULT_ESPLORA_SERVER_URL, DEFAULT_LOG_FILENAME, DEFAULT_LOG_LEVEL, + BitcoindRestClientConfig, Config, ElectrumSyncConfig, EsploraSyncConfig, HRNResolverConfig, + TorConfig, DEFAULT_ESPLORA_SERVER_URL, DEFAULT_LOG_FILENAME, DEFAULT_LOG_LEVEL, }; use crate::connection::ConnectionManager; use crate::entropy::NodeEntropy; @@ -54,10 +58,10 @@ use crate::event::EventQueue; use crate::fee_estimator::OnchainFeeEstimator; use crate::gossip::GossipSource; use crate::io::sqlite_store::SqliteStore; +use crate::io::tier_store::TierStore; use crate::io::utils::{ - read_event_queue, read_external_pathfinding_scores_from_cache, read_network_graph, - read_node_metrics, read_output_sweeper, read_payments, read_peer_info, read_pending_payments, - read_scorer, write_node_metrics, + read_all_objects, read_event_queue, read_external_pathfinding_scores_from_cache, + read_network_graph, read_node_metrics, read_output_sweeper, read_peer_info, read_scorer, }; use crate::io::vss_store::VssStoreBuilder; use crate::io::{ @@ -77,8 +81,8 @@ use crate::runtime::{Runtime, RuntimeSpawner}; use crate::tx_broadcaster::TransactionBroadcaster; use crate::types::{ AsyncPersister, ChainMonitor, ChannelManager, DynStore, DynStoreRef, DynStoreWrapper, - GossipSync, Graph, KeysManager, MessageRouter, OnionMessenger, PaymentStore, PeerManager, - PendingPaymentStore, SyncAndAsyncKVStore, + GossipSync, Graph, HRNResolver, KeysManager, MessageRouter, OnionMessenger, PaymentStore, 
+ PeerManager, PendingPaymentStore, SyncAndAsyncKVStore, }; use crate::wallet::persist::KVStoreWalletPersister; use crate::wallet::Wallet; @@ -151,6 +155,21 @@ impl std::fmt::Debug for LogWriterConfig { } } +#[derive(Default)] +struct TierStoreConfig { + ephemeral: Option>, + backup_storage_dir_path: Option, +} + +impl std::fmt::Debug for TierStoreConfig { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TierStoreConfig") + .field("ephemeral", &self.ephemeral.as_ref().map(|_| "Arc")) + .field("backup_storage_dir_path", &self.backup_storage_dir_path) + .finish() + } +} + /// An error encountered during building a [`Node`]. /// /// [`Node`]: crate::Node @@ -189,10 +208,19 @@ pub enum BuildError { WalletSetupFailed, /// We failed to setup the logger. LoggerSetupFailed, + /// We failed to setup the configured chain source. + ChainSourceSetupFailed, /// The given network does not match the node's previously configured network. NetworkMismatch, /// The role of the node in an asynchronous payments context is not compatible with the current configuration. AsyncPaymentsConfigMismatch, + /// An attempt to setup a DNS Resolver failed. + DNSResolverSetupFailed, + /// The configured backup storage path conflicts with the primary storage path. + /// + /// Backup storage must use a distinct local directory so that the primary and + /// backup stores do not point to the same SQLite database. + BackupStorePathConflict, } impl fmt::Display for BuildError { @@ -216,6 +244,7 @@ impl fmt::Display for BuildError { Self::KVStoreSetupFailed => write!(f, "Failed to setup KVStore."), Self::WalletSetupFailed => write!(f, "Failed to setup onchain wallet."), Self::LoggerSetupFailed => write!(f, "Failed to setup the logger."), + Self::ChainSourceSetupFailed => write!(f, "Failed to setup the chain source."), Self::InvalidNodeAlias => write!(f, "Given node alias is invalid."), Self::NetworkMismatch => { write!(f, "Given network does not match the node's previously configured network.") @@ -226,6 +255,15 @@ impl fmt::Display for BuildError { "The async payments role is not compatible with the current configuration." ) }, + Self::DNSResolverSetupFailed => { + write!(f, "An attempt to setup a DNS resolver has failed.") + }, + Self::BackupStorePathConflict => { + write!( + f, + "The configured backup storage path conflicts with the primary storage path." + ) + }, } } } @@ -278,6 +316,7 @@ pub struct NodeBuilder { liquidity_source_config: Option, log_writer_config: Option, async_payments_role: Option, + tier_store_config: Option, runtime_handle: Option, pathfinding_scores_sync_config: Option, recovery_mode: bool, @@ -296,6 +335,7 @@ impl NodeBuilder { let gossip_source_config = None; let liquidity_source_config = None; let log_writer_config = None; + let tier_store_config = None; let runtime_handle = None; let pathfinding_scores_sync_config = None; let recovery_mode = false; @@ -305,6 +345,7 @@ impl NodeBuilder { gossip_source_config, liquidity_source_config, log_writer_config, + tier_store_config, runtime_handle, async_payments_role: None, pathfinding_scores_sync_config, @@ -614,9 +655,46 @@ impl NodeBuilder { self } + /// Configures a local SQLite backup store for disaster recovery. + /// + /// When building with tiered storage, a SQLite store will be created at the + /// given directory path using [`SQLITE_BACKUP_DB_FILE_NAME`] as its database + /// file name. It receives a second durable copy of data written to the + /// primary store. 
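+	///
+	/// A minimal usage sketch; the directory path below is illustrative only:
+	/// `builder.set_backup_storage_dir_path("/data/ldk_backup".to_string());`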
+	///
+	/// Writes and removals for primary-backed data only succeed once both the
+	/// primary and backup SQLite stores complete successfully.
+	///
+	/// The configured path must point to a distinct local directory from the
+	/// primary storage path. If the backup path equals the primary storage path,
+	/// building will fail with [`BuildError::BackupStorePathConflict`].
+	///
+	/// If not set, durable data will be stored only in the primary store.
+	///
+	/// [`SQLITE_BACKUP_DB_FILE_NAME`]: crate::io::sqlite_store::SQLITE_BACKUP_DB_FILE_NAME
+	pub fn set_backup_storage_dir_path(&mut self, backup_storage_dir_path: String) -> &mut Self {
+		let tier_store_config = self.tier_store_config.get_or_insert(TierStoreConfig::default());
+		tier_store_config.backup_storage_dir_path = Some(backup_storage_dir_path.into());
+		self
+	}
+
+	/// Configures the ephemeral store for non-critical, frequently-accessed data.
+	///
+	/// When building with tiered storage, this store is used for ephemeral data like
+	/// the network graph and scorer data to reduce latency for reads. Data stored here
+	/// can be rebuilt if lost.
+	///
+	/// If not set, non-critical data will be stored in the primary store.
+	pub fn set_ephemeral_store(&mut self, ephemeral_store: Arc<DynStore>) -> &mut Self {
+		let tier_store_config = self.tier_store_config.get_or_insert(TierStoreConfig::default());
+		tier_store_config.ephemeral = Some(ephemeral_store);
+		self
+	}
+
	/// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options
	/// previously configured.
	pub fn build(&self, node_entropy: NodeEntropy) -> Result<Node, BuildError> {
+		let logger = setup_logger(&self.log_writer_config, &self.config)?;
		let storage_dir_path = self.config.storage_dir_path.clone();
		fs::create_dir_all(storage_dir_path.clone())
			.map_err(|_| BuildError::StoragePathAccessFailed)?;
@@ -625,20 +703,26 @@ impl NodeBuilder {
			Some(io::sqlite_store::SQLITE_DB_FILE_NAME.to_string()),
			Some(io::sqlite_store::KV_TABLE_NAME.to_string()),
		)
-		.map_err(|_| BuildError::KVStoreSetupFailed)?;
-		self.build_with_store(node_entropy, kv_store)
+		.map_err(|e| {
+			log_error!(logger, "Failed to setup Sqlite store: {}", e);
+			BuildError::KVStoreSetupFailed
+		})?;
+		self.build_with_store_and_logger(node_entropy, kv_store, logger)
	}

	/// Builds a [`Node`] instance with a [`FilesystemStore`] backend and according to the options
	/// previously configured.
	pub fn build_with_fs_store(&self, node_entropy: NodeEntropy) -> Result<Node, BuildError> {
+		let logger = setup_logger(&self.log_writer_config, &self.config)?;
		let mut storage_dir_path: PathBuf = self.config.storage_dir_path.clone().into();
		storage_dir_path.push("fs_store");

-		fs::create_dir_all(storage_dir_path.clone())
-			.map_err(|_| BuildError::StoragePathAccessFailed)?;
+		fs::create_dir_all(storage_dir_path.clone()).map_err(|e| {
+			log_error!(logger, "Failed to setup Filesystem store: {}", e);
+			BuildError::StoragePathAccessFailed
+		})?;
		let kv_store = FilesystemStore::new(storage_dir_path);
-		self.build_with_store(node_entropy, kv_store)
+		self.build_with_store_and_logger(node_entropy, kv_store, logger)
	}

	/// Builds a [`Node`] instance with a [VSS] backend and according to the options
@@ -669,7 +753,7 @@ impl NodeBuilder {
			BuildError::KVStoreSetupFailed
		})?;

-		self.build_with_store(node_entropy, vss_store)
+		self.build_with_store_and_logger(node_entropy, vss_store, logger)
	}

	/// Builds a [`Node`] instance with a [VSS] backend and according to the options
@@ -706,7 +790,7 @@ impl NodeBuilder {
			BuildError::KVStoreSetupFailed
		})?;

-		self.build_with_store(node_entropy, vss_store)
+		self.build_with_store_and_logger(node_entropy, vss_store, logger)
	}

	/// Builds a [`Node`] instance with a [VSS] backend and according to the options
@@ -733,7 +817,7 @@ impl NodeBuilder {
			BuildError::KVStoreSetupFailed
		})?;

-		self.build_with_store(node_entropy, vss_store)
+		self.build_with_store_and_logger(node_entropy, vss_store, logger)
	}

	/// Builds a [`Node`] instance with a [VSS] backend and according to the options
@@ -758,15 +842,28 @@ impl NodeBuilder {
			BuildError::KVStoreSetupFailed
		})?;

-		self.build_with_store(node_entropy, vss_store)
+		self.build_with_store_and_logger(node_entropy, vss_store, logger)
	}

	/// Builds a [`Node`] instance according to the options previously configured.
+	///
+	/// The provided `kv_store` will be used as the primary storage backend. Optionally,
+	/// an ephemeral store for frequently-accessed non-critical data (e.g., network graph, scorer)
+	/// and a local SQLite backup store for disaster recovery can be configured via
+	/// [`set_ephemeral_store`] and [`set_backup_storage_dir_path`].
+	///
+	/// [`set_ephemeral_store`]: Self::set_ephemeral_store
+	/// [`set_backup_storage_dir_path`]: Self::set_backup_storage_dir_path
	pub fn build_with_store(
		&self, node_entropy: NodeEntropy, kv_store: S,
	) -> Result<Node, BuildError> {
		let logger = setup_logger(&self.log_writer_config, &self.config)?;
+		self.build_with_store_and_logger(node_entropy, kv_store, logger)
+	}
+	fn build_with_store_and_logger(
+		&self, node_entropy: NodeEntropy, kv_store: S, logger: Arc<Logger>,
+	) -> Result<Node, BuildError> {
		let runtime = if let Some(handle) = self.runtime_handle.as_ref() {
			Arc::new(Runtime::with_handle(handle.clone(), Arc::clone(&logger)))
		} else {
@@ -776,6 +873,36 @@ impl NodeBuilder {
			})?)
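+			// The runtime chosen above (either the user-provided handle or the
+			// freshly created one) drives the node's background processing.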
}; + let ts_config = self.tier_store_config.as_ref(); + let primary_store = Arc::new(DynStoreWrapper(kv_store)); + let mut tier_store = TierStore::new(primary_store, Arc::clone(&logger)); + if let Some(config) = ts_config { + config.ephemeral.as_ref().map(|s| tier_store.set_ephemeral_store(Arc::clone(s))); + if let Some(backup_storage_dir_path) = config.backup_storage_dir_path.as_ref() { + let primary_storage_dir_path = PathBuf::from(&self.config.storage_dir_path); + if primary_storage_dir_path == *backup_storage_dir_path { + log_error!( + logger, + "Backup storage path must differ from primary storage path: {}", + backup_storage_dir_path.display() + ); + return Err(BuildError::BackupStorePathConflict); + } + + let backup_store = SqliteStore::new( + backup_storage_dir_path.clone(), + Some(io::sqlite_store::SQLITE_BACKUP_DB_FILE_NAME.to_string()), + Some(io::sqlite_store::KV_TABLE_NAME.to_string()), + ) + .map_err(|e| { + log_error!(logger, "Failed to setup backup SQLite store: {}", e); + BuildError::KVStoreSetupFailed + })?; + let backup_store: Arc = Arc::new(DynStoreWrapper(backup_store)); + tier_store.set_backup_store(backup_store); + } + } + let seed_bytes = node_entropy.to_seed_bytes(); let config = Arc::new(self.config.clone()); @@ -790,7 +917,7 @@ impl NodeBuilder { seed_bytes, runtime, logger, - Arc::new(DynStoreWrapper(kv_store)), + Arc::new(DynStoreWrapper(tier_store)), ) } } @@ -861,7 +988,7 @@ impl ArcedNodeBuilder { pub fn set_chain_source_esplora( &self, server_url: String, sync_config: Option, ) { - self.inner.write().unwrap().set_chain_source_esplora(server_url, sync_config); + self.inner.write().expect("lock").set_chain_source_esplora(server_url, sync_config); } /// Configures the [`Node`] instance to source its chain data from the given Esplora server. @@ -875,7 +1002,7 @@ impl ArcedNodeBuilder { &self, server_url: String, headers: HashMap, sync_config: Option, ) { - self.inner.write().unwrap().set_chain_source_esplora_with_headers( + self.inner.write().expect("lock").set_chain_source_esplora_with_headers( server_url, headers, sync_config, @@ -889,7 +1016,7 @@ impl ArcedNodeBuilder { pub fn set_chain_source_electrum( &self, server_url: String, sync_config: Option, ) { - self.inner.write().unwrap().set_chain_source_electrum(server_url, sync_config); + self.inner.write().expect("lock").set_chain_source_electrum(server_url, sync_config); } /// Configures the [`Node`] instance to connect to a Bitcoin Core node via RPC. @@ -903,7 +1030,7 @@ impl ArcedNodeBuilder { pub fn set_chain_source_bitcoind_rpc( &self, rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, ) { - self.inner.write().unwrap().set_chain_source_bitcoind_rpc( + self.inner.write().expect("lock").set_chain_source_bitcoind_rpc( rpc_host, rpc_port, rpc_user, @@ -924,7 +1051,7 @@ impl ArcedNodeBuilder { &self, rest_host: String, rest_port: u16, rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, ) { - self.inner.write().unwrap().set_chain_source_bitcoind_rest( + self.inner.write().expect("lock").set_chain_source_bitcoind_rest( rest_host, rest_port, rpc_host, @@ -937,20 +1064,20 @@ impl ArcedNodeBuilder { /// Configures the [`Node`] instance to source its gossip data from the Lightning peer-to-peer /// network. pub fn set_gossip_source_p2p(&self) { - self.inner.write().unwrap().set_gossip_source_p2p(); + self.inner.write().expect("lock").set_gossip_source_p2p(); } /// Configures the [`Node`] instance to source its gossip data from the given RapidGossipSync /// server. 
pub fn set_gossip_source_rgs(&self, rgs_server_url: String) { - self.inner.write().unwrap().set_gossip_source_rgs(rgs_server_url); + self.inner.write().expect("lock").set_gossip_source_rgs(rgs_server_url); } /// Configures the [`Node`] instance to source its external scores from the given URL. /// /// The external scores are merged into the local scoring system to improve routing. pub fn set_pathfinding_scores_source(&self, url: String) { - self.inner.write().unwrap().set_pathfinding_scores_source(url); + self.inner.write().expect("lock").set_pathfinding_scores_source(url); } /// Configures the [`Node`] instance to source inbound liquidity from the given @@ -964,7 +1091,7 @@ impl ArcedNodeBuilder { pub fn set_liquidity_source_lsps1( &self, node_id: PublicKey, address: SocketAddress, token: Option, ) { - self.inner.write().unwrap().set_liquidity_source_lsps1(node_id, address, token); + self.inner.write().expect("lock").set_liquidity_source_lsps1(node_id, address, token); } /// Configures the [`Node`] instance to source just-in-time inbound liquidity from the given @@ -978,7 +1105,7 @@ impl ArcedNodeBuilder { pub fn set_liquidity_source_lsps2( &self, node_id: PublicKey, address: SocketAddress, token: Option, ) { - self.inner.write().unwrap().set_liquidity_source_lsps2(node_id, address, token); + self.inner.write().expect("lock").set_liquidity_source_lsps2(node_id, address, token); } /// Configures the [`Node`] instance to provide an [LSPS2] service, issuing just-in-time @@ -988,12 +1115,12 @@ impl ArcedNodeBuilder { /// /// [LSPS2]: https://github.com/BitcoinAndLightningLayerSpecs/lsp/blob/main/LSPS2/README.md pub fn set_liquidity_provider_lsps2(&self, service_config: LSPS2ServiceConfig) { - self.inner.write().unwrap().set_liquidity_provider_lsps2(service_config); + self.inner.write().expect("lock").set_liquidity_provider_lsps2(service_config); } /// Sets the used storage directory path. pub fn set_storage_dir_path(&self, storage_dir_path: String) { - self.inner.write().unwrap().set_storage_dir_path(storage_dir_path); + self.inner.write().expect("lock").set_storage_dir_path(storage_dir_path); } /// Configures the [`Node`] instance to write logs to the filesystem. @@ -1012,29 +1139,29 @@ impl ArcedNodeBuilder { pub fn set_filesystem_logger( &self, log_file_path: Option, log_level: Option, ) { - self.inner.write().unwrap().set_filesystem_logger(log_file_path, log_level); + self.inner.write().expect("lock").set_filesystem_logger(log_file_path, log_level); } /// Configures the [`Node`] instance to write logs to the [`log`](https://crates.io/crates/log) facade. pub fn set_log_facade_logger(&self) { - self.inner.write().unwrap().set_log_facade_logger(); + self.inner.write().expect("lock").set_log_facade_logger(); } /// Configures the [`Node`] instance to write logs to the provided custom [`LogWriter`]. pub fn set_custom_logger(&self, log_writer: Arc) { - self.inner.write().unwrap().set_custom_logger(log_writer); + self.inner.write().expect("lock").set_custom_logger(log_writer); } /// Sets the Bitcoin network used. pub fn set_network(&self, network: Network) { - self.inner.write().unwrap().set_network(network); + self.inner.write().expect("lock").set_network(network); } /// Sets the IP address and TCP port on which [`Node`] will listen for incoming network connections. 
	pub fn set_listening_addresses(
		&self, listening_addresses: Vec<SocketAddress>,
	) -> Result<(), BuildError> {
-		self.inner.write().unwrap().set_listening_addresses(listening_addresses).map(|_| ())
+		self.inner.write().expect("lock").set_listening_addresses(listening_addresses).map(|_| ())
	}

	/// Sets the IP address and TCP port which [`Node`] will announce to the gossip network that it accepts connections on.
@@ -1045,7 +1172,11 @@ impl ArcedNodeBuilder {
	pub fn set_announcement_addresses(
		&self, announcement_addresses: Vec<SocketAddress>,
	) -> Result<(), BuildError> {
-		self.inner.write().unwrap().set_announcement_addresses(announcement_addresses).map(|_| ())
+		self.inner
+			.write()
+			.expect("lock")
+			.set_announcement_addresses(announcement_addresses)
+			.map(|_| ())
	}

	/// Configures the [`Node`] instance to use a Tor SOCKS proxy for outbound connections to peers with OnionV3 addresses.
	///
	/// **Note**: If unset, connecting to peer OnionV3 addresses will fail.
	pub fn set_tor_config(&self, tor_config: TorConfig) -> Result<(), BuildError> {
-		self.inner.write().unwrap().set_tor_config(tor_config).map(|_| ())
+		self.inner.write().expect("lock").set_tor_config(tor_config).map(|_| ())
	}

	/// Sets the node alias that will be used when broadcasting announcements to the gossip
@@ -1062,14 +1193,14 @@ impl ArcedNodeBuilder {
	///
	/// The provided alias must be a valid UTF-8 string and no longer than 32 bytes in total.
	pub fn set_node_alias(&self, node_alias: String) -> Result<(), BuildError> {
-		self.inner.write().unwrap().set_node_alias(node_alias).map(|_| ())
+		self.inner.write().expect("lock").set_node_alias(node_alias).map(|_| ())
	}

	/// Sets the role of the node in an asynchronous payments context.
	pub fn set_async_payments_role(
		&self, role: Option<AsyncPaymentsRole>,
	) -> Result<(), BuildError> {
-		self.inner.write().unwrap().set_async_payments_role(role).map(|_| ())
+		self.inner.write().expect("lock").set_async_payments_role(role).map(|_| ())
	}

	/// Configures the [`Node`] to resync chain data from genesis on first startup, recovering any
@@ -1078,13 +1209,45 @@ impl ArcedNodeBuilder {
	/// This should only be set on first startup when importing an older wallet from a previously
	/// used [`NodeEntropy`].
	pub fn set_wallet_recovery_mode(&self) {
-		self.inner.write().unwrap().set_wallet_recovery_mode();
+		self.inner.write().expect("lock").set_wallet_recovery_mode();
+	}
+
+	/// Configures a local SQLite backup store for disaster recovery.
+	///
+	/// When building with tiered storage, a SQLite store will be created at the
+	/// given directory path using [`SQLITE_BACKUP_DB_FILE_NAME`] as its database
+	/// file name. It receives a second durable copy of data written to the
+	/// primary store.
+	///
+	/// Writes and removals for primary-backed data only succeed once both the
+	/// primary and backup SQLite stores complete successfully.
+	///
+	/// The configured path must point to a distinct local directory from the
+	/// primary storage path. If the backup path equals the primary storage path,
+	/// building will fail with [`BuildError::BackupStorePathConflict`].
+	///
+	/// If not set, durable data will be stored only in the primary store.
+	///
+	/// [`SQLITE_BACKUP_DB_FILE_NAME`]: crate::io::sqlite_store::SQLITE_BACKUP_DB_FILE_NAME
+	pub fn set_backup_storage_dir_path(&self, backup_storage_dir_path: String) {
+		self.inner.write().expect("lock").set_backup_storage_dir_path(backup_storage_dir_path);
+	}
+
+	/// Configures the ephemeral store for non-critical, frequently-accessed data.
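+	///
+	/// A minimal sketch, assuming `ephemeral_store` is an existing `Arc<DynStore>`:
+	/// `builder.set_ephemeral_store(ephemeral_store);`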
+ /// + /// When building with tiered storage, this store is used for ephemeral data like + /// the network graph and scorer data to reduce latency for reads. Data stored here + /// can be rebuilt if lost. + /// + /// If not set, non-critical data will be stored in the primary store. + pub fn set_ephemeral_store(&self, ephemeral_store: Arc) { + self.inner.write().expect("lock").set_ephemeral_store(ephemeral_store); } /// Builds a [`Node`] instance with a [`SqliteStore`] backend and according to the options /// previously configured. pub fn build(&self, node_entropy: Arc) -> Result, BuildError> { - self.inner.read().unwrap().build(*node_entropy).map(Arc::new) + self.inner.read().expect("lock").build(*node_entropy).map(Arc::new) } /// Builds a [`Node`] instance with a [`FilesystemStore`] backend and according to the options @@ -1092,7 +1255,7 @@ impl ArcedNodeBuilder { pub fn build_with_fs_store( &self, node_entropy: Arc, ) -> Result, BuildError> { - self.inner.read().unwrap().build_with_fs_store(*node_entropy).map(Arc::new) + self.inner.read().expect("lock").build_with_fs_store(*node_entropy).map(Arc::new) } /// Builds a [`Node`] instance with a [VSS] backend and according to the options @@ -1118,7 +1281,7 @@ impl ArcedNodeBuilder { ) -> Result, BuildError> { self.inner .read() - .unwrap() + .expect("lock") .build_with_vss_store(*node_entropy, vss_url, store_id, fixed_headers) .map(Arc::new) } @@ -1151,7 +1314,7 @@ impl ArcedNodeBuilder { ) -> Result, BuildError> { self.inner .read() - .unwrap() + .expect("lock") .build_with_vss_store_and_lnurl_auth( *node_entropy, vss_url, @@ -1180,7 +1343,7 @@ impl ArcedNodeBuilder { ) -> Result, BuildError> { self.inner .read() - .unwrap() + .expect("lock") .build_with_vss_store_and_fixed_headers(*node_entropy, vss_url, store_id, fixed_headers) .map(Arc::new) } @@ -1203,7 +1366,7 @@ impl ArcedNodeBuilder { let adapter = Arc::new(crate::ffi::VssHeaderProviderAdapter::new(header_provider)); self.inner .read() - .unwrap() + .expect("lock") .build_with_vss_store_and_header_provider(*node_entropy, vss_url, store_id, adapter) .map(Arc::new) } @@ -1214,7 +1377,7 @@ impl ArcedNodeBuilder { pub fn build_with_store( &self, node_entropy: Arc, kv_store: S, ) -> Result, BuildError> { - self.inner.read().unwrap().build_with_store(*node_entropy, kv_store).map(Arc::new) + self.inner.read().expect("lock").build_with_store(*node_entropy, kv_store).map(Arc::new) } } @@ -1263,9 +1426,19 @@ fn build_with_store_internal( let (payment_store_res, node_metris_res, pending_payment_store_res) = runtime.block_on(async move { tokio::join!( - read_payments(&*kv_store_ref, Arc::clone(&logger_ref)), + read_all_objects( + &*kv_store_ref, + PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + Arc::clone(&logger_ref), + ), read_node_metrics(&*kv_store_ref, Arc::clone(&logger_ref)), - read_pending_payments(&*kv_store_ref, Arc::clone(&logger_ref)) + read_all_objects( + &*kv_store_ref, + PENDING_PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, + PENDING_PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, + Arc::clone(&logger_ref), + ) ) }); @@ -1310,6 +1483,7 @@ fn build_with_store_internal( Arc::clone(&logger), Arc::clone(&node_metrics), ) + .map_err(|()| BuildError::ChainSourceSetupFailed)? 
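+				// Chain source construction failures are surfaced as a dedicated
+				// `BuildError::ChainSourceSetupFailed` during `build()`.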
}, Some(ChainDataSourceConfig::Electrum { server_url, sync_config }) => { let sync_config = sync_config.unwrap_or(ElectrumSyncConfig::default()); @@ -1379,6 +1553,7 @@ fn build_with_store_internal( Arc::clone(&logger), Arc::clone(&node_metrics), ) + .map_err(|()| BuildError::ChainSourceSetupFailed)? }, }; let chain_source = Arc::new(chain_source); @@ -1610,7 +1785,7 @@ fn build_with_store_internal( // Restore external pathfinding scores from cache if possible. match external_scores_res { Ok(external_scores) => { - scorer.lock().unwrap().merge(external_scores, cur_time); + scorer.lock().expect("lock").merge(external_scores, cur_time); log_trace!(logger, "External scores from cache merged successfully"); }, Err(e) => { @@ -1639,11 +1814,6 @@ fn build_with_store_internal( // If we act as an LSPS2 service, we allow forwarding to unannounced channels. user_config.accept_forwards_to_priv_channels = true; - - // If we act as an LSPS2 service, set the HTLC-value-in-flight to 100% of the channel value - // to ensure we can forward the initial payment. - user_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = - 100; } if let Some(role) = async_payments_role { @@ -1677,8 +1847,8 @@ fn build_with_store_internal( user_config, channel_monitor_references, ); - let (_hash, channel_manager) = - <(BlockHash, ChannelManager)>::read(&mut &*reader, read_args).map_err(|e| { + let (_best_block, channel_manager) = + <(BlockLocator, ChannelManager)>::read(&mut &*reader, read_args).map_err(|e| { log_error!(logger, "Failed to read channel manager from store: {}", e); BuildError::ReadFailed })?; @@ -1686,7 +1856,7 @@ fn build_with_store_internal( } else { // We're starting a fresh node. let best_block = - chain_tip_opt.unwrap_or_else(|| BestBlock::from_network(config.network)); + chain_tip_opt.unwrap_or_else(|| BlockLocator::from_network(config.network)); let chain_params = ChainParameters { network: config.network.into(), best_block }; channelmanager::ChannelManager::new( @@ -1717,7 +1887,56 @@ fn build_with_store_internal( })?; } - let hrn_resolver = Arc::new(LDKOnionMessageDNSSECHrnResolver::new(Arc::clone(&network_graph))); + let hrn_resolver; + let mut blip32_resolver = None; + + let runtime_handle = runtime.handle(); + + let om_resolver: Arc = match &config + .hrn_config + .resolution_config + { + HRNResolverConfig::Blip32 => { + let hrn_res = + Arc::new(LDKOnionMessageDNSSECHrnResolver::new(Arc::clone(&network_graph))); + hrn_resolver = HRNResolver::Onion(Arc::clone(&hrn_res)); + blip32_resolver = Some(Arc::clone(&hrn_res)); + + hrn_res as Arc + }, + HRNResolverConfig::Dns { dns_server_address, enable_hrn_resolution_service, .. } => { + let addr = dns_server_address + .to_socket_addrs() + .map_err(|_| BuildError::DNSResolverSetupFailed)? + .next() + .ok_or_else(|| { + log_error!(logger, "No valid address found for: {}", dns_server_address); + BuildError::DNSResolverSetupFailed + })?; + let hrn_res = Arc::new(DNSHrnResolver(addr)); + hrn_resolver = HRNResolver::Local(hrn_res); + + if *enable_hrn_resolution_service { + if let Err(_) = may_announce_channel(&config) { + log_error!( + logger, + "HRN resolution service enabled, but node is not announceable." + ); + return Err(BuildError::DNSResolverSetupFailed); + } + + Arc::new(OMDomainResolver::::with_runtime( + addr, + None, + Some(runtime_handle.clone()), + )) as Arc + } else { + // The user wants to use DNS to pay others, but NOT provide a service to others. 
+ Arc::new(IgnoringMessageHandler {}) + as Arc + } + }, + }; // Initialize the PeerManager let onion_messenger: Arc = @@ -1730,7 +1949,7 @@ fn build_with_store_internal( message_router, Arc::clone(&channel_manager), Arc::clone(&channel_manager), - Arc::clone(&hrn_resolver), + Arc::clone(&om_resolver), IgnoringMessageHandler {}, )) } else { @@ -1742,7 +1961,7 @@ fn build_with_store_internal( message_router, Arc::clone(&channel_manager), Arc::clone(&channel_manager), - Arc::clone(&hrn_resolver), + Arc::clone(&om_resolver), IgnoringMessageHandler {}, )) }; @@ -1761,21 +1980,11 @@ fn build_with_store_internal( Arc::clone(&logger), )); - // Reset the RGS sync timestamp in case we somehow switch gossip sources - { - let mut locked_node_metrics = node_metrics.write().unwrap(); - locked_node_metrics.latest_rgs_snapshot_timestamp = None; - write_node_metrics(&*locked_node_metrics, &*kv_store, Arc::clone(&logger)) - .map_err(|e| { - log_error!(logger, "Failed writing to store: {}", e); - BuildError::WriteFailed - })?; - } p2p_source }, GossipSourceConfig::RapidGossipSync(rgs_server) => { let latest_sync_timestamp = - node_metrics.read().unwrap().latest_rgs_snapshot_timestamp.unwrap_or(0); + network_graph.get_last_rapid_gossip_sync_timestamp().unwrap_or(0); Arc::new(GossipSource::new_rgs( rgs_server.clone(), latest_sync_timestamp, @@ -1873,12 +2082,14 @@ fn build_with_store_internal( Arc::clone(&keys_manager), )); - let peer_manager_clone = Arc::downgrade(&peer_manager); - hrn_resolver.register_post_queue_action(Box::new(move || { - if let Some(upgraded_pointer) = peer_manager_clone.upgrade() { - upgraded_pointer.process_events(); - } - })); + if let Some(res) = blip32_resolver { + let pm_weak = Arc::downgrade(&peer_manager); + res.register_post_queue_action(Box::new(move || { + if let Some(upgraded_pm) = pm_weak.upgrade() { + upgraded_pm.process_events(); + } + })); + } liquidity_source.as_ref().map(|l| l.set_peer_manager(Arc::downgrade(&peer_manager))); diff --git a/src/chain/bitcoind.rs b/src/chain/bitcoind.rs index 26924d8af..7ece757ae 100644 --- a/src/chain/bitcoind.rs +++ b/src/chain/bitcoind.rs @@ -5,7 +5,7 @@ // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in // accordance with one or both of these licenses. 
-use std::collections::{HashMap, VecDeque}; +use std::collections::HashMap; use std::fmt; use std::future::Future; use std::sync::atomic::{AtomicU64, Ordering}; @@ -16,7 +16,7 @@ use base64::prelude::BASE64_STANDARD; use base64::Engine; use bitcoin::{BlockHash, FeeRate, Network, OutPoint, Transaction, Txid}; use lightning::chain::chaininterface::ConfirmationTarget as LdkConfirmationTarget; -use lightning::chain::{BestBlock, Listen}; +use lightning::chain::{BestBlock as BlockLocator, Listen}; use lightning::util::ser::Writeable; use lightning_block_sync::gossip::UtxoSource; use lightning_block_sync::http::{HttpClientError, JsonResponse}; @@ -25,7 +25,7 @@ use lightning_block_sync::poll::{ChainPoller, ChainTip, ValidatedBlockHeader}; use lightning_block_sync::rest::RestClient; use lightning_block_sync::rpc::{RpcClient, RpcClientError}; use lightning_block_sync::{ - BlockData, BlockHeaderData, BlockSource, BlockSourceError, BlockSourceErrorKind, Cache, + BlockData, BlockHeaderData, BlockSource, BlockSourceError, BlockSourceErrorKind, HeaderCache, SpvClient, }; use serde::Serialize; @@ -39,7 +39,7 @@ use crate::fee_estimator::{ apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, ConfirmationTarget, OnchainFeeEstimator, }; -use crate::io::utils::write_node_metrics; +use crate::io::utils::update_and_persist_node_metrics; use crate::logger::{log_bytes, log_debug, log_error, log_info, log_trace, LdkLogger, Logger}; use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::{Error, NodeMetrics}; @@ -49,7 +49,6 @@ const CHAIN_POLLING_TIMEOUT_SECS: u64 = 10; pub(super) struct BitcoindChainSource { api_client: Arc, - header_cache: tokio::sync::Mutex, latest_chain_tip: RwLock>, wallet_polling_status: Mutex, fee_estimator: Arc, @@ -72,12 +71,10 @@ impl BitcoindChainSource { rpc_password.clone(), )); - let header_cache = tokio::sync::Mutex::new(BoundedHeaderCache::new()); let latest_chain_tip = RwLock::new(None); let wallet_polling_status = Mutex::new(WalletSyncStatus::Completed); Self { api_client, - header_cache, latest_chain_tip, wallet_polling_status, fee_estimator, @@ -103,13 +100,11 @@ impl BitcoindChainSource { rpc_password, )); - let header_cache = tokio::sync::Mutex::new(BoundedHeaderCache::new()); let latest_chain_tip = RwLock::new(None); let wallet_polling_status = Mutex::new(WalletSyncStatus::Completed); Self { api_client, - header_cache, latest_chain_tip, wallet_polling_status, fee_estimator, @@ -132,7 +127,7 @@ impl BitcoindChainSource { // First register for the wallet polling status to make sure `Node::sync_wallets` calls // wait on the result before proceeding. { - let mut status_lock = self.wallet_polling_status.lock().unwrap(); + let mut status_lock = self.wallet_polling_status.lock().expect("lock"); if status_lock.register_or_subscribe_pending_sync().is_some() { debug_assert!(false, "Sync already in progress. 
This should never happen."); } @@ -153,14 +148,14 @@ impl BitcoindChainSource { return; } - let channel_manager_best_block_hash = channel_manager.current_best_block().block_hash; - let sweeper_best_block_hash = output_sweeper.current_best_block().block_hash; - let onchain_wallet_best_block_hash = onchain_wallet.current_best_block().block_hash; + let onchain_wallet_best_block = onchain_wallet.current_best_block(); + let channel_manager_best_block = channel_manager.current_best_block(); + let sweeper_best_block = output_sweeper.current_best_block(); let mut chain_listeners = vec![ - (onchain_wallet_best_block_hash, &*onchain_wallet as &(dyn Listen + Send + Sync)), - (channel_manager_best_block_hash, &*channel_manager as &(dyn Listen + Send + Sync)), - (sweeper_best_block_hash, &*output_sweeper as &(dyn Listen + Send + Sync)), + (onchain_wallet_best_block, &*onchain_wallet as &(dyn Listen + Send + Sync)), + (channel_manager_best_block, &*channel_manager as &(dyn Listen + Send + Sync)), + (sweeper_best_block, &*output_sweeper as &(dyn Listen + Send + Sync)), ]; // TODO: Eventually we might want to see if we can synchronize `ChannelMonitor`s @@ -168,49 +163,50 @@ impl BitcoindChainSource { // trivial as we load them on initialization (in the `Builder`) and only gain // network access during `start`. For now, we just make sure we get the worst known // block hash and sychronize them via `ChainMonitor`. - if let Some(worst_channel_monitor_block_hash) = chain_monitor + if let Some(worst_channel_monitor_best_block) = chain_monitor .list_monitors() .iter() .flat_map(|channel_id| chain_monitor.get_monitor(*channel_id)) .map(|m| m.current_best_block()) .min_by_key(|b| b.height) - .map(|b| b.block_hash) { chain_listeners.push(( - worst_channel_monitor_block_hash, + worst_channel_monitor_best_block, &*chain_monitor as &(dyn Listen + Send + Sync), )); } - let mut locked_header_cache = self.header_cache.lock().await; let now = SystemTime::now(); match synchronize_listeners( self.api_client.as_ref(), self.config.network, - &mut *locked_header_cache, chain_listeners.clone(), ) .await { - Ok(chain_tip) => { + Ok((_header_cache, chain_tip)) => { { + let elapsed_ms = now.elapsed().map(|d| d.as_millis()).unwrap_or(0); log_info!( self.logger, "Finished synchronizing listeners in {}ms", - now.elapsed().unwrap().as_millis() + elapsed_ms, ); - *self.latest_chain_tip.write().unwrap() = Some(chain_tip); + *self.latest_chain_tip.write().expect("lock") = Some(chain_tip); let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_lightning_wallet_sync_timestamp = - unix_time_secs_opt; - locked_node_metrics.latest_onchain_wallet_sync_timestamp = - unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger) - .unwrap_or_else(|e| { - log_error!(self.logger, "Failed to persist node metrics: {}", e); - }); + update_and_persist_node_metrics( + &self.node_metrics, + &*self.kv_store, + &*self.logger, + |m| { + m.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; + m.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; + }, + ) + .unwrap_or_else(|e| { + log_error!(self.logger, "Failed to persist node metrics: {}", e); + }); } break; }, @@ -262,7 +258,7 @@ impl BitcoindChainSource { } // Now propagate the initial result to unblock waiting subscribers. 
- self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(Ok(())); + self.wallet_polling_status.lock().expect("lock").propagate_result_to_subscribers(Ok(())); let mut chain_polling_interval = tokio::time::interval(Duration::from_secs(CHAIN_POLLING_INTERVAL_SECS)); @@ -329,7 +325,7 @@ impl BitcoindChainSource { } } - pub(super) async fn poll_best_block(&self) -> Result { + pub(super) async fn poll_best_block(&self) -> Result { self.poll_chain_tip().await.map(|tip| tip.to_best_block()) } @@ -346,7 +342,7 @@ impl BitcoindChainSource { match validate_res { Ok(tip) => { - *self.latest_chain_tip.write().unwrap() = Some(tip); + *self.latest_chain_tip.write().expect("lock") = Some(tip); Ok(tip) }, Err(e) => { @@ -361,7 +357,7 @@ impl BitcoindChainSource { chain_monitor: Arc, output_sweeper: Arc, ) -> Result<(), Error> { let receiver_res = { - let mut status_lock = self.wallet_polling_status.lock().unwrap(); + let mut status_lock = self.wallet_polling_status.lock().expect("lock"); status_lock.register_or_subscribe_pending_sync() }; @@ -383,7 +379,7 @@ impl BitcoindChainSource { ) .await; - self.wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); + self.wallet_polling_status.lock().expect("lock").propagate_result_to_subscribers(res); res } @@ -392,11 +388,10 @@ impl BitcoindChainSource { &self, onchain_wallet: Arc, channel_manager: Arc, chain_monitor: Arc, output_sweeper: Arc, ) -> Result<(), Error> { - let latest_chain_tip_opt = self.latest_chain_tip.read().unwrap().clone(); + let latest_chain_tip_opt = self.latest_chain_tip.read().expect("lock").clone(); let chain_tip = if let Some(tip) = latest_chain_tip_opt { tip } else { self.poll_chain_tip().await? }; - let mut locked_header_cache = self.header_cache.lock().await; let chain_poller = ChainPoller::new(Arc::clone(&self.api_client), self.config.network); let chain_listener = ChainListener { onchain_wallet: Arc::clone(&onchain_wallet), @@ -405,17 +400,14 @@ impl BitcoindChainSource { output_sweeper, }; let mut spv_client = - SpvClient::new(chain_tip, chain_poller, &mut *locked_header_cache, &chain_listener); + SpvClient::new(chain_tip, chain_poller, HeaderCache::new(), &chain_listener); let now = SystemTime::now(); match spv_client.poll_best_tip().await { Ok((ChainTip::Better(tip), true)) => { - log_trace!( - self.logger, - "Finished polling best tip in {}ms", - now.elapsed().unwrap().as_millis() - ); - *self.latest_chain_tip.write().unwrap() = Some(tip); + let elapsed_ms = now.elapsed().map(|d| d.as_millis()).unwrap_or(0); + log_trace!(self.logger, "Finished polling best tip in {}ms", elapsed_ms); + *self.latest_chain_tip.write().expect("lock") = Some(tip); }, Ok(_) => {}, Err(e) => { @@ -434,12 +426,13 @@ impl BitcoindChainSource { .await { Ok((unconfirmed_txs, evicted_txids)) => { + let elapsed_ms = now.elapsed().map(|d| d.as_millis()).unwrap_or(0); log_trace!( self.logger, "Finished polling mempool of size {} and {} evicted transactions in {}ms", unconfirmed_txs.len(), evicted_txids.len(), - now.elapsed().unwrap().as_millis() + elapsed_ms, ); onchain_wallet.apply_mempool_txs(unconfirmed_txs, evicted_txids).unwrap_or_else( |e| { @@ -455,11 +448,10 @@ impl BitcoindChainSource { let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; - locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; - - 
write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; + update_and_persist_node_metrics(&self.node_metrics, &*self.kv_store, &*self.logger, |m| { + m.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; + m.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; + })?; Ok(()) } @@ -569,11 +561,9 @@ impl BitcoindChainSource { let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; - } + update_and_persist_node_metrics(&self.node_metrics, &*self.kv_store, &*self.logger, |m| { + m.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt + })?; Ok(()) } @@ -1351,46 +1341,6 @@ pub(crate) enum FeeRateEstimationMode { Conservative, } -const MAX_HEADER_CACHE_ENTRIES: usize = 100; - -pub(crate) struct BoundedHeaderCache { - header_map: HashMap, - recently_seen: VecDeque, -} - -impl BoundedHeaderCache { - pub(crate) fn new() -> Self { - let header_map = HashMap::new(); - let recently_seen = VecDeque::new(); - Self { header_map, recently_seen } - } -} - -impl Cache for BoundedHeaderCache { - fn look_up(&self, block_hash: &BlockHash) -> Option<&ValidatedBlockHeader> { - self.header_map.get(block_hash) - } - - fn block_connected(&mut self, block_hash: BlockHash, block_header: ValidatedBlockHeader) { - self.recently_seen.push_back(block_hash); - self.header_map.insert(block_hash, block_header); - - if self.header_map.len() >= MAX_HEADER_CACHE_ENTRIES { - // Keep dropping old entries until we've actually removed a header entry. - while let Some(oldest_entry) = self.recently_seen.pop_front() { - if self.header_map.remove(&oldest_entry).is_some() { - break; - } - } - } - } - - fn block_disconnected(&mut self, block_hash: &BlockHash) -> Option { - self.recently_seen.retain(|e| e != block_hash); - self.header_map.remove(block_hash) - } -} - pub(crate) struct ChainListener { pub(crate) onchain_wallet: Arc, pub(crate) channel_manager: Arc, diff --git a/src/chain/electrum.rs b/src/chain/electrum.rs index 7b08c3845..c62cbb526 100644 --- a/src/chain/electrum.rs +++ b/src/chain/electrum.rs @@ -30,7 +30,7 @@ use crate::fee_estimator::{ apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, ConfirmationTarget, OnchainFeeEstimator, }; -use crate::io::utils::write_node_metrics; +use crate::io::utils::update_and_persist_node_metrics; use crate::logger::{log_bytes, log_debug, log_error, log_trace, LdkLogger, Logger}; use crate::runtime::Runtime; use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; @@ -76,7 +76,7 @@ impl ElectrumChainSource { } pub(super) fn start(&self, runtime: Arc) -> Result<(), Error> { - self.electrum_runtime_status.write().unwrap().start( + self.electrum_runtime_status.write().expect("lock").start( self.server_url.clone(), self.sync_config.clone(), Arc::clone(&runtime), @@ -86,14 +86,14 @@ impl ElectrumChainSource { } pub(super) fn stop(&self) { - self.electrum_runtime_status.write().unwrap().stop(); + self.electrum_runtime_status.write().expect("lock").stop(); } pub(crate) async fn sync_onchain_wallet( &self, onchain_wallet: Arc, ) -> Result<(), Error> { let receiver_res = { - let mut status_lock = self.onchain_wallet_sync_status.lock().unwrap(); + let mut status_lock = self.onchain_wallet_sync_status.lock().expect("lock"); 
status_lock.register_or_subscribe_pending_sync() }; if let Some(mut sync_receiver) = receiver_res { @@ -107,26 +107,27 @@ impl ElectrumChainSource { let res = self.sync_onchain_wallet_inner(onchain_wallet).await; - self.onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + self.onchain_wallet_sync_status.lock().expect("lock").propagate_result_to_subscribers(res); res } async fn sync_onchain_wallet_inner(&self, onchain_wallet: Arc) -> Result<(), Error> { - let electrum_client: Arc = - if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() { - Arc::clone(client) - } else { - debug_assert!( - false, - "We should have started the chain source before syncing the onchain wallet" - ); - return Err(Error::FeerateEstimationUpdateFailed); - }; + let electrum_client: Arc = if let Some(client) = + self.electrum_runtime_status.read().expect("lock").client().as_ref() + { + Arc::clone(client) + } else { + debug_assert!( + false, + "We should have started the chain source before syncing the onchain wallet" + ); + return Err(Error::FeerateEstimationUpdateFailed); + }; // If this is our first sync, do a full scan with the configured gap limit. // Otherwise just do an incremental sync. let incremental_sync = - self.node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); + self.node_metrics.read().expect("lock").latest_onchain_wallet_sync_timestamp.is_some(); let apply_wallet_update = |update_res: Result, now: Instant| match update_res { @@ -140,16 +141,12 @@ impl ElectrumChainSource { ); let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_onchain_wallet_sync_timestamp = - unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - &*self.kv_store, - &*self.logger, - )?; - } + update_and_persist_node_metrics( + &self.node_metrics, + &*self.kv_store, + &*self.logger, + |m| m.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt, + )?; Ok(()) }, Err(e) => Err(e), @@ -184,7 +181,7 @@ impl ElectrumChainSource { output_sweeper: Arc, ) -> Result<(), Error> { let receiver_res = { - let mut status_lock = self.lightning_wallet_sync_status.lock().unwrap(); + let mut status_lock = self.lightning_wallet_sync_status.lock().expect("lock"); status_lock.register_or_subscribe_pending_sync() }; if let Some(mut sync_receiver) = receiver_res { @@ -199,7 +196,10 @@ impl ElectrumChainSource { let res = self.sync_lightning_wallet_inner(channel_manager, chain_monitor, output_sweeper).await; - self.lightning_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + self.lightning_wallet_sync_status + .lock() + .expect("lock") + .propagate_result_to_subscribers(res); res } @@ -217,27 +217,29 @@ impl ElectrumChainSource { sync_sweeper as Arc, ]; - let electrum_client: Arc = - if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() { - Arc::clone(client) - } else { - debug_assert!( - false, - "We should have started the chain source before syncing the lightning wallet" - ); - return Err(Error::TxSyncFailed); - }; + let electrum_client: Arc = if let Some(client) = + self.electrum_runtime_status.read().expect("lock").client().as_ref() + { + Arc::clone(client) + } else { + debug_assert!( + false, + "We should have started the chain source before syncing the lightning wallet" + ); + return Err(Error::TxSyncFailed); + }; let res = 
electrum_client.sync_confirmables(confirmables).await; if let Ok(_) = res { let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; - } + update_and_persist_node_metrics( + &self.node_metrics, + &*self.kv_store, + &*self.logger, + |m| m.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt, + )?; } res @@ -245,7 +247,7 @@ impl ElectrumChainSource { pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { let electrum_client: Arc = if let Some(client) = - self.electrum_runtime_status.read().unwrap().client().as_ref() + self.electrum_runtime_status.read().expect("lock").client().as_ref() { Arc::clone(client) } else { @@ -266,23 +268,22 @@ impl ElectrumChainSource { let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; - } + update_and_persist_node_metrics(&self.node_metrics, &*self.kv_store, &*self.logger, |m| { + m.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt + })?; Ok(()) } pub(crate) async fn process_broadcast_package(&self, package: Vec) { - let electrum_client: Arc = - if let Some(client) = self.electrum_runtime_status.read().unwrap().client().as_ref() { - Arc::clone(client) - } else { - debug_assert!(false, "We should have started the chain source before broadcasting"); - return; - }; + let electrum_client: Arc = if let Some(client) = + self.electrum_runtime_status.read().expect("lock").client().as_ref() + { + Arc::clone(client) + } else { + debug_assert!(false, "We should have started the chain source before broadcasting"); + return; + }; for tx in package { electrum_client.broadcast(tx).await; @@ -292,10 +293,10 @@ impl ElectrumChainSource { impl Filter for ElectrumChainSource { fn register_tx(&self, txid: &Txid, script_pubkey: &Script) { - self.electrum_runtime_status.write().unwrap().register_tx(txid, script_pubkey) + self.electrum_runtime_status.write().expect("lock").register_tx(txid, script_pubkey) } fn register_output(&self, output: lightning::chain::WatchedOutput) { - self.electrum_runtime_status.write().unwrap().register_output(output) + self.electrum_runtime_status.write().expect("lock").register_output(output) } } diff --git a/src/chain/esplora.rs b/src/chain/esplora.rs index 245db72f6..5825a0984 100644 --- a/src/chain/esplora.rs +++ b/src/chain/esplora.rs @@ -22,7 +22,7 @@ use crate::fee_estimator::{ apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, OnchainFeeEstimator, }; -use crate::io::utils::write_node_metrics; +use crate::io::utils::update_and_persist_node_metrics; use crate::logger::{log_bytes, log_debug, log_error, log_trace, LdkLogger, Logger}; use crate::types::{ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; use crate::{Error, NodeMetrics}; @@ -45,7 +45,7 @@ impl EsploraChainSource { server_url: String, headers: HashMap, sync_config: EsploraSyncConfig, fee_estimator: Arc, kv_store: Arc, config: Arc, logger: Arc, node_metrics: Arc>, - ) -> Self { + ) -> Result { let mut client_builder = esplora_client::Builder::new(&server_url); 
client_builder = client_builder.timeout(sync_config.timeouts_config.per_request_timeout_secs as u64); @@ -54,13 +54,15 @@ impl EsploraChainSource { client_builder = client_builder.header(header_name, header_value); } - let esplora_client = client_builder.build_async().unwrap(); + let esplora_client = client_builder.build_async().map_err(|e| { + log_error!(logger, "Failed to build Esplora client: {}", e); + })?; let tx_sync = Arc::new(EsploraSyncClient::from_client(esplora_client.clone(), Arc::clone(&logger))); let onchain_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); let lightning_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); - Self { + Ok(Self { sync_config, esplora_client, onchain_wallet_sync_status, @@ -71,14 +73,14 @@ impl EsploraChainSource { config, logger, node_metrics, - } + }) } pub(super) async fn sync_onchain_wallet( &self, onchain_wallet: Arc, ) -> Result<(), Error> { let receiver_res = { - let mut status_lock = self.onchain_wallet_sync_status.lock().unwrap(); + let mut status_lock = self.onchain_wallet_sync_status.lock().expect("lock"); status_lock.register_or_subscribe_pending_sync() }; if let Some(mut sync_receiver) = receiver_res { @@ -92,7 +94,7 @@ impl EsploraChainSource { let res = self.sync_onchain_wallet_inner(onchain_wallet).await; - self.onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + self.onchain_wallet_sync_status.lock().expect("lock").propagate_result_to_subscribers(res); res } @@ -101,7 +103,7 @@ impl EsploraChainSource { // If this is our first sync, do a full scan with the configured gap limit. // Otherwise just do an incremental sync. let incremental_sync = - self.node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); + self.node_metrics.read().expect("lock").latest_onchain_wallet_sync_timestamp.is_some(); macro_rules! 
get_and_apply_wallet_update { ($sync_future: expr) => {{ @@ -120,16 +122,13 @@ impl EsploraChainSource { .duration_since(UNIX_EPOCH) .ok() .map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; - write_node_metrics( - &*locked_node_metrics, - &*self.kv_store, - &*self.logger - )?; - } - Ok(()) + update_and_persist_node_metrics( + &self.node_metrics, + &*self.kv_store, + &*self.logger, + |m| m.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt, + )?; + Ok(()) }, Err(e) => Err(e), }, @@ -207,7 +206,7 @@ impl EsploraChainSource { output_sweeper: Arc, ) -> Result<(), Error> { let receiver_res = { - let mut status_lock = self.lightning_wallet_sync_status.lock().unwrap(); + let mut status_lock = self.lightning_wallet_sync_status.lock().expect("lock"); status_lock.register_or_subscribe_pending_sync() }; if let Some(mut sync_receiver) = receiver_res { @@ -222,7 +221,10 @@ impl EsploraChainSource { let res = self.sync_lightning_wallet_inner(channel_manager, chain_monitor, output_sweeper).await; - self.lightning_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + self.lightning_wallet_sync_status + .lock() + .expect("lock") + .propagate_result_to_subscribers(res); res } @@ -258,12 +260,12 @@ impl EsploraChainSource { let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_lightning_wallet_sync_timestamp = - unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; - } + update_and_persist_node_metrics( + &self.node_metrics, + &*self.kv_store, + &*self.logger, + |m| m.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt, + )?; Ok(()) }, Err(e) => { @@ -343,11 +345,9 @@ impl EsploraChainSource { ); let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = self.node_metrics.write().unwrap(); - locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, &*self.kv_store, &*self.logger)?; - } + update_and_persist_node_metrics(&self.node_metrics, &*self.kv_store, &*self.logger, |m| { + m.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt + })?; Ok(()) } diff --git a/src/chain/mod.rs b/src/chain/mod.rs index 49c011a78..b70620b99 100644 --- a/src/chain/mod.rs +++ b/src/chain/mod.rs @@ -14,7 +14,7 @@ use std::sync::{Arc, Mutex, RwLock}; use std::time::Duration; use bitcoin::{Script, Txid}; -use lightning::chain::{BestBlock, Filter}; +use lightning::chain::{BestBlock as BlockLocator, Filter}; use crate::chain::bitcoind::{BitcoindChainSource, UtxoSourceClient}; use crate::chain::electrum::ElectrumChainSource; @@ -101,7 +101,7 @@ impl ChainSource { fee_estimator: Arc, tx_broadcaster: Arc, kv_store: Arc, config: Arc, logger: Arc, node_metrics: Arc>, - ) -> (Self, Option) { + ) -> Result<(Self, Option), ()> { let esplora_chain_source = EsploraChainSource::new( server_url, headers, @@ -111,10 +111,10 @@ impl ChainSource { config, Arc::clone(&logger), node_metrics, - ); + )?; let kind = ChainSourceKind::Esplora(esplora_chain_source); let registered_txids = Mutex::new(Vec::new()); - (Self { kind, registered_txids, tx_broadcaster, logger }, None) + Ok((Self { kind, registered_txids, tx_broadcaster, logger }, None)) } pub(crate) fn 
new_electrum( @@ -122,7 +122,7 @@ impl ChainSource { fee_estimator: Arc, tx_broadcaster: Arc, kv_store: Arc, config: Arc, logger: Arc, node_metrics: Arc>, - ) -> (Self, Option) { + ) -> (Self, Option) { let electrum_chain_source = ElectrumChainSource::new( server_url, sync_config, @@ -142,7 +142,7 @@ impl ChainSource { fee_estimator: Arc, tx_broadcaster: Arc, kv_store: Arc, config: Arc, logger: Arc, node_metrics: Arc>, - ) -> (Self, Option) { + ) -> (Self, Option) { let bitcoind_chain_source = BitcoindChainSource::new_rpc( rpc_host, rpc_port, @@ -165,7 +165,7 @@ impl ChainSource { fee_estimator: Arc, tx_broadcaster: Arc, kv_store: Arc, config: Arc, rest_client_config: BitcoindRestClientConfig, logger: Arc, node_metrics: Arc>, - ) -> (Self, Option) { + ) -> (Self, Option) { let bitcoind_chain_source = BitcoindChainSource::new_rest( rpc_host, rpc_port, @@ -215,7 +215,7 @@ impl ChainSource { } pub(crate) fn registered_txids(&self) -> Vec { - self.registered_txids.lock().unwrap().clone() + self.registered_txids.lock().expect("lock").clone() } pub(crate) fn is_transaction_based(&self) -> bool { @@ -472,7 +472,7 @@ impl ChainSource { impl Filter for ChainSource { fn register_tx(&self, txid: &Txid, script_pubkey: &Script) { - self.registered_txids.lock().unwrap().push(*txid); + self.registered_txids.lock().expect("lock").push(*txid); match &self.kind { ChainSourceKind::Esplora(esplora_chain_source) => { esplora_chain_source.register_tx(txid, script_pubkey) diff --git a/src/config.rs b/src/config.rs index 71e4d2314..014d6216a 100644 --- a/src/config.rs +++ b/src/config.rs @@ -8,6 +8,7 @@ //! Objects for configuring the node. use std::fmt; +use std::str::FromStr; use std::time::Duration; use bitcoin::secp256k1::PublicKey; @@ -117,17 +118,18 @@ pub(crate) const LNURL_AUTH_TIMEOUT_SECS: u64 = 15; /// ### Defaults /// /// | Parameter | Value | -/// |----------------------------------------|--------------------| -/// | `storage_dir_path` | /tmp/ldk_node/ | -/// | `network` | Bitcoin | -/// | `listening_addresses` | None | -/// | `announcement_addresses` | None | -/// | `node_alias` | None | -/// | `trusted_peers_0conf` | [] | -/// | `probing_liquidity_limit_multiplier` | 3 | -/// | `anchor_channels_config` | Some(..) | -/// | `route_parameters` | None | -/// | `tor_config` | None | +/// |----------------------------------------|--------------------------------------| +/// | `storage_dir_path` | /tmp/ldk_node/ | +/// | `network` | Bitcoin | +/// | `listening_addresses` | None | +/// | `announcement_addresses` | None | +/// | `node_alias` | None | +/// | `trusted_peers_0conf` | [] | +/// | `probing_liquidity_limit_multiplier` | 3 | +/// | `anchor_channels_config` | Some(..) | +/// | `route_parameters` | None | +/// | `tor_config` | None | +/// | `hrn_config` | HumanReadableNamesConfig::default() | /// /// See [`AnchorChannelsConfig`] and [`RouteParametersConfig`] for more information regarding their /// respective default values. @@ -199,6 +201,10 @@ pub struct Config { /// /// **Note**: If unset, connecting to peer OnionV3 addresses will fail. pub tor_config: Option, + /// Configuration options for Human-Readable Names ([BIP 353]). 
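+	///
+	/// For example, to resolve names via a custom DNS server (an illustrative
+	/// sketch; see [`HumanReadableNamesConfig`] and [`HRNResolverConfig`] below):
+	///
+	/// ```ignore
+	/// config.hrn_config = HumanReadableNamesConfig {
+	///     resolution_config: HRNResolverConfig::Dns {
+	///         dns_server_address: SocketAddress::from_str("8.8.8.8:53").unwrap(),
+	///         enable_hrn_resolution_service: false,
+	///     },
+	/// };
+	/// ```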
+ /// + /// [BIP 353]: https://github.com/bitcoin/bips/blob/master/bip-0353.mediawiki + pub hrn_config: HumanReadableNamesConfig, } impl Default for Config { @@ -214,6 +220,62 @@ impl Default for Config { tor_config: None, route_parameters: None, node_alias: None, + hrn_config: HumanReadableNamesConfig::default(), + } + } +} + +/// Configuration options for how our node resolves Human-Readable Names (BIP 353). +/// +/// [BIP 353]: https://github.com/bitcoin/bips/blob/master/bip-0353.mediawiki +#[derive(Debug, Clone)] +#[cfg_attr(feature = "uniffi", derive(uniffi::Enum))] +pub enum HRNResolverConfig { + /// Use [bLIP-32] to ask other nodes to resolve names for us. + /// + /// [bLIP-32]: https://github.com/lightning/blips/blob/master/blip-0032.md + Blip32, + /// Resolve names locally using a specific DNS server. + Dns { + /// The IP and port of the DNS server. + /// + /// **Default:** `8.8.8.8:53` (Google Public DNS) + dns_server_address: SocketAddress, + /// If set to true, this allows others to use our node for HRN resolutions. + /// + /// **Default:** `false` + /// + /// **Note:** Enabling `enable_hrn_resolution_service` allows your node to act + /// as a resolver for the rest of the network. For this to work, your node must + /// be announceable (publicly visible in the network graph) so that other nodes + /// can route resolution requests to you via Onion Messages. This does not affect + /// your node's ability to resolve names for its own outgoing payments. + enable_hrn_resolution_service: bool, + }, +} + +/// Configuration options for Human-Readable Names ([BIP 353]). +/// +/// [BIP 353]: https://github.com/bitcoin/bips/blob/master/bip-0353.mediawiki +#[derive(Debug, Clone)] +#[cfg_attr(feature = "uniffi", derive(uniffi::Record))] +pub struct HumanReadableNamesConfig { + /// This sets how our node resolves names when we want to send a payment. + /// + /// By default, this uses the `Dns` variant with the following settings: + /// * **DNS Server**: `8.8.8.8:53` (Google Public DNS) + /// * **Resolution Service**: Disabled (`false`) + pub resolution_config: HRNResolverConfig, +} + +impl Default for HumanReadableNamesConfig { + fn default() -> Self { + HumanReadableNamesConfig { + resolution_config: HRNResolverConfig::Dns { + dns_server_address: SocketAddress::from_str("8.8.8.8:53") + .expect("Socket address conversion failed."), + enable_hrn_resolution_service: false, + }, } } } diff --git a/src/connection.rs b/src/connection.rs index a1d24e36d..b8946ffe3 100644 --- a/src/connection.rs +++ b/src/connection.rs @@ -238,7 +238,7 @@ where fn register_or_subscribe_pending_connection( &self, node_id: &PublicKey, ) -> Option>> { - let mut pending_connections_lock = self.pending_connections.lock().unwrap(); + let mut pending_connections_lock = self.pending_connections.lock().expect("lock"); match pending_connections_lock.entry(*node_id) { hash_map::Entry::Occupied(mut entry) => { let (tx, rx) = tokio::sync::oneshot::channel(); @@ -254,7 +254,7 @@ where fn propagate_result_to_subscribers(&self, node_id: &PublicKey, res: Result<(), Error>) { // Send the result to any other tasks that might be waiting on it by now. 
- let mut pending_connections_lock = self.pending_connections.lock().unwrap(); + let mut pending_connections_lock = self.pending_connections.lock().expect("lock"); if let Some(connection_ready_senders) = pending_connections_lock.remove(node_id) { for sender in connection_ready_senders { let _ = sender.send(res).map_err(|e| { diff --git a/src/data_store.rs b/src/data_store.rs index ac5c78fb7..f80ec0891 100644 --- a/src/data_store.rs +++ b/src/data_store.rs @@ -65,7 +65,7 @@ where } pub(crate) fn insert(&self, object: SO) -> Result { - let mut locked_objects = self.objects.lock().unwrap(); + let mut locked_objects = self.objects.lock().expect("lock"); self.persist(&object)?; let updated = locked_objects.insert(object.id(), object).is_some(); @@ -73,7 +73,7 @@ where } pub(crate) fn insert_or_update(&self, object: SO) -> Result { - let mut locked_objects = self.objects.lock().unwrap(); + let mut locked_objects = self.objects.lock().expect("lock"); let updated; match locked_objects.entry(object.id()) { @@ -95,7 +95,7 @@ where } pub(crate) fn remove(&self, id: &SO::Id) -> Result<(), Error> { - let removed = self.objects.lock().unwrap().remove(id).is_some(); + let removed = self.objects.lock().expect("lock").remove(id).is_some(); if removed { let store_key = id.encode_to_hex_str(); KVStoreSync::remove( @@ -121,11 +121,11 @@ where } pub(crate) fn get(&self, id: &SO::Id) -> Option { - self.objects.lock().unwrap().get(id).cloned() + self.objects.lock().expect("lock").get(id).cloned() } pub(crate) fn update(&self, update: SO::Update) -> Result { - let mut locked_objects = self.objects.lock().unwrap(); + let mut locked_objects = self.objects.lock().expect("lock"); if let Some(object) = locked_objects.get_mut(&update.id()) { let updated = object.update(update); @@ -141,7 +141,7 @@ where } pub(crate) fn list_filter bool>(&self, f: F) -> Vec { - self.objects.lock().unwrap().values().filter(f).cloned().collect::>() + self.objects.lock().expect("lock").values().filter(f).cloned().collect::>() } fn persist(&self, object: &SO) -> Result<(), Error> { @@ -169,7 +169,7 @@ where } pub(crate) fn contains_key(&self, id: &SO::Id) -> bool { - self.objects.lock().unwrap().contains_key(id) + self.objects.lock().expect("lock").contains_key(id) } } diff --git a/src/event.rs b/src/event.rs index f06d701bc..65fe683ec 100644 --- a/src/event.rs +++ b/src/event.rs @@ -18,20 +18,18 @@ use lightning::events::bump_transaction::BumpTransactionEvent; #[cfg(not(feature = "uniffi"))] use lightning::events::PaidBolt12Invoice; use lightning::events::{ - ClosureReason, Event as LdkEvent, FundingInfo, PaymentFailureReason, PaymentPurpose, - ReplayEvent, + ClosureReason, Event as LdkEvent, FundingInfo, HTLCLocator as LdkHtlcLocator, + PaymentFailureReason, PaymentPurpose, ReplayEvent, }; -use lightning::impl_writeable_tlv_based_enum; -use lightning::ln::channelmanager::PaymentId; +use lightning::ln::channelmanager::{PaymentId, TrustedChannelFeatures}; use lightning::ln::types::ChannelId; use lightning::routing::gossip::NodeId; use lightning::sign::EntropySource; -use lightning::util::config::{ - ChannelConfigOverrides, ChannelConfigUpdate, ChannelHandshakeConfigUpdate, -}; +use lightning::util::config::{ChannelConfigOverrides, ChannelConfigUpdate}; use lightning::util::errors::APIError; use lightning::util::persist::KVStore; use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer}; +use lightning::{impl_writeable_tlv_based, impl_writeable_tlv_based_enum}; use lightning_liquidity::lsps2::utils::compute_opening_fee; use 
lightning_types::payment::{PaymentHash, PaymentPreimage}; @@ -61,6 +59,40 @@ use crate::{ UserChannelId, }; +/// Identifies the channel and counterparty that a HTLC was processed with. +#[derive(Debug, Clone, PartialEq, Eq)] +#[cfg_attr(feature = "uniffi", derive(uniffi::Record))] +pub struct HTLCLocator { + /// The channel that the HTLC was sent or received on. + pub channel_id: ChannelId, + /// The `user_channel_id` for the channel. + /// + /// Will only be `None` for events serialized with LDK Node v0.3.0 or prior, or if the + /// payment was settled via an on-chain transaction. + pub user_channel_id: Option, + /// The node id of the counterparty for this HTLC. + /// + /// This is only `None` for HTLCs received prior to LDK Node v0.5 or for events serialized by + /// versions prior to v0.5. + pub node_id: Option, +} + +impl_writeable_tlv_based!(HTLCLocator, { + (1, channel_id, required), + (3, user_channel_id, option), + (5, node_id, option), +}); + +impl From for HTLCLocator { + fn from(value: LdkHtlcLocator) -> Self { + HTLCLocator { + channel_id: value.channel_id, + user_channel_id: value.user_channel_id.map(|u| UserChannelId(u)), + node_id: value.node_id, + } + } +} + /// An event emitted by [`Node`], which should be handled by the user. /// /// [`Node`]: [`crate::Node`] @@ -128,29 +160,14 @@ pub enum Event { }, /// A payment has been forwarded. PaymentForwarded { - /// The channel id of the incoming channel between the previous node and us. - prev_channel_id: ChannelId, - /// The channel id of the outgoing channel between the next node and us. - next_channel_id: ChannelId, - /// The `user_channel_id` of the incoming channel between the previous node and us. - /// - /// Will only be `None` for events serialized with LDK Node v0.3.0 or prior. - prev_user_channel_id: Option, - /// The `user_channel_id` of the outgoing channel between the next node and us. - /// - /// This will be `None` if the payment was settled via an on-chain transaction. See the - /// caveat described for the `total_fee_earned_msat` field. - next_user_channel_id: Option, - /// The node id of the previous node. - /// - /// This is only `None` for HTLCs received prior to LDK Node v0.5 or for events serialized by - /// versions prior to v0.5. - prev_node_id: Option, - /// The node id of the next node. - /// - /// This is only `None` for HTLCs received prior to LDK Node v0.5 or for events serialized by - /// versions prior to v0.5. - next_node_id: Option, + /// The set of incoming HTLCs that were forwarded to our node. Contains a single HTLC for + /// source-routed payments, and may contain multiple HTLCs when we acted as a trampoline + /// router. + prev_htlcs: Vec, + /// The set of outgoing HTLCs forwarded by our node. Contains a single HTLC for regular + /// source-routed payments, and may contain multiple HTLCs when we acted as a trampoline + /// router. + next_htlcs: Vec, /// The total fee, in milli-satoshis, which was earned as a result of the payment. /// /// Note that if we force-closed the channel over which we forwarded an HTLC while the HTLC @@ -323,16 +340,27 @@ impl_writeable_tlv_based_enum!(Event, (7, custom_records, optional_vec), }, (7, PaymentForwarded) => { - (0, prev_channel_id, required), - (1, prev_node_id, option), - (2, next_channel_id, required), - (3, next_node_id, option), - (4, prev_user_channel_id, option), - (6, next_user_channel_id, option), + // Legacy fields: read from old data, never written. 
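+		// When the new `prev_htlcs`/`next_htlcs` TLVs (types 15/17) are absent,
+		// the `default_value_vec` expressions below rebuild single-element vecs
+		// from these legacy per-field TLVs, so events written by older versions
+		// still decode into the new representation.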
+ (0, legacy_prev_channel_id, (legacy, ChannelId, |_| Ok(()), |_: &Event| None::>)), + (1, legacy_prev_node_id, (legacy, PublicKey, |_| Ok(()), |_: &Event| None::>)), + (2, legacy_next_channel_id, (legacy, ChannelId, |_| Ok(()), |_: &Event| None::>)), + (3, legacy_next_node_id, (legacy, PublicKey, |_| Ok(()), |_: &Event| None::>)), + (4, legacy_prev_user_channel_id, (legacy, u128, |_| Ok(()), |_: &Event| None::>)), + (6, legacy_next_user_channel_id, (legacy, u128, |_| Ok(()), |_: &Event| None::>)), (8, total_fee_earned_msat, option), (10, skimmed_fee_msat, option), (12, claim_from_onchain_tx, required), (14, outbound_amount_forwarded_msat, option), + (15, prev_htlcs, (default_value_vec, vec![HTLCLocator { + channel_id: legacy_prev_channel_id.ok_or(lightning::ln::msgs::DecodeError::InvalidValue)?, + user_channel_id: legacy_prev_user_channel_id.map(UserChannelId), + node_id: legacy_prev_node_id, + }])), + (17, next_htlcs, (default_value_vec, vec![HTLCLocator { + channel_id: legacy_next_channel_id.ok_or(lightning::ln::msgs::DecodeError::InvalidValue)?, + user_channel_id: legacy_next_user_channel_id.map(UserChannelId), + node_id: legacy_next_node_id, + }])), }, (8, SplicePending) => { (1, channel_id, required), @@ -370,21 +398,21 @@ where pub(crate) async fn add_event(&self, event: Event) -> Result<(), Error> { let data = { - let mut locked_queue = self.queue.lock().unwrap(); + let mut locked_queue = self.queue.lock().expect("lock"); locked_queue.push_back(event); EventQueueSerWrapper(&locked_queue).encode() }; self.persist_queue(data).await?; - if let Some(waker) = self.waker.lock().unwrap().take() { + if let Some(waker) = self.waker.lock().expect("lock").take() { waker.wake(); } Ok(()) } pub(crate) fn next_event(&self) -> Option { - let locked_queue = self.queue.lock().unwrap(); + let locked_queue = self.queue.lock().expect("lock"); locked_queue.front().cloned() } @@ -394,14 +422,14 @@ where pub(crate) async fn event_handled(&self) -> Result<(), Error> { let data = { - let mut locked_queue = self.queue.lock().unwrap(); + let mut locked_queue = self.queue.lock().expect("lock"); locked_queue.pop_front(); EventQueueSerWrapper(&locked_queue).encode() }; self.persist_queue(data).await?; - if let Some(waker) = self.waker.lock().unwrap().take() { + if let Some(waker) = self.waker.lock().expect("lock").take() { waker.wake(); } Ok(()) @@ -485,10 +513,10 @@ impl Future for EventFuture { fn poll( self: core::pin::Pin<&mut Self>, cx: &mut core::task::Context<'_>, ) -> core::task::Poll { - if let Some(event) = self.event_queue.lock().unwrap().front() { + if let Some(event) = self.event_queue.lock().expect("lock").front() { Poll::Ready(event.clone()) } else { - *self.waker.lock().unwrap() = Some(cx.waker().clone()); + *self.waker.lock().expect("lock") = Some(cx.waker().clone()); Poll::Pending } } @@ -1091,11 +1119,14 @@ where }; self.payment_store.get(&payment_id).map(|payment| { + let amount_msat = payment.amount_msat.expect( + "outbound payments should record their amount before they can succeed", + ); log_info!( self.logger, "Successfully sent payment of {}msat{} from \ payment hash {:?} with preimage {:?}", - payment.amount_msat.unwrap(), + amount_msat, if let Some(fee) = fee_paid_msat { format!(" (fee {} msat)", fee) } else { @@ -1256,7 +1287,9 @@ where } let user_channel_id: u128 = u128::from_ne_bytes( - self.keys_manager.get_secure_random_bytes()[..16].try_into().unwrap(), + self.keys_manager.get_secure_random_bytes()[..16] + .try_into() + .expect("slice is exactly 16 bytes"), ); let allow_0conf = 
self.config.trusted_peers_0conf.contains(&counterparty_node_id); let mut channel_override_config = None; @@ -1268,27 +1301,32 @@ where if lsp_node_id == counterparty_node_id { // When we're an LSPS2 client, allow claiming underpaying HTLCs as the LSP will skim off some fee. We'll // check that they don't take too much before claiming. - // - // We also set maximum allowed inbound HTLC value in flight - // to 100%. We should eventually be able to set this on a per-channel basis, but for - // now we just bump the default for all channels. channel_override_config = Some(ChannelConfigOverrides { - handshake_overrides: Some(ChannelHandshakeConfigUpdate { - max_inbound_htlc_value_in_flight_percent_of_channel: Some(100), - ..Default::default() - }), update_overrides: Some(ChannelConfigUpdate { accept_underpaying_htlcs: Some(true), ..Default::default() }), + ..Default::default() }); + + // LSPS2 channels are unannounced; rely on LDK's default of 100% + // inbound HTLC value-in-flight so the LSP can forward the initial + // payment in full. + debug_assert_eq!( + self.channel_manager + .get_current_config() + .channel_handshake_config + .unannounced_channel_max_inbound_htlc_value_in_flight_percentage, + 100 + ); } } let res = if allow_0conf { - self.channel_manager.accept_inbound_channel_from_trusted_peer_0conf( + self.channel_manager.accept_inbound_channel_from_trusted_peer( &temporary_channel_id, &counterparty_node_id, user_channel_id, + TrustedChannelFeatures::ZeroConf, channel_override_config, ) } else { @@ -1395,32 +1433,29 @@ where } } - // We only allow multiple HTLCs in/out for trampoline forwards, which have not yet - // been fully implemented in LDK, so we do not lose any information by just - // reporting the first HTLC in each vec. - debug_assert_eq!(prev_htlcs.len(), 1, "unexpected number of prev_htlcs"); - debug_assert_eq!(next_htlcs.len(), 1, "unexpected number of next_htlcs"); - let prev_htlc = prev_htlcs - .first() - .expect("we expect at least one prev_htlc for PaymentForwarded"); - let next_htlc = next_htlcs - .first() - .expect("we expect at least one next_htlc for PaymentForwarded"); - + // We only expect multiple next_htlcs when we have a trampoline forward, and we do + // not support JIT channels in combination with trampoline. We're not at risk of + // double-reporting a skimmed fee when we have multiple next_htlcs because we + // expect our skimmed fee to be zero. 
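+					// If that assumption is ever violated, the debug assertion below
+					// fires, as reporting a non-zero skimmed fee once per outgoing
+					// HTLC would overstate the fee passed to the liquidity source.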
+ if skimmed_fee_msat.is_some() { + debug_assert_eq!( + next_htlcs.len(), + 1, + "unexpected skimmed fee for trampoline forward, fee may be double counted" + ); + } if let Some(liquidity_source) = self.liquidity_source.as_ref() { let skimmed_fee_msat = skimmed_fee_msat.unwrap_or(0); - liquidity_source - .handle_payment_forwarded(Some(next_htlc.channel_id), skimmed_fee_msat) - .await; + for next_htlc in next_htlcs.iter() { + liquidity_source + .handle_payment_forwarded(Some(next_htlc.channel_id), skimmed_fee_msat) + .await; + } } let event = Event::PaymentForwarded { - prev_channel_id: prev_htlc.channel_id, - next_channel_id: next_htlc.channel_id, - prev_user_channel_id: prev_htlc.user_channel_id.map(UserChannelId), - next_user_channel_id: next_htlc.user_channel_id.map(UserChannelId), - prev_node_id: prev_htlc.node_id, - next_node_id: next_htlc.node_id, + prev_htlcs: prev_htlcs.into_iter().map(HTLCLocator::from).collect(), + next_htlcs: next_htlcs.into_iter().map(HTLCLocator::from).collect(), total_fee_earned_msat, skimmed_fee_msat, claim_from_onchain_tx, @@ -1446,10 +1481,14 @@ where counterparty_node_id, ); + let former_temporary_channel_id = former_temporary_channel_id.expect( + "LDK Node has only ever persisted ChannelPending events from rust-lightning 0.0.115 or later", + ); + let event = Event::ChannelPending { channel_id, user_channel_id: UserChannelId(user_channel_id), - former_temporary_channel_id: former_temporary_channel_id.unwrap(), + former_temporary_channel_id, counterparty_node_id, funding_txo, }; diff --git a/src/fee_estimator.rs b/src/fee_estimator.rs index b787ecd33..34fe7b64c 100644 --- a/src/fee_estimator.rs +++ b/src/fee_estimator.rs @@ -48,7 +48,7 @@ impl OnchainFeeEstimator { pub(crate) fn set_fee_rate_cache( &self, fee_rate_cache_update: HashMap, ) -> bool { - let mut locked_fee_rate_cache = self.fee_rate_cache.write().unwrap(); + let mut locked_fee_rate_cache = self.fee_rate_cache.write().expect("lock"); if fee_rate_cache_update != *locked_fee_rate_cache { *locked_fee_rate_cache = fee_rate_cache_update; true @@ -60,7 +60,7 @@ impl OnchainFeeEstimator { impl FeeEstimator for OnchainFeeEstimator { fn estimate_fee_rate(&self, confirmation_target: ConfirmationTarget) -> FeeRate { - let locked_fee_rate_cache = self.fee_rate_cache.read().unwrap(); + let locked_fee_rate_cache = self.fee_rate_cache.read().expect("lock"); let fallback_sats_kwu = get_fallback_rate_for_target(confirmation_target); diff --git a/src/ffi/types.rs b/src/ffi/types.rs index 5a1420882..ad293bc3e 100644 --- a/src/ffi/types.rs +++ b/src/ffi/types.rs @@ -25,6 +25,7 @@ pub use bitcoin::{Address, BlockHash, Network, OutPoint, ScriptBuf, Txid}; pub use lightning::chain::channelmonitor::BalanceSource; use lightning::events::PaidBolt12Invoice as LdkPaidBolt12Invoice; pub use lightning::events::{ClosureReason, PaymentFailureReason}; +use lightning::ln::channel_state::ChannelShutdownState; use lightning::ln::channelmanager::PaymentId; use lightning::ln::msgs::DecodeError; pub use lightning::ln::types::ChannelId; @@ -917,7 +918,9 @@ uniffi::custom_type!(PaymentHash, String, { } }, lower: |obj| { - Sha256::from_slice(&obj.0).unwrap().to_string() + Sha256::from_slice(&obj.0) + .expect("PaymentHash should always contain exactly 32 bytes") + .to_string() }, }); @@ -1415,6 +1418,26 @@ uniffi::custom_type!(LSPSDateTime, String, { }, }); +/// The shutdown state of a channel as returned in [`ChannelDetails::channel_shutdown_state`]. 
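+///
+/// This mirrors LDK's `ChannelShutdownState` and is exposed over the FFI via
+/// `uniffi::remote(Enum)`.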
+/// +/// [`ChannelDetails::channel_shutdown_state`]: crate::ChannelDetails::channel_shutdown_state +#[uniffi::remote(Enum)] +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub enum ChannelShutdownState { + /// Channel has not sent or received a shutdown message. + NotShuttingDown, + /// Local node has sent a shutdown message for this channel. + ShutdownInitiated, + /// Shutdown message exchanges have concluded and the channels are in the midst of + /// resolving all existing open HTLCs before closing can continue. + ResolvingHTLCs, + /// All HTLCs have been resolved, nodes are currently negotiating channel close onchain fee rates. + NegotiatingClosingFee, + /// We've successfully negotiated a closing_signed dance. At this point `ChannelManager` is about + /// to drop the channel. + ShutdownComplete, +} + /// The reason the channel was closed. See individual variants for more details. #[uniffi::remote(Enum)] #[derive(Clone, Debug, PartialEq, Eq)] diff --git a/src/io/mod.rs b/src/io/mod.rs index e080d39f7..bf6366c45 100644 --- a/src/io/mod.rs +++ b/src/io/mod.rs @@ -10,6 +10,7 @@ pub mod sqlite_store; #[cfg(test)] pub(crate) mod test_utils; +pub(crate) mod tier_store; pub(crate) mod utils; pub mod vss_store; diff --git a/src/io/sqlite_store/mod.rs b/src/io/sqlite_store/mod.rs index 94e8360fc..098765d0d 100644 --- a/src/io/sqlite_store/mod.rs +++ b/src/io/sqlite_store/mod.rs @@ -26,6 +26,8 @@ mod migrations; /// LDK Node's database file name. pub const SQLITE_DB_FILE_NAME: &str = "ldk_node_data.sqlite"; +/// LDK Node's backup database file name. +pub const SQLITE_BACKUP_DB_FILE_NAME: &str = "ldk_node_data_backup.sqlite"; /// LDK Node's table in which we store all data. pub const KV_TABLE_NAME: &str = "ldk_node_data"; @@ -288,7 +290,10 @@ impl SqliteStoreInner { })?; let sql = format!("SELECT user_version FROM pragma_user_version"); - let version_res: u16 = connection.query_row(&sql, [], |row| row.get(0)).unwrap(); + let version_res: u16 = connection.query_row(&sql, [], |row| row.get(0)).map_err(|e| { + let msg = format!("Failed to read PRAGMA user_version: {}", e); + io::Error::new(io::ErrorKind::Other, msg) + })?; if version_res == 0 { // New database, set our SCHEMA_USER_VERSION and continue @@ -364,7 +369,7 @@ impl SqliteStoreInner { } fn get_inner_lock_ref(&self, locking_key: String) -> Arc> { - let mut outer_lock = self.write_version_locks.lock().unwrap(); + let mut outer_lock = self.write_version_locks.lock().expect("lock"); Arc::clone(&outer_lock.entry(locking_key).or_default()) } @@ -373,7 +378,7 @@ impl SqliteStoreInner { ) -> io::Result> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "read")?; - let locked_conn = self.connection.lock().unwrap(); + let locked_conn = self.connection.lock().expect("lock"); let sql = format!("SELECT value FROM {} WHERE primary_namespace=:primary_namespace AND secondary_namespace=:secondary_namespace AND key=:key;", self.kv_table_name); @@ -423,7 +428,7 @@ impl SqliteStoreInner { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "write")?; self.execute_locked_write(inner_lock_ref, locking_key, version, || { - let locked_conn = self.connection.lock().unwrap(); + let locked_conn = self.connection.lock().expect("lock"); let sort_order = self.next_sort_order.fetch_add(1, Ordering::Relaxed); @@ -467,7 +472,7 @@ impl SqliteStoreInner { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "remove")?; self.execute_locked_write(inner_lock_ref, locking_key, version, || { - 
let locked_conn = self.connection.lock().unwrap(); + let locked_conn = self.connection.lock().expect("lock"); let sql = format!("DELETE FROM {} WHERE primary_namespace=:primary_namespace AND secondary_namespace=:secondary_namespace AND key=:key;", self.kv_table_name); @@ -500,7 +505,7 @@ impl SqliteStoreInner { ) -> io::Result> { check_namespace_key_validity(primary_namespace, secondary_namespace, None, "list")?; - let locked_conn = self.connection.lock().unwrap(); + let locked_conn = self.connection.lock().expect("lock"); let sql = format!( "SELECT key FROM {} WHERE primary_namespace=:primary_namespace AND secondary_namespace=:secondary_namespace", @@ -546,7 +551,7 @@ impl SqliteStoreInner { "list_paginated", )?; - let locked_conn = self.connection.lock().unwrap(); + let locked_conn = self.connection.lock().expect("lock"); // Fetch one extra row beyond PAGE_SIZE to determine whether a next page exists. let fetch_limit = (PAGE_SIZE + 1) as i64; @@ -644,7 +649,7 @@ impl SqliteStoreInner { &self, inner_lock_ref: Arc>, locking_key: String, version: u64, callback: F, ) -> Result<(), lightning::io::Error> { let res = { - let mut last_written_version = inner_lock_ref.lock().unwrap(); + let mut last_written_version = inner_lock_ref.lock().expect("lock"); // Check if we already have a newer version written/removed. This is used in async contexts to realize eventual // consistency. @@ -670,7 +675,7 @@ impl SqliteStoreInner { // to prevent leaking memory. The two arcs that are expected are the one in the map and the one held here in // inner_lock_ref. The outer lock is obtained first, to avoid a new arc being cloned after we've already // counted. - let mut outer_lock = self.write_version_locks.lock().unwrap(); + let mut outer_lock = self.write_version_locks.lock().expect("lock"); let strong_count = Arc::strong_count(&inner_lock_ref); debug_assert!(strong_count >= 2, "Unexpected SqliteStore strong count"); diff --git a/src/io/tier_store.rs b/src/io/tier_store.rs new file mode 100644 index 000000000..0a85fb6a5 --- /dev/null +++ b/src/io/tier_store.rs @@ -0,0 +1,905 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + +use crate::io::utils::check_namespace_key_validity; +use crate::logger::{LdkLogger, Logger}; +use crate::types::DynStore; + +use lightning::util::persist::{ + KVStore, KVStoreSync, NETWORK_GRAPH_PERSISTENCE_KEY, + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, + SCORER_PERSISTENCE_KEY, SCORER_PERSISTENCE_PRIMARY_NAMESPACE, +}; +use lightning::{io, log_error}; + +use std::future::Future; +use std::sync::Arc; + +/// A 3-tiered [`KVStore`]/[`KVStoreSync`] implementation that routes data across +/// storage backends that may be local or remote: +/// - a primary store for durable, authoritative persistence, +/// - an optional backup store that maintains an additional durable copy of +/// primary-backed data, and +/// - an optional ephemeral store for non-critical, rebuildable cached data. +/// +/// When a backup store is configured, writes and removals for primary-backed data +/// are issued to the primary and backup stores concurrently and only succeed once +/// both stores complete successfully. +/// +/// Reads and lists do not consult the backup store during normal operation. 
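+///
+/// A minimal configuration sketch (illustrative only; `primary_store`,
+/// `backup_store`, and `ephemeral_store` are assumed to be `Arc<DynStore>`
+/// handles):
+///
+/// ```ignore
+/// let mut store = TierStore::new(primary_store, logger);
+/// store.set_backup_store(backup_store);
+/// store.set_ephemeral_store(ephemeral_store);
+/// ```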
+/// Ephemeral data is read from and written to the ephemeral store when configured. +/// +/// Note that dual-store writes and removals are not atomic across the primary and +/// backup stores. If one store succeeds and the other fails, the operation +/// returns an error even though one store may already reflect the change. +pub(crate) struct TierStore { + inner: Arc, +} + +impl TierStore { + pub fn new(primary_store: Arc, logger: Arc) -> Self { + let inner = Arc::new(TierStoreInner::new(primary_store, Arc::clone(&logger))); + + Self { inner } + } + + /// Configures a backup store for primary-backed data. + /// + /// Once set, writes and removals targeting the primary tier succeed only if both + /// the primary and backup stores succeed. The two operations are issued + /// concurrently, and any failure is returned to the caller. + /// + /// Note: dual-store writes/removals are not atomic. An error may be returned + /// after the primary store has already been updated if the backup store fails. + /// + /// The backup store is not consulted for normal reads or lists. + pub fn set_backup_store(&mut self, backup: Arc) { + debug_assert_eq!(Arc::strong_count(&self.inner), 1); + + let inner = Arc::get_mut(&mut self.inner).expect( + "TierStore should not be shared during configuration. No other references should exist", + ); + + inner.backup_store = Some(backup); + } + + /// Configures the ephemeral store for non-critical, rebuildable data. + /// + /// When configured, selected cache-like data is routed to this store instead of + /// the primary store. + pub fn set_ephemeral_store(&mut self, ephemeral: Arc) { + debug_assert_eq!(Arc::strong_count(&self.inner), 1); + + let inner = Arc::get_mut(&mut self.inner).expect( + "TierStore should not be shared during configuration. 
No other references should exist", + ); + + inner.ephemeral_store = Some(ephemeral); + } +} + +impl KVStore for TierStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> impl Future, io::Error>> + 'static + Send { + let inner = Arc::clone(&self.inner); + + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + + async move { inner.read_internal(primary_namespace, secondary_namespace, key).await } + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> impl Future> + 'static + Send { + let inner = Arc::clone(&self.inner); + + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + + async move { inner.write_internal(primary_namespace, secondary_namespace, key, buf).await } + } + + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> impl Future> + 'static + Send { + let inner = Arc::clone(&self.inner); + + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + let key = key.to_string(); + + async move { inner.remove_internal(primary_namespace, secondary_namespace, key, lazy).await } + } + + fn list( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> impl Future, io::Error>> + 'static + Send { + let inner = Arc::clone(&self.inner); + + let primary_namespace = primary_namespace.to_string(); + let secondary_namespace = secondary_namespace.to_string(); + + async move { inner.list_internal(primary_namespace, secondary_namespace).await } + } +} + +impl KVStoreSync for TierStore { + fn read( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> io::Result> { + self.inner.read_internal_sync( + primary_namespace.to_string(), + secondary_namespace.to_string(), + key.to_string(), + ) + } + + fn write( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> io::Result<()> { + self.inner.write_internal_sync( + primary_namespace.to_string(), + secondary_namespace.to_string(), + key.to_string(), + buf, + ) + } + + fn remove( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> io::Result<()> { + self.inner.remove_internal_sync( + primary_namespace.to_string(), + secondary_namespace.to_string(), + key.to_string(), + lazy, + ) + } + + fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { + self.inner + .list_internal_sync(primary_namespace.to_string(), secondary_namespace.to_string()) + } +} + +struct TierStoreInner { + /// The authoritative store for durable data. + primary_store: Arc, + /// The store used for non-critical, rebuildable cached data. + ephemeral_store: Option>, + /// An optional second durable store for primary-backed data. + backup_store: Option>, + logger: Arc, +} + +impl TierStoreInner { + /// Creates a tier store with the primary data store. + pub fn new(primary_store: Arc, logger: Arc) -> Self { + Self { primary_store, ephemeral_store: None, backup_store: None, logger } + } + + /// Reads from the primary data store. 
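Because both setters above insist on exclusive ownership (`Arc::get_mut` plus a strong-count `debug_assert`), all tiers must be wired up before the store is shared. A sketch of what construction might look like, reusing the `DynStore` and `Logger` types and imports from this file; the helper name is illustrative, not part of the codebase:

```rust
// Hypothetical construction helper: configure every tier while the
// TierStore is still uniquely owned, i.e. before any clone escapes.
fn build_tier_store(
    primary: Arc<DynStore>, backup: Option<Arc<DynStore>>, ephemeral: Option<Arc<DynStore>>,
    logger: Arc<Logger>,
) -> TierStore {
    let mut store = TierStore::new(primary, logger);
    if let Some(backup) = backup {
        // From here on, primary-backed writes/removals must succeed on
        // both durable stores.
        store.set_backup_store(backup);
    }
    if let Some(ephemeral) = ephemeral {
        // Cache-like data (network graph, scorer) is routed here instead
        // of the primary store.
        store.set_ephemeral_store(ephemeral);
    }
    store
}
```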
+ async fn read_primary( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> io::Result> { + match KVStore::read( + self.primary_store.as_ref(), + primary_namespace, + secondary_namespace, + key, + ) + .await + { + Ok(data) => Ok(data), + Err(e) => { + log_error!( + self.logger, + "Failed to read from primary store for key {}/{}/{}: {}.", + primary_namespace, + secondary_namespace, + key, + e + ); + Err(e) + }, + } + } + + fn read_primary_sync( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> io::Result> { + match KVStoreSync::read( + self.primary_store.as_ref(), + primary_namespace, + secondary_namespace, + key, + ) { + Ok(data) => Ok(data), + Err(e) => { + log_error!( + self.logger, + "Failed to read from primary store for key {}/{}/{}: {}.", + primary_namespace, + secondary_namespace, + key, + e + ); + Err(e) + }, + } + } + + /// Lists keys from the primary data store. + async fn list_primary( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> io::Result> { + match KVStore::list(self.primary_store.as_ref(), primary_namespace, secondary_namespace) + .await + { + Ok(keys) => Ok(keys), + Err(e) => { + log_error!( + self.logger, + "Failed to list from primary store for namespace {}/{}: {}.", + primary_namespace, + secondary_namespace, + e + ); + Err(e) + }, + } + } + + fn list_primary_sync( + &self, primary_namespace: &str, secondary_namespace: &str, + ) -> io::Result> { + match KVStoreSync::list(self.primary_store.as_ref(), primary_namespace, secondary_namespace) + { + Ok(keys) => Ok(keys), + Err(e) => { + log_error!( + self.logger, + "Failed to list keys in namespace {}/{} from primary store: {}.", + primary_namespace, + secondary_namespace, + e + ); + Err(e) + }, + } + } + + async fn write_primary_backup_async( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> io::Result<()> { + let primary_fut = KVStore::write( + self.primary_store.as_ref(), + primary_namespace, + secondary_namespace, + key, + buf.clone(), + ); + + if let Some(backup_store) = self.backup_store.as_ref() { + let backup_fut = KVStore::write( + backup_store.as_ref(), + primary_namespace, + secondary_namespace, + key, + buf, + ); + + let (primary_res, backup_res) = tokio::join!(primary_fut, backup_fut); + + self.handle_primary_backup_results( + "write", + primary_namespace, + secondary_namespace, + key, + primary_res, + backup_res, + ) + } else { + primary_fut.await + } + } + + fn write_primary_backup_sync( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, + ) -> io::Result<()> { + if let Some(backup_store) = self.backup_store.as_ref() { + let primary_res = KVStoreSync::write( + self.primary_store.as_ref(), + primary_namespace, + secondary_namespace, + key, + buf.clone(), + ); + let backup_res = KVStoreSync::write( + backup_store.as_ref(), + primary_namespace, + secondary_namespace, + key, + buf, + ); + + self.handle_primary_backup_results( + "write", + primary_namespace, + secondary_namespace, + key, + primary_res, + backup_res, + ) + } else { + KVStoreSync::write( + self.primary_store.as_ref(), + primary_namespace, + secondary_namespace, + key, + buf, + ) + } + } + + async fn remove_primary_backup_async( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> io::Result<()> { + let primary_fut = KVStore::remove( + self.primary_store.as_ref(), + primary_namespace, + secondary_namespace, + key, + lazy, + ); + + if let Some(backup_store) = 
self.backup_store.as_ref() { + let backup_fut = KVStore::remove( + backup_store.as_ref(), + primary_namespace, + secondary_namespace, + key, + lazy, + ); + + let (primary_res, backup_res) = tokio::join!(primary_fut, backup_fut); + + self.handle_primary_backup_results( + "removal", + primary_namespace, + secondary_namespace, + key, + primary_res, + backup_res, + ) + } else { + primary_fut.await + } + } + + fn remove_primary_backup_sync( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, + ) -> io::Result<()> { + if let Some(backup_store) = self.backup_store.as_ref() { + let primary_res = KVStoreSync::remove( + self.primary_store.as_ref(), + primary_namespace, + secondary_namespace, + key, + lazy, + ); + let backup_res = KVStoreSync::remove( + backup_store.as_ref(), + primary_namespace, + secondary_namespace, + key, + lazy, + ); + + self.handle_primary_backup_results( + "removal", + primary_namespace, + secondary_namespace, + key, + primary_res, + backup_res, + ) + } else { + KVStoreSync::remove( + self.primary_store.as_ref(), + primary_namespace, + secondary_namespace, + key, + lazy, + ) + } + } + + async fn read_internal( + &self, primary_namespace: String, secondary_namespace: String, key: String, + ) -> io::Result> { + check_namespace_key_validity( + primary_namespace.as_str(), + secondary_namespace.as_str(), + Some(key.as_str()), + "read", + )?; + + if let Some(eph_store) = + self.ephemeral_store(&primary_namespace, &secondary_namespace, &key) + { + // We don't retry ephemeral-store reads here. Local failures are treated as + // terminal for this access path rather than falling back to another store. + KVStore::read(eph_store.as_ref(), &primary_namespace, &secondary_namespace, &key).await + } else { + self.read_primary(&primary_namespace, &secondary_namespace, &key).await + } + } + + fn read_internal_sync( + &self, primary_namespace: String, secondary_namespace: String, key: String, + ) -> io::Result> { + check_namespace_key_validity( + primary_namespace.as_str(), + secondary_namespace.as_str(), + Some(key.as_str()), + "read", + )?; + + if let Some(eph_store) = + self.ephemeral_store(&primary_namespace, &secondary_namespace, &key) + { + KVStoreSync::read(eph_store.as_ref(), &primary_namespace, &secondary_namespace, &key) + } else { + self.read_primary_sync(&primary_namespace, &secondary_namespace, &key) + } + } + + async fn write_internal( + &self, primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, + ) -> io::Result<()> { + check_namespace_key_validity( + primary_namespace.as_str(), + secondary_namespace.as_str(), + Some(key.as_str()), + "write", + )?; + + if let Some(eph_store) = + self.ephemeral_store(&primary_namespace, &secondary_namespace, &key) + { + KVStore::write( + eph_store.as_ref(), + primary_namespace.as_str(), + secondary_namespace.as_str(), + key.as_str(), + buf, + ) + .await + } else { + self.write_primary_backup_async( + primary_namespace.as_str(), + secondary_namespace.as_str(), + key.as_str(), + buf, + ) + .await + } + } + + fn write_internal_sync( + &self, primary_namespace: String, secondary_namespace: String, key: String, buf: Vec, + ) -> io::Result<()> { + check_namespace_key_validity( + primary_namespace.as_str(), + secondary_namespace.as_str(), + Some(key.as_str()), + "write", + )?; + + if let Some(ephemeral_store) = + self.ephemeral_store(&primary_namespace, &secondary_namespace, &key) + { + KVStoreSync::write( + ephemeral_store.as_ref(), + primary_namespace.as_str(), + secondary_namespace.as_str(), + 
key.as_str(), + buf, + ) + } else { + self.write_primary_backup_sync( + primary_namespace.as_str(), + secondary_namespace.as_str(), + key.as_str(), + buf, + ) + } + } + + async fn remove_internal( + &self, primary_namespace: String, secondary_namespace: String, key: String, lazy: bool, + ) -> io::Result<()> { + check_namespace_key_validity( + primary_namespace.as_str(), + secondary_namespace.as_str(), + Some(key.as_str()), + "remove", + )?; + + if let Some(eph_store) = + self.ephemeral_store(&primary_namespace, &secondary_namespace, &key) + { + KVStore::remove( + eph_store.as_ref(), + primary_namespace.as_str(), + secondary_namespace.as_str(), + key.as_str(), + lazy, + ) + .await + } else { + self.remove_primary_backup_async( + primary_namespace.as_str(), + secondary_namespace.as_str(), + key.as_str(), + lazy, + ) + .await + } + } + + fn remove_internal_sync( + &self, primary_namespace: String, secondary_namespace: String, key: String, lazy: bool, + ) -> io::Result<()> { + check_namespace_key_validity( + primary_namespace.as_str(), + secondary_namespace.as_str(), + Some(key.as_str()), + "remove", + )?; + + if let Some(ephemeral_store) = + self.ephemeral_store(&primary_namespace, &secondary_namespace, &key) + { + KVStoreSync::remove( + ephemeral_store.as_ref(), + primary_namespace.as_str(), + secondary_namespace.as_str(), + key.as_str(), + lazy, + ) + } else { + self.remove_primary_backup_sync( + primary_namespace.as_str(), + secondary_namespace.as_str(), + key.as_str(), + lazy, + ) + } + } + + async fn list_internal( + &self, primary_namespace: String, secondary_namespace: String, + ) -> io::Result> { + check_namespace_key_validity( + primary_namespace.as_str(), + secondary_namespace.as_str(), + None, + "list", + )?; + + match (primary_namespace.as_str(), secondary_namespace.as_str()) { + ( + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, + ) + | (SCORER_PERSISTENCE_PRIMARY_NAMESPACE, _) => { + if let Some(eph_store) = self.ephemeral_store.as_ref() { + // We don't retry ephemeral-store lists here. Local failures are treated as + // terminal for this access path rather than falling back to another store. 
+ KVStore::list(eph_store.as_ref(), &primary_namespace, &secondary_namespace) + .await + } else { + self.list_primary(&primary_namespace, &secondary_namespace).await + } + }, + _ => self.list_primary(&primary_namespace, &secondary_namespace).await, + } + } + + fn list_internal_sync( + &self, primary_namespace: String, secondary_namespace: String, + ) -> io::Result> { + check_namespace_key_validity( + primary_namespace.as_str(), + secondary_namespace.as_str(), + None, + "list", + )?; + + match (primary_namespace.as_str(), secondary_namespace.as_str()) { + ( + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, + ) + | (SCORER_PERSISTENCE_PRIMARY_NAMESPACE, _) => { + if let Some(ephemeral_store) = self.ephemeral_store.as_ref() { + KVStoreSync::list( + ephemeral_store.as_ref(), + &primary_namespace, + &secondary_namespace, + ) + } else { + self.list_primary_sync(&primary_namespace, &secondary_namespace) + } + }, + _ => self.list_primary_sync(&primary_namespace, &secondary_namespace), + } + } + + fn ephemeral_store( + &self, primary_namespace: &str, secondary_namespace: &str, key: &str, + ) -> Option<&Arc> { + self.ephemeral_store + .as_ref() + .filter(|_s| is_ephemeral_cached_key(primary_namespace, secondary_namespace, key)) + } + + fn handle_primary_backup_results( + &self, op: &str, primary_namespace: &str, secondary_namespace: &str, key: &str, + primary_res: io::Result<()>, backup_res: io::Result<()>, + ) -> io::Result<()> { + match (primary_res, backup_res) { + (Ok(()), Ok(())) => Ok(()), + (Err(primary_err), Ok(())) => Err(primary_err), + (Ok(()), Err(backup_err)) => Err(backup_err), + (Err(primary_err), Err(backup_err)) => { + log_error!( + self.logger, + "Primary and backup {}s both failed for key {}/{}/{}: primary={}, backup={}", + op, + primary_namespace, + secondary_namespace, + key, + primary_err, + backup_err + ); + Err(primary_err) + }, + } + } +} + +fn is_ephemeral_cached_key(pn: &str, sn: &str, key: &str) -> bool { + matches!( + (pn, sn, key), + (NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, _, NETWORK_GRAPH_PERSISTENCE_KEY) + | (SCORER_PERSISTENCE_PRIMARY_NAMESPACE, _, SCORER_PERSISTENCE_KEY) + ) +} + +#[cfg(test)] +mod tests { + use std::panic::RefUnwindSafe; + use std::path::PathBuf; + use std::sync::Arc; + + use lightning::util::logger::Level; + use lightning::util::persist::{ + CHANNEL_MANAGER_PERSISTENCE_KEY, CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + }; + use lightning_persister::fs_store::v1::FilesystemStore; + + use crate::io::test_utils::{do_read_write_remove_list_persist, random_storage_path}; + use crate::io::tier_store::TierStore; + use crate::logger::Logger; + use crate::types::DynStore; + use crate::types::DynStoreWrapper; + + use super::*; + + impl RefUnwindSafe for TierStore {} + + struct CleanupDir(PathBuf); + impl Drop for CleanupDir { + fn drop(&mut self) { + let _ = std::fs::remove_dir_all(&self.0); + } + } + + fn setup_tier_store(primary_store: Arc, logger: Arc) -> TierStore { + TierStore::new(primary_store, logger) + } + + #[test] + fn write_read_list_remove() { + let base_dir = random_storage_path(); + let log_path = base_dir.join("tier_store_test.log").to_string_lossy().into_owned(); + let logger = Arc::new(Logger::new_fs_writer(log_path, Level::Trace).unwrap()); + + let _cleanup = CleanupDir(base_dir.clone()); + + let primary_store: Arc = + Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("primary")))); + let tier = 
setup_tier_store(primary_store, logger); + + do_read_write_remove_list_persist(&tier); + } + + #[test] + fn ephemeral_routing() { + let base_dir = random_storage_path(); + let log_path = base_dir.join("tier_store_test.log").to_string_lossy().into_owned(); + let logger = Arc::new(Logger::new_fs_writer(log_path, Level::Trace).unwrap()); + + let _cleanup = CleanupDir(base_dir.clone()); + + let primary_store: Arc = + Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("primary")))); + let mut tier = setup_tier_store(Arc::clone(&primary_store), logger); + + let ephemeral_store: Arc = + Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("ephemeral")))); + tier.set_ephemeral_store(Arc::clone(&ephemeral_store)); + + let data = vec![42u8; 32]; + + KVStoreSync::write( + &tier, + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_KEY, + data.clone(), + ) + .unwrap(); + + KVStoreSync::write( + &tier, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + data.clone(), + ) + .unwrap(); + + let primary_read_ng = KVStoreSync::read( + &*primary_store, + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_KEY, + ); + let ephemeral_read_ng = KVStoreSync::read( + &*ephemeral_store, + NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, + NETWORK_GRAPH_PERSISTENCE_KEY, + ); + + let primary_read_cm = KVStoreSync::read( + &*primary_store, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + ); + let ephemeral_read_cm = KVStoreSync::read( + &*ephemeral_store, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + ); + + assert!(primary_read_ng.is_err()); + assert_eq!(ephemeral_read_ng.unwrap(), data); + + assert!(ephemeral_read_cm.is_err()); + assert_eq!(primary_read_cm.unwrap(), data); + } + + #[test] + fn backup_write_is_part_of_success_path() { + let base_dir = random_storage_path(); + let log_path = base_dir.join("tier_store_test.log").to_string_lossy().into_owned(); + let logger = Arc::new(Logger::new_fs_writer(log_path, Level::Trace).unwrap()); + + let _cleanup = CleanupDir(base_dir.clone()); + + let primary_store: Arc = + Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("primary")))); + let mut tier = setup_tier_store(Arc::clone(&primary_store), logger); + + let backup_store: Arc = + Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("backup")))); + tier.set_backup_store(Arc::clone(&backup_store)); + + let data = vec![42u8; 32]; + + KVStoreSync::write( + &tier, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + data.clone(), + ) + .unwrap(); + + let primary_read = KVStoreSync::read( + &*primary_store, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + ); + let backup_read = KVStoreSync::read( + &*backup_store, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_KEY, + ); + + assert_eq!(primary_read.unwrap(), data); + assert_eq!(backup_read.unwrap(), data); + } + + #[test] + fn 
backup_remove_is_part_of_success_path() { + let base_dir = random_storage_path(); + let log_path = base_dir.join("tier_store_test.log").to_string_lossy().into_owned(); + let logger = Arc::new(Logger::new_fs_writer(log_path, Level::Trace).unwrap()); + + let _cleanup = CleanupDir(base_dir.clone()); + + let primary_store: Arc = + Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("primary")))); + let mut tier = setup_tier_store(Arc::clone(&primary_store), logger); + + let backup_store: Arc = + Arc::new(DynStoreWrapper(FilesystemStore::new(base_dir.join("backup")))); + tier.set_backup_store(Arc::clone(&backup_store)); + + let data = vec![42u8; 32]; + let key = CHANNEL_MANAGER_PERSISTENCE_KEY; + + KVStoreSync::write( + &tier, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + key, + data, + ) + .unwrap(); + + KVStoreSync::remove( + &tier, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + key, + true, + ) + .unwrap(); + + let primary_read = KVStoreSync::read( + &*primary_store, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + key, + ); + let backup_read = KVStoreSync::read( + &*backup_store, + CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE, + CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE, + key, + ); + + assert!(primary_read.is_err()); + assert!(backup_read.is_err()); + } +} diff --git a/src/io/utils.rs b/src/io/utils.rs index eef71ec0b..5b51b8859 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -11,7 +11,7 @@ use std::ops::Deref; #[cfg(unix)] use std::os::unix::fs::OpenOptionsExt; use std::path::Path; -use std::sync::Arc; +use std::sync::{Arc, RwLock}; use bdk_chain::indexer::keychain_txout::ChangeSet as BdkIndexerChangeSet; use bdk_chain::local_chain::ChangeSet as BdkLocalChainChangeSet; @@ -44,11 +44,10 @@ use crate::io::{ NODE_METRICS_KEY, NODE_METRICS_PRIMARY_NAMESPACE, NODE_METRICS_SECONDARY_NAMESPACE, }; use crate::logger::{log_error, LdkLogger, Logger}; -use crate::payment::PendingPaymentDetails; use crate::peer_store::PeerStore; use crate::types::{Broadcaster, DynStore, KeysManager, Sweeper}; use crate::wallet::ser::{ChangeSetDeserWrapper, ChangeSetSerWrapper}; -use crate::{Error, EventQueue, NodeMetrics, PaymentDetails}; +use crate::{Error, EventQueue, NodeMetrics}; pub const EXTERNAL_PATHFINDING_SCORES_CACHE_KEY: &str = "external_pathfinding_scores_cache"; @@ -172,8 +171,8 @@ where log_error!( logger, "Writing data to key {}/{}/{} failed due to: {}", - NODE_METRICS_PRIMARY_NAMESPACE, - NODE_METRICS_SECONDARY_NAMESPACE, + SCORER_PERSISTENCE_PRIMARY_NAMESPACE, + SCORER_PERSISTENCE_SECONDARY_NAMESPACE, EXTERNAL_PATHFINDING_SCORES_CACHE_KEY, e ); @@ -221,21 +220,19 @@ where }) } -/// Read previously persisted payments information from the store. -pub(crate) async fn read_payments( - kv_store: &DynStore, logger: L, -) -> Result, std::io::Error> +/// Read all objects of type `T` from the given namespace, spawning reads in parallel. 
+pub(crate) async fn read_all_objects( + kv_store: &DynStore, primary_namespace: &str, secondary_namespace: &str, logger: L, +) -> Result, std::io::Error> where + T: Readable, + L: Deref, L::Target: LdkLogger, { + let type_name = std::any::type_name::(); let mut res = Vec::new(); - let mut stored_keys = KVStore::list( - &*kv_store, - PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, - PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - ) - .await?; + let mut stored_keys = KVStore::list(&*kv_store, primary_namespace, secondary_namespace).await?; const BATCH_SIZE: usize = 50; @@ -244,12 +241,7 @@ where // Fill JoinSet with tasks if possible while set.len() < BATCH_SIZE && !stored_keys.is_empty() { if let Some(next_key) = stored_keys.pop() { - let fut = KVStore::read( - &*kv_store, - PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, - PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - &next_key, - ); + let fut = KVStore::read(kv_store, primary_namespace, secondary_namespace, &next_key); set.spawn(fut); debug_assert!(set.len() <= BATCH_SIZE); } @@ -259,37 +251,32 @@ where // Exit early if we get an IO error. let reader = read_res .map_err(|e| { - log_error!(logger, "Failed to read PaymentDetails: {}", e); + log_error!(logger, "Failed to read {}: {}", type_name, e); set.abort_all(); e })? .map_err(|e| { - log_error!(logger, "Failed to read PaymentDetails: {}", e); + log_error!(logger, "Failed to read {}: {}", type_name, e); set.abort_all(); e })?; // Refill set for every finished future, if we still have something to do. if let Some(next_key) = stored_keys.pop() { - let fut = KVStore::read( - &*kv_store, - PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, - PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - &next_key, - ); + let fut = KVStore::read(kv_store, primary_namespace, secondary_namespace, &next_key); set.spawn(fut); debug_assert!(set.len() <= BATCH_SIZE); } // Handle result. - let payment = PaymentDetails::read(&mut &*reader).map_err(|e| { - log_error!(logger, "Failed to deserialize PaymentDetails: {}", e); + let object = T::read(&mut &*reader).map_err(|e| { + log_error!(logger, "Failed to deserialize {}: {}", type_name, e); std::io::Error::new( std::io::ErrorKind::InvalidData, - "Failed to deserialize PaymentDetails", + format!("Failed to deserialize {}", type_name), ) })?; - res.push(payment); + res.push(object); } debug_assert!(set.is_empty()); @@ -346,13 +333,20 @@ where }) } -pub(crate) fn write_node_metrics( - node_metrics: &NodeMetrics, kv_store: &DynStore, logger: L, +/// Take a write lock on `node_metrics`, apply `update`, and persist the result to `kv_store`. +/// +/// The write lock is held across the KV-store write, preserving the invariant that readers only +/// observe the mutation once it has been durably persisted (or the persist has failed). +pub(crate) fn update_and_persist_node_metrics( + node_metrics: &RwLock, kv_store: &DynStore, logger: L, + update: impl FnOnce(&mut NodeMetrics), ) -> Result<(), Error> where L::Target: LdkLogger, { - let data = node_metrics.encode(); + let mut locked_node_metrics = node_metrics.write().expect("lock"); + update(&mut *locked_node_metrics); + let data = locked_node_metrics.encode(); KVStoreSync::write( &*kv_store, NODE_METRICS_PRIMARY_NAMESPACE, @@ -625,83 +619,6 @@ pub(crate) fn read_bdk_wallet_change_set( Ok(Some(change_set)) } -/// Read previously persisted pending payments information from the store. 
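The `JoinSet` loop in `read_all_objects` above implements bounded-concurrency fan-out: it pre-fills up to `BATCH_SIZE` reads, then spawns one replacement per completed future, so at most `BATCH_SIZE` reads are ever in flight and the key list never needs to be chunked. A standalone sketch of the same pattern, with `fetch_one` standing in for the per-key `KVStore::read`:

```rust
use tokio::task::JoinSet;

// Stand-in for the per-key read; `'static + Send` so it can be spawned
// onto the JoinSet.
async fn fetch_one(_key: String) -> Result<Vec<u8>, std::io::Error> {
    Ok(Vec::new())
}

// Bounded-concurrency fetch: keep up to `limit` tasks in flight, topping
// the set up once per completed task, as `read_all_objects` does.
async fn fetch_bounded(
    mut keys: Vec<String>, limit: usize,
) -> Result<Vec<Vec<u8>>, std::io::Error> {
    let mut set = JoinSet::new();
    let mut out = Vec::new();

    while set.len() < limit && !keys.is_empty() {
        if let Some(key) = keys.pop() {
            set.spawn(fetch_one(key));
        }
    }

    while let Some(join_res) = set.join_next().await {
        // Exit early on join errors or I/O errors, aborting the rest.
        let bytes = join_res
            .map_err(|e| {
                set.abort_all();
                std::io::Error::new(std::io::ErrorKind::Other, e)
            })?
            .map_err(|e| {
                set.abort_all();
                e
            })?;

        // Refill the set for every finished future while keys remain.
        if let Some(key) = keys.pop() {
            set.spawn(fetch_one(key));
        }
        out.push(bytes);
    }
    Ok(out)
}
```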
-pub(crate) async fn read_pending_payments( - kv_store: &DynStore, logger: L, -) -> Result, std::io::Error> -where - L::Target: LdkLogger, -{ - let mut res = Vec::new(); - - let mut stored_keys = KVStore::list( - &*kv_store, - PENDING_PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, - PENDING_PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - ) - .await?; - - const BATCH_SIZE: usize = 50; - - let mut set = tokio::task::JoinSet::new(); - - // Fill JoinSet with tasks if possible - while set.len() < BATCH_SIZE && !stored_keys.is_empty() { - if let Some(next_key) = stored_keys.pop() { - let fut = KVStore::read( - &*kv_store, - PENDING_PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, - PENDING_PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - &next_key, - ); - set.spawn(fut); - debug_assert!(set.len() <= BATCH_SIZE); - } - } - - while let Some(read_res) = set.join_next().await { - // Exit early if we get an IO error. - let reader = read_res - .map_err(|e| { - log_error!(logger, "Failed to read PendingPaymentDetails: {}", e); - set.abort_all(); - e - })? - .map_err(|e| { - log_error!(logger, "Failed to read PendingPaymentDetails: {}", e); - set.abort_all(); - e - })?; - - // Refill set for every finished future, if we still have something to do. - if let Some(next_key) = stored_keys.pop() { - let fut = KVStore::read( - &*kv_store, - PENDING_PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, - PENDING_PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, - &next_key, - ); - set.spawn(fut); - debug_assert!(set.len() <= BATCH_SIZE); - } - - // Handle result. - let pending_payment = PendingPaymentDetails::read(&mut &*reader).map_err(|e| { - log_error!(logger, "Failed to deserialize PendingPaymentDetails: {}", e); - std::io::Error::new( - std::io::ErrorKind::InvalidData, - "Failed to deserialize PendingPaymentDetails", - ) - })?; - res.push(pending_payment); - } - - debug_assert!(set.is_empty()); - debug_assert!(stored_keys.is_empty()); - - Ok(res) -} - #[cfg(test)] mod tests { use super::read_or_generate_seed_file; diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 2f7a689b2..97883b5d5 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -110,7 +110,9 @@ impl VssStore { .worker_threads(INTERNAL_RUNTIME_WORKERS) .max_blocking_threads(INTERNAL_RUNTIME_WORKERS) .build() - .unwrap(); + .map_err(|e| { + io::Error::new(io::ErrorKind::Other, format!("Failed to build VSS runtime: {}", e)) + })?; let (data_encryption_key, obfuscation_master_key) = derive_data_encryption_and_obfuscation_keys(&vss_seed); @@ -419,7 +421,7 @@ impl VssStoreInner { } fn get_inner_lock_ref(&self, locking_key: String) -> Arc> { - let mut outer_lock = self.locks.lock().unwrap(); + let mut outer_lock = self.locks.lock().expect("lock"); Arc::clone(&outer_lock.entry(locking_key).or_default()) } @@ -526,13 +528,15 @@ impl VssStoreInner { // unwrap safety: resp.value must be always present for a non-erroneous VSS response, otherwise // it is an API-violation which is converted to [`VssError::InternalServerError`] in [`VssClient`] - let storable = Storable::decode(&resp.value.unwrap().value[..]).map_err(|e| { - let msg = format!( - "Failed to decode data read from key {}/{}/{}: {}", - primary_namespace, secondary_namespace, key, e - ); - Error::new(ErrorKind::Other, msg) - })?; + let storable = + Storable::decode(&resp.value.expect("VSS response must contain a value").value[..]) + .map_err(|e| { + let msg = format!( + "Failed to decode data read from key {}/{}/{}: {}", + primary_namespace, secondary_namespace, key, e + ); + 
Error::new(ErrorKind::Other, msg) + })?; let storable_builder = StorableBuilder::new(VssEntropySource(&self.entropy_source)); let aad = @@ -672,7 +676,7 @@ impl VssStoreInner { // to prevent leaking memory. The two arcs that are expected are the one in the map and the one held here in // inner_lock_ref. The outer lock is obtained first, to avoid a new arc being cloned after we've already // counted. - let mut outer_lock = self.locks.lock().unwrap(); + let mut outer_lock = self.locks.lock().expect("lock"); let strong_count = Arc::strong_count(&inner_lock_ref); debug_assert!(strong_count >= 2, "Unexpected VssStore strong count"); @@ -739,10 +743,12 @@ async fn determine_and_write_schema_version( // unwrap safety: resp.value must be always present for a non-erroneous VSS response, otherwise // it is an API-violation which is converted to [`VssError::InternalServerError`] in [`VssClient`] - let storable = Storable::decode(&resp.value.unwrap().value[..]).map_err(|e| { - let msg = format!("Failed to decode schema version: {}", e); - Error::new(ErrorKind::Other, msg) - })?; + let storable = + Storable::decode(&resp.value.expect("VSS response must contain a value").value[..]) + .map_err(|e| { + let msg = format!("Failed to decode schema version: {}", e); + Error::new(ErrorKind::Other, msg) + })?; let storable_builder = StorableBuilder::new(VssEntropySource(entropy_source)); // Schema version was added starting with V1, so if set at all, we use the key as `aad` diff --git a/src/lib.rs b/src/lib.rs index 2ac4697e8..6d877ae10 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -121,7 +121,7 @@ use bitcoin::secp256k1::PublicKey; pub use bitcoin::FeeRate; #[cfg(not(feature = "uniffi"))] use bitcoin::FeeRate; -use bitcoin::{Address, Amount}; +use bitcoin::{Address, Amount, BlockHash, Network}; #[cfg(feature = "uniffi")] pub use builder::ArcedNodeBuilder as Builder; pub use builder::BuildError; @@ -143,12 +143,13 @@ use fee_estimator::{ConfirmationTarget, FeeEstimator, OnchainFeeEstimator}; use ffi::*; use gossip::GossipSource; use graph::NetworkGraph; -use io::utils::write_node_metrics; +use io::utils::update_and_persist_node_metrics; pub use lightning; -use lightning::chain::BestBlock; +use lightning::chain::BestBlock as BlockLocator; use lightning::impl_writeable_tlv_based; use lightning::ln::chan_utils::FUNDING_TRANSACTION_WITNESS_WEIGHT; -use lightning::ln::channel_state::{ChannelDetails as LdkChannelDetails, ChannelShutdownState}; +use lightning::ln::channel_state::ChannelDetails as LdkChannelDetails; +pub use lightning::ln::channel_state::ChannelShutdownState; use lightning::ln::channelmanager::PaymentId; use lightning::ln::msgs::SocketAddress; use lightning::routing::gossip::NodeAlias; @@ -237,7 +238,7 @@ pub struct Node { node_metrics: Arc<RwLock<NodeMetrics>>, om_mailbox: Option<Arc<OnionMessageMailbox>>, async_payments_role: Option, - hrn_resolver: Arc<HRNResolver>, + hrn_resolver: HRNResolver, #[cfg(cycle_tests)] _leak_checker: LeakChecker, } @@ -253,7 +254,7 @@ impl Node { /// a thread-safe manner. pub fn start(&self) -> Result<(), Error> { // Acquire a run lock and hold it until we're set up.
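The comment above introduces the run-lock discipline this diff leans on throughout: `start`/`stop` take the `is_running` write lock and flip the flag while holding it, and every API method takes a cheap read lock and bails out otherwise. A condensed sketch of that gating, independent of the `Node` internals (error names borrowed from this file):

```rust
use std::sync::RwLock;

// Condensed sketch of the run-lock gating used by `start`, `stop`, and
// the API methods below. Holding the write lock across setup keeps the
// state transition atomic with respect to readers.
struct Runner {
    is_running: RwLock<bool>,
}

impl Runner {
    fn start(&self) -> Result<(), &'static str> {
        let mut is_running = self.is_running.write().expect("lock");
        if *is_running {
            return Err("AlreadyRunning");
        }
        // ... perform setup while still holding the lock ...
        *is_running = true;
        Ok(())
    }

    fn api_call(&self) -> Result<(), &'static str> {
        if !*self.is_running.read().expect("lock") {
            return Err("NotRunning");
        }
        // ... do the actual work ...
        Ok(())
    }
}
```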
- let mut is_running_lock = self.is_running.write().unwrap(); + let mut is_running_lock = self.is_running.write().expect("lock"); if *is_running_lock { return Err(Error::AlreadyRunning); } @@ -296,9 +297,7 @@ impl Node { if self.gossip_source.is_rgs() { let gossip_source = Arc::clone(&self.gossip_source); - let gossip_sync_store = Arc::clone(&self.kv_store); let gossip_sync_logger = Arc::clone(&self.logger); - let gossip_node_metrics = Arc::clone(&self.node_metrics); let mut stop_gossip_sync = self.stop_sender.subscribe(); self.runtime.spawn_cancellable_background_task(async move { let mut interval = tokio::time::interval(RGS_SYNC_INTERVAL); @@ -314,20 +313,12 @@ impl Node { _ = interval.tick() => { let now = Instant::now(); match gossip_source.update_rgs_snapshot().await { - Ok(updated_timestamp) => { + Ok(_updated_timestamp) => { log_info!( gossip_sync_logger, "Background sync of RGS gossip data finished in {}ms.", now.elapsed().as_millis() - ); - { - let mut locked_node_metrics = gossip_node_metrics.write().unwrap(); - locked_node_metrics.latest_rgs_snapshot_timestamp = Some(updated_timestamp); - write_node_metrics(&*locked_node_metrics, &*gossip_sync_store, Arc::clone(&gossip_sync_logger)) - .unwrap_or_else(|e| { - log_error!(gossip_sync_logger, "Persistence failed: {}", e); - }); - } + ); } Err(e) => { log_error!( @@ -419,13 +410,27 @@ impl Node { break; } res = listener.accept() => { - let tcp_stream = res.unwrap().0; + let tcp_stream = match res { + Ok((tcp_stream, _)) => tcp_stream, + Err(e) => { + log_error!(logger, "Failed to accept inbound connection: {}", e); + continue; + }, + }; let peer_mgr = Arc::clone(&peer_mgr); + let logger = Arc::clone(&logger); runtime.spawn_cancellable_background_task(async move { + let tcp_stream = match tcp_stream.into_std() { + Ok(tcp_stream) => tcp_stream, + Err(e) => { + log_error!(logger, "Failed to convert inbound connection: {}", e); + return; + }, + }; lightning_net_tokio::setup_inbound( Arc::clone(&peer_mgr), - tcp_stream.into_std().unwrap(), - ) + tcp_stream, + ) .await; }); } @@ -497,7 +502,7 @@ impl Node { return; } _ = interval.tick() => { - let skip_broadcast = match bcast_node_metrics.read().unwrap().latest_node_announcement_broadcast_timestamp { + let skip_broadcast = match bcast_node_metrics.read().expect("lock").latest_node_announcement_broadcast_timestamp { Some(latest_bcast_time_secs) => { // Skip if the time hasn't elapsed yet. 
let next_bcast_unix_time = SystemTime::UNIX_EPOCH + Duration::from_secs(latest_bcast_time_secs) + NODE_ANN_BCAST_INTERVAL; @@ -537,14 +542,15 @@ impl Node { let unix_time_secs_opt = SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - { - let mut locked_node_metrics = bcast_node_metrics.write().unwrap(); - locked_node_metrics.latest_node_announcement_broadcast_timestamp = unix_time_secs_opt; - write_node_metrics(&*locked_node_metrics, &*bcast_store, Arc::clone(&bcast_logger)) - .unwrap_or_else(|e| { - log_error!(bcast_logger, "Persistence failed: {}", e); - }); - } + update_and_persist_node_metrics( + &bcast_node_metrics, + &*bcast_store, + Arc::clone(&bcast_logger), + |m| m.latest_node_announcement_broadcast_timestamp = unix_time_secs_opt, + ) + .unwrap_or_else(|e| { + log_error!(bcast_logger, "Persistence failed: {}", e); + }); } else { debug_assert!(false, "We checked whether the node may announce, so node alias should always be set"); continue @@ -645,7 +651,13 @@ impl Node { Some(background_scorer), sleeper, true, - || Some(SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap()), + || { + Some( + SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("current time should not be earlier than the Unix epoch"), + ) + }, ) .await .unwrap_or_else(|e| { @@ -683,7 +695,7 @@ impl Node { /// /// After this returns most API methods will return [`Error::NotRunning`]. pub fn stop(&self) -> Result<(), Error> { - let mut is_running_lock = self.is_running.write().unwrap(); + let mut is_running_lock = self.is_running.write().expect("lock"); if !*is_running_lock { return Err(Error::NotRunning); } @@ -747,9 +759,10 @@ impl Node { /// Returns the status of the [`Node`]. pub fn status(&self) -> NodeStatus { - let is_running = *self.is_running.read().unwrap(); + let is_running = *self.is_running.read().expect("lock"); + let network = self.config.network; let current_best_block = self.channel_manager.current_best_block().into(); - let locked_node_metrics = self.node_metrics.read().unwrap(); + let locked_node_metrics = self.node_metrics.read().expect("lock"); let latest_lightning_wallet_sync_timestamp = locked_node_metrics.latest_lightning_wallet_sync_timestamp; let latest_onchain_wallet_sync_timestamp = @@ -757,7 +770,7 @@ impl Node { let latest_fee_rate_cache_update_timestamp = locked_node_metrics.latest_fee_rate_cache_update_timestamp; let latest_rgs_snapshot_timestamp = - locked_node_metrics.latest_rgs_snapshot_timestamp.map(|val| val as u64); + self.network_graph.get_last_rapid_gossip_sync_timestamp().map(|val| val as u64); let latest_pathfinding_scores_sync_timestamp = locked_node_metrics.latest_pathfinding_scores_sync_timestamp; let latest_node_announcement_broadcast_timestamp = @@ -765,6 +778,7 @@ impl Node { NodeStatus { is_running, + network, current_best_block, latest_lightning_wallet_sync_timestamp, latest_onchain_wallet_sync_timestamp, @@ -994,7 +1008,7 @@ impl Node { self.bolt12_payment().into(), Arc::clone(&self.config), Arc::clone(&self.logger), - Arc::clone(&self.hrn_resolver), + self.hrn_resolver.clone(), ) } @@ -1015,7 +1029,7 @@ impl Node { self.bolt12_payment(), Arc::clone(&self.config), Arc::clone(&self.logger), - Arc::clone(&self.hrn_resolver), + self.hrn_resolver.clone(), )) } @@ -1078,7 +1092,7 @@ impl Node { pub fn connect( &self, node_id: PublicKey, address: SocketAddress, persist: bool, ) -> Result<(), Error> { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } @@ 
-1108,7 +1122,7 @@ impl Node { /// Will also remove the peer from the peer store, i.e., after this has been called we won't /// try to reconnect on restart. pub fn disconnect(&self, counterparty_node_id: PublicKey) -> Result<(), Error> { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } @@ -1128,9 +1142,9 @@ impl Node { fn open_channel_inner( &self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: FundingAmount, push_to_counterparty_msat: Option, channel_config: Option, - announce_for_forwarding: bool, + announce_for_forwarding: bool, disable_counterparty_reserve: bool, ) -> Result { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } @@ -1182,39 +1196,65 @@ impl Node { let mut user_config = default_user_config(&self.config); user_config.channel_handshake_config.announce_for_forwarding = announce_for_forwarding; user_config.channel_config = (channel_config.unwrap_or_default()).clone().into(); - // We set the max inflight to 100% for private channels. - // FIXME: LDK will default to this behavior soon, too, at which point we should drop this - // manual override. + + // Unannounced channels rely on LDK's default of 100% inbound HTLC value-in-flight + // to support large initial payments via LSPS2. if !announce_for_forwarding { - user_config - .channel_handshake_config - .max_inbound_htlc_value_in_flight_percent_of_channel = 100; + debug_assert_eq!( + user_config + .channel_handshake_config + .unannounced_channel_max_inbound_htlc_value_in_flight_percentage, + 100 + ); } let push_msat = push_to_counterparty_msat.unwrap_or(0); let user_channel_id: u128 = u128::from_ne_bytes( - self.keys_manager.get_secure_random_bytes()[..16].try_into().unwrap(), + self.keys_manager.get_secure_random_bytes()[..16] + .try_into() + .expect("a 16-byte slice should convert into a [u8; 16]"), ); - match self.channel_manager.create_channel( - peer_info.node_id, - channel_amount_sats, - push_msat, - user_channel_id, - None, - Some(user_config), - ) { + let result = if disable_counterparty_reserve { + self.channel_manager.create_channel_to_trusted_peer_0reserve( + peer_info.node_id, + channel_amount_sats, + push_msat, + user_channel_id, + None, + Some(user_config), + ) + } else { + self.channel_manager.create_channel( + peer_info.node_id, + channel_amount_sats, + push_msat, + user_channel_id, + None, + Some(user_config), + ) + }; + + let zero_reserve_string = if disable_counterparty_reserve { "0reserve " } else { "" }; + + match result { Ok(_) => { log_info!( self.logger, - "Initiated channel creation with peer {}. ", + "Initiated {}channel creation with peer {}. 
", + zero_reserve_string, peer_info.node_id ); self.peer_store.add_peer(peer_info)?; Ok(UserChannelId(user_channel_id)) }, Err(e) => { - log_error!(self.logger, "Failed to initiate channel creation: {:?}", e); + log_error!( + self.logger, + "Failed to initiate {}channel creation: {:?}", + zero_reserve_string, + e + ); Err(Error::ChannelCreationFailed) }, } @@ -1290,6 +1330,7 @@ impl Node { push_to_counterparty_msat, channel_config, false, + false, ) } @@ -1330,6 +1371,7 @@ impl Node { push_to_counterparty_msat, channel_config, true, + false, ) } @@ -1358,6 +1400,7 @@ impl Node { push_to_counterparty_msat, channel_config, false, + false, ) } @@ -1395,6 +1438,70 @@ impl Node { push_to_counterparty_msat, channel_config, true, + false, + ) + } + + /// Connect to a node and open a new unannounced channel, in which the target node can + /// spend its entire balance. + /// + /// This channel allows the target node to try to steal your channel balance with no + /// financial penalty, so this channel should only be opened to nodes you trust. + /// + /// Disconnects and reconnects are handled automatically. + /// + /// If `push_to_counterparty_msat` is set, the given value will be pushed (read: sent) to the + /// channel counterparty on channel open. This can be useful to start out with the balance not + /// entirely shifted to one side, therefore allowing to receive payments from the getgo. + /// + /// If Anchor channels are enabled, this will ensure the configured + /// [`AnchorChannelsConfig::per_channel_reserve_sats`] is available and will be retained before + /// opening the channel. + /// + /// Returns a [`UserChannelId`] allowing to locally keep track of the channel. + /// + /// [`AnchorChannelsConfig::per_channel_reserve_sats`]: crate::config::AnchorChannelsConfig::per_channel_reserve_sats + pub fn open_0reserve_channel( + &self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: u64, + push_to_counterparty_msat: Option, channel_config: Option, + ) -> Result { + self.open_channel_inner( + node_id, + address, + FundingAmount::Exact { amount_sats: channel_amount_sats }, + push_to_counterparty_msat, + channel_config, + false, + true, + ) + } + + /// Connect to a node and open a new unannounced channel, using all available on-chain funds + /// minus fees and anchor reserves. The target node will be able to spend its entire channel + /// balance. + /// + /// This channel allows the target node to try to steal your channel balance with no + /// financial penalty, so this channel should only be opened to nodes you trust. + /// + /// Disconnects and reconnects are handled automatically. + /// + /// If `push_to_counterparty_msat` is set, the given value will be pushed (read: sent) to the + /// channel counterparty on channel open. This can be useful to start out with the balance not + /// entirely shifted to one side, therefore allowing to receive payments from the getgo. + /// + /// Returns a [`UserChannelId`] allowing to locally keep track of the channel. 
+ pub fn open_0reserve_channel_with_all( + &self, node_id: PublicKey, address: SocketAddress, push_to_counterparty_msat: Option, + channel_config: Option, + ) -> Result { + self.open_channel_inner( + node_id, + address, + FundingAmount::Max, + push_to_counterparty_msat, + channel_config, + false, + true, ) } @@ -1469,12 +1576,7 @@ impl Node { let funding_template = self .channel_manager - .splice_channel( - &channel_details.channel_id, - &counterparty_node_id, - min_feerate, - max_feerate, - ) + .splice_channel(&channel_details.channel_id, &counterparty_node_id) .map_err(|e| { log_error!(self.logger, "Failed to splice channel: {:?}", e); Error::ChannelSplicingFailed @@ -1482,12 +1584,14 @@ impl Node { let contribution = self .runtime - .block_on( - funding_template - .splice_in(Amount::from_sat(splice_amount_sats), Arc::clone(&self.wallet)), - ) - .map_err(|()| { - log_error!(self.logger, "Failed to splice channel: coin selection failed"); + .block_on(funding_template.splice_in( + Amount::from_sat(splice_amount_sats), + min_feerate, + max_feerate, + Arc::clone(&self.wallet), + )) + .map_err(|e| { + log_error!(self.logger, "Failed to splice channel: {}", e); Error::ChannelSplicingFailed })?; @@ -1585,12 +1689,7 @@ impl Node { let funding_template = self .channel_manager - .splice_channel( - &channel_details.channel_id, - &counterparty_node_id, - min_feerate, - max_feerate, - ) + .splice_channel(&channel_details.channel_id, &counterparty_node_id) .map_err(|e| { log_error!(self.logger, "Failed to splice channel: {:?}", e); Error::ChannelSplicingFailed @@ -1602,9 +1701,14 @@ impl Node { }]; let contribution = self .runtime - .block_on(funding_template.splice_out(outputs, Arc::clone(&self.wallet))) - .map_err(|()| { - log_error!(self.logger, "Failed to splice channel: coin selection failed"); + .block_on(funding_template.splice_out( + outputs, + min_feerate, + max_feerate, + Arc::clone(&self.wallet), + )) + .map_err(|e| { + log_error!(self.logger, "Failed to splice channel: {}", e); Error::ChannelSplicingFailed })?; @@ -1641,7 +1745,7 @@ impl Node { /// /// [`EsploraSyncConfig::background_sync_config`]: crate::config::EsploraSyncConfig::background_sync_config pub fn sync_wallets(&self) -> Result<(), Error> { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } @@ -1957,12 +2061,30 @@ impl Drop for Node { } } +/// The best known block as identified by its hash and height. +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] +#[cfg_attr(feature = "uniffi", derive(uniffi::Record))] +pub struct BestBlock { + /// The block's hash. + pub block_hash: BlockHash, + /// The height at which the block was confirmed. + pub height: u32, +} + +impl From for BestBlock { + fn from(locator: BlockLocator) -> Self { + Self { block_hash: locator.block_hash, height: locator.height } + } +} + /// Represents the status of the [`Node`]. #[derive(Clone, Debug, PartialEq, Eq)] #[cfg_attr(feature = "uniffi", derive(uniffi::Record))] pub struct NodeStatus { /// Indicates whether the [`Node`] is running. pub is_running: bool, + /// Network (e.g. mainnet, testnet4, signet) on which the [`Node`] is running. + pub network: Network, /// The best block to which our Lightning wallet is currently synced. 
pub current_best_block: BestBlock, /// The timestamp, in seconds since start of the UNIX epoch, when we last successfully synced @@ -2000,7 +2122,6 @@ pub(crate) struct NodeMetrics { latest_lightning_wallet_sync_timestamp: Option, latest_onchain_wallet_sync_timestamp: Option, latest_fee_rate_cache_update_timestamp: Option, - latest_rgs_snapshot_timestamp: Option, latest_pathfinding_scores_sync_timestamp: Option, latest_node_announcement_broadcast_timestamp: Option, } @@ -2011,7 +2132,6 @@ impl Default for NodeMetrics { latest_lightning_wallet_sync_timestamp: None, latest_onchain_wallet_sync_timestamp: None, latest_fee_rate_cache_update_timestamp: None, - latest_rgs_snapshot_timestamp: None, latest_pathfinding_scores_sync_timestamp: None, latest_node_announcement_broadcast_timestamp: None, } @@ -2023,7 +2143,8 @@ impl_writeable_tlv_based!(NodeMetrics, { (1, latest_pathfinding_scores_sync_timestamp, option), (2, latest_onchain_wallet_sync_timestamp, option), (4, latest_fee_rate_cache_update_timestamp, option), - (6, latest_rgs_snapshot_timestamp, option), + // 6 used to be latest_rgs_snapshot_timestamp + (6, _legacy_latest_rgs_snapshot_timestamp, (legacy, u32, |_| Ok(()), |_: &NodeMetrics| None::> )), (8, latest_node_announcement_broadcast_timestamp, option), // 10 used to be latest_channel_monitor_archival_height (10, _legacy_latest_channel_monitor_archival_height, (legacy, u32, |_| Ok(()), |_: &NodeMetrics| None::> )), @@ -2064,3 +2185,55 @@ pub(crate) fn new_channel_anchor_reserve_sats( } }) } + +#[cfg(test)] +mod tests { + use lightning::util::ser::{Readable, Writeable}; + + use super::*; + + #[test] + fn node_metrics_reads_legacy_rgs_snapshot_timestamp() { + // Pre-#615, `NodeMetrics` persisted `latest_rgs_snapshot_timestamp` as an optional + // `u32` at TLV slot 6. The field has since been retired, but we must still read + // records written by older versions without failing. The shadow struct below + // mirrors main's `NodeMetrics` layout 1:1 so the byte stream we decode matches + // what an older on-disk record actually looked like. 
+ #[derive(Debug)] + struct OldNodeMetrics { + latest_lightning_wallet_sync_timestamp: Option, + latest_onchain_wallet_sync_timestamp: Option, + latest_fee_rate_cache_update_timestamp: Option, + latest_rgs_snapshot_timestamp: Option, + latest_pathfinding_scores_sync_timestamp: Option, + latest_node_announcement_broadcast_timestamp: Option, + } + impl_writeable_tlv_based!(OldNodeMetrics, { + (0, latest_lightning_wallet_sync_timestamp, option), + (1, latest_pathfinding_scores_sync_timestamp, option), + (2, latest_onchain_wallet_sync_timestamp, option), + (4, latest_fee_rate_cache_update_timestamp, option), + (6, latest_rgs_snapshot_timestamp, option), + (8, latest_node_announcement_broadcast_timestamp, option), + // 10 used to be latest_channel_monitor_archival_height + (10, _legacy_latest_channel_monitor_archival_height, (legacy, u32, |_| Ok(()), |_: &OldNodeMetrics| None::> )), + }); + + let old = OldNodeMetrics { + latest_lightning_wallet_sync_timestamp: Some(1_000), + latest_onchain_wallet_sync_timestamp: Some(1_100), + latest_fee_rate_cache_update_timestamp: Some(1_200), + latest_rgs_snapshot_timestamp: Some(1_700_000_000), + latest_pathfinding_scores_sync_timestamp: Some(1_300), + latest_node_announcement_broadcast_timestamp: Some(2_000), + }; + let bytes = old.encode(); + + let new = NodeMetrics::read(&mut &bytes[..]).unwrap(); + assert_eq!(new.latest_lightning_wallet_sync_timestamp, Some(1_000)); + assert_eq!(new.latest_onchain_wallet_sync_timestamp, Some(1_100)); + assert_eq!(new.latest_fee_rate_cache_update_timestamp, Some(1_200)); + assert_eq!(new.latest_pathfinding_scores_sync_timestamp, Some(1_300)); + assert_eq!(new.latest_node_announcement_broadcast_timestamp, Some(2_000)); + } +} diff --git a/src/liquidity.rs b/src/liquidity.rs index 485da941c..30ab2c0df 100644 --- a/src/liquidity.rs +++ b/src/liquidity.rs @@ -142,6 +142,14 @@ pub struct LSPS2ServiceConfig { /// /// [`bLIP-52`]: https://github.com/lightning/blips/blob/master/blip-0052.md#trust-models pub client_trusts_lsp: bool, + /// When set, we will allow clients to spend their entire channel balance in the channels + /// we open to them. This allows clients to try to steal your channel balance with + /// no financial penalty, so this should only be set if you trust your clients. + /// + /// See [`Node::open_0reserve_channel`] to manually open these channels. 
+ /// + /// [`Node::open_0reserve_channel`]: crate::Node::open_0reserve_channel + pub disable_client_reserve: bool, } pub(crate) struct LiquiditySourceBuilder @@ -302,7 +310,7 @@ where L::Target: LdkLogger, { pub(crate) fn set_peer_manager(&self, peer_manager: Weak) { - *self.peer_manager.write().unwrap() = Some(peer_manager); + *self.peer_manager.write().expect("lock") = Some(peer_manager); } pub(crate) fn liquidity_manager(&self) -> Arc { @@ -407,7 +415,7 @@ where if let Some(sender) = lsps1_client .pending_opening_params_requests .lock() - .unwrap() + .expect("lock") .remove(&request_id) { let response = LSPS1OpeningParamsResponse { supported_options }; @@ -463,7 +471,7 @@ where if let Some(sender) = lsps1_client .pending_create_order_requests .lock() - .unwrap() + .expect("lock") .remove(&request_id) { let response = LSPS1OrderStatus { @@ -521,7 +529,7 @@ where if let Some(sender) = lsps1_client .pending_check_order_status_requests .lock() - .unwrap() + .expect("lock") .remove(&request_id) { let response = LSPS1OrderStatus { @@ -642,7 +650,9 @@ where }; let user_channel_id: u128 = u128::from_ne_bytes( - self.keys_manager.get_secure_random_bytes()[..16].try_into().unwrap(), + self.keys_manager.get_secure_random_bytes()[..16] + .try_into() + .expect("a 16-byte slice should convert into a [u8; 16]"), ); let intercept_scid = self.channel_manager.get_intercept_scid(); @@ -717,7 +727,7 @@ where }; let init_features = if let Some(Some(peer_manager)) = - self.peer_manager.read().unwrap().as_ref().map(|weak| weak.upgrade()) + self.peer_manager.read().expect("lock").as_ref().map(|weak| weak.upgrade()) { // Fail if we're not connected to the prospective channel partner. if let Some(peer) = peer_manager.peer_by_node_id(&their_network_key) { @@ -771,13 +781,16 @@ where let mut config = self.channel_manager.get_current_config().clone(); - // We set these LSP-specific values during Node building, here we're making sure it's actually set. + // If we act as an LSPS2 service, the HTLC-value-in-flight must be 100% of the + // channel value to ensure we can forward the initial payment. That cap only + // applies to unannounced channels, so the channel must also be unannounced. debug_assert_eq!( config .channel_handshake_config - .max_inbound_htlc_value_in_flight_percent_of_channel, + .unannounced_channel_max_inbound_htlc_value_in_flight_percentage, 100 ); + debug_assert!(!config.channel_handshake_config.announce_for_forwarding); debug_assert!(config.accept_forwards_to_priv_channels); // We set the forwarding fee to 0 for now as we're getting paid by the channel fee. @@ -786,22 +799,38 @@ where config.channel_config.forwarding_fee_base_msat = 0; config.channel_config.forwarding_fee_proportional_millionths = 0; - match self.channel_manager.create_channel( - their_network_key, - channel_amount_sats, - 0, - user_channel_id, - None, - Some(config), - ) { + let result = if service_config.disable_client_reserve { + self.channel_manager.create_channel_to_trusted_peer_0reserve( + their_network_key, + channel_amount_sats, + 0, + user_channel_id, + None, + Some(config), + ) + } else { + self.channel_manager.create_channel( + their_network_key, + channel_amount_sats, + 0, + user_channel_id, + None, + Some(config), + ) + }; + + match result { Ok(_) => {}, Err(e) => { // TODO: We just silently fail here. Eventually we will need to remember // the pending requests and regularly retry opening the channel until we // succeed. 
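One possible shape for the bookkeeping the TODO above asks for, kept deliberately small; every name here is hypothetical and nothing below exists in the codebase yet:

```rust
use std::collections::HashMap;
use std::sync::Mutex;

use bitcoin::secp256k1::PublicKey;

// Hypothetical queue of failed LSPS2 channel opens: remember each request
// and drain it on a periodic retry pass until the open succeeds.
struct PendingOpen {
    channel_amount_sats: u64,
    user_channel_id: u128,
}

#[derive(Default)]
struct PendingOpens(Mutex<HashMap<PublicKey, Vec<PendingOpen>>>);

impl PendingOpens {
    fn remember(&self, counterparty: PublicKey, open: PendingOpen) {
        self.0.lock().expect("lock").entry(counterparty).or_default().push(open);
    }

    // Drain the queued opens for a retry pass; callers re-`remember` any
    // attempt that fails again.
    fn drain(&self, counterparty: &PublicKey) -> Vec<PendingOpen> {
        self.0.lock().expect("lock").remove(counterparty).unwrap_or_default()
    }
}
```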
+ let zero_reserve_string = + if service_config.disable_client_reserve { "0reserve " } else { "" }; log_error!( self.logger, - "Failed to open LSPS2 channel to {}: {:?}", + "Failed to open LSPS2 {}channel to {}: {:?}", + zero_reserve_string, their_network_key, e ); @@ -828,7 +857,7 @@ where } if let Some(sender) = - lsps2_client.pending_fee_requests.lock().unwrap().remove(&request_id) + lsps2_client.pending_fee_requests.lock().expect("lock").remove(&request_id) { let response = LSPS2FeeResponse { opening_fee_params_menu }; @@ -880,7 +909,7 @@ where } if let Some(sender) = - lsps2_client.pending_buy_requests.lock().unwrap().remove(&request_id) + lsps2_client.pending_buy_requests.lock().expect("lock").remove(&request_id) { let response = LSPS2BuyResponse { intercept_scid, cltv_expiry_delta }; @@ -930,7 +959,7 @@ where let (request_sender, request_receiver) = oneshot::channel(); { let mut pending_opening_params_requests_lock = - lsps1_client.pending_opening_params_requests.lock().unwrap(); + lsps1_client.pending_opening_params_requests.lock().expect("lock"); let request_id = client_handler.request_supported_options(lsps1_client.lsp_node_id); pending_opening_params_requests_lock.insert(request_id, request_sender); } @@ -1013,7 +1042,7 @@ where let request_id; { let mut pending_create_order_requests_lock = - lsps1_client.pending_create_order_requests.lock().unwrap(); + lsps1_client.pending_create_order_requests.lock().expect("lock"); request_id = client_handler.create_order( &lsps1_client.lsp_node_id, order_params.clone(), @@ -1059,7 +1088,7 @@ where let (request_sender, request_receiver) = oneshot::channel(); { let mut pending_check_order_status_requests_lock = - lsps1_client.pending_check_order_status_requests.lock().unwrap(); + lsps1_client.pending_check_order_status_requests.lock().expect("lock"); let request_id = client_handler.check_order_status(&lsps1_client.lsp_node_id, order_id); pending_check_order_status_requests_lock.insert(request_id, request_sender); } @@ -1200,7 +1229,8 @@ where let (fee_request_sender, fee_request_receiver) = oneshot::channel(); { - let mut pending_fee_requests_lock = lsps2_client.pending_fee_requests.lock().unwrap(); + let mut pending_fee_requests_lock = + lsps2_client.pending_fee_requests.lock().expect("lock"); let request_id = client_handler .request_opening_params(lsps2_client.lsp_node_id, lsps2_client.token.clone()); pending_fee_requests_lock.insert(request_id, fee_request_sender); @@ -1233,7 +1263,8 @@ where let (buy_request_sender, buy_request_receiver) = oneshot::channel(); { - let mut pending_buy_requests_lock = lsps2_client.pending_buy_requests.lock().unwrap(); + let mut pending_buy_requests_lock = + lsps2_client.pending_buy_requests.lock().expect("lock"); let request_id = client_handler .select_opening_params(lsps2_client.lsp_node_id, amount_msat, opening_fee_params) .map_err(|e| { diff --git a/src/lnurl_auth.rs b/src/lnurl_auth.rs index 1f95b77b1..1ce44a7c3 100644 --- a/src/lnurl_auth.rs +++ b/src/lnurl_auth.rs @@ -189,7 +189,9 @@ fn linking_key_path(hashing_key: &[u8; 32], domain_name: &str) -> Vec= Self::MAX_MESSAGES_PER_PEER { @@ -27,8 +27,11 @@ impl OnionMessageMailbox { // Enforce a peers limit. If exceeded, evict the peer with the longest queue. 
if map.len() > Self::MAX_PEERS { - let peer_to_remove = - map.iter().max_by_key(|(_, queue)| queue.len()).map(|(peer, _)| *peer).unwrap(); + let peer_to_remove = map + .iter() + .max_by_key(|(_, queue)| queue.len()) + .map(|(peer, _)| *peer) + .expect("map is non-empty"); map.remove(&peer_to_remove); } @@ -37,7 +40,7 @@ impl OnionMessageMailbox { pub(crate) fn onion_message_peer_connected( &self, peer_node_id: PublicKey, ) -> Vec { - let mut map = self.map.lock().unwrap(); + let mut map = self.map.lock().expect("lock"); if let Some(queue) = map.remove(&peer_node_id) { queue.into() @@ -48,7 +51,7 @@ impl OnionMessageMailbox { #[cfg(test)] pub(crate) fn is_empty(&self) -> bool { - let map = self.map.lock().unwrap(); + let map = self.map.lock().expect("lock"); map.is_empty() } } diff --git a/src/payment/asynchronous/static_invoice_store.rs b/src/payment/asynchronous/static_invoice_store.rs index cd0e2ebd2..6fb406334 100644 --- a/src/payment/asynchronous/static_invoice_store.rs +++ b/src/payment/asynchronous/static_invoice_store.rs @@ -63,7 +63,7 @@ impl StaticInvoiceStore { fn check_rate_limit( limiter: &Mutex, recipient_id: &[u8], ) -> Result<(), lightning::io::Error> { - let mut limiter = limiter.lock().unwrap(); + let mut limiter = limiter.lock().expect("lock"); if !limiter.allow(recipient_id) { Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, "Rate limit exceeded")) } else { diff --git a/src/payment/bolt11.rs b/src/payment/bolt11.rs index f2857e814..18c489e27 100644 --- a/src/payment/bolt11.rs +++ b/src/payment/bolt11.rs @@ -241,7 +241,7 @@ impl Bolt11Payment { pub fn send( &self, invoice: &Bolt11Invoice, route_parameters: Option, ) -> Result { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } @@ -275,7 +275,8 @@ impl Bolt11Payment { ) { Ok(()) => { let payee_pubkey = invoice.recover_payee_pub_key(); - let amt_msat = invoice.amount_milli_satoshis().unwrap(); + let amt_msat = + invoice.amount_milli_satoshis().expect("invoice amount should be set"); log_info!(self.logger, "Initiated sending {}msat to {}", amt_msat, payee_pubkey); let kind = PaymentKind::Bolt11 { @@ -342,7 +343,7 @@ impl Bolt11Payment { &self, invoice: &Bolt11Invoice, amount_msat: u64, route_parameters: Option, ) -> Result { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } @@ -776,7 +777,7 @@ impl Bolt11Payment { pub fn send_probes( &self, invoice: &Bolt11Invoice, route_parameters: Option, ) -> Result<(), Error> { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } @@ -831,7 +832,7 @@ impl Bolt11Payment { &self, invoice: &Bolt11Invoice, amount_msat: u64, route_parameters: Option, ) -> Result<(), Error> { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } diff --git a/src/payment/bolt12.rs b/src/payment/bolt12.rs index 980e20696..2e5a5fb45 100644 --- a/src/payment/bolt12.rs +++ b/src/payment/bolt12.rs @@ -89,7 +89,7 @@ impl Bolt12Payment { &self, offer: &Offer, amount_msat: u64, quantity: Option, payer_note: Option, route_parameters: Option, hrn: Option, ) -> Result { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } @@ -207,7 +207,7 @@ impl Bolt12Payment { if let Some(expiry_secs) = expiry_secs { let absolute_expiry = (SystemTime::now() + 
Duration::from_secs(expiry_secs as u64)) .duration_since(UNIX_EPOCH) - .unwrap(); + .expect("system time must be after Unix epoch"); offer_builder = offer_builder.absolute_expiry(absolute_expiry); } @@ -219,7 +219,9 @@ impl Bolt12Payment { log_error!(self.logger, "Failed to create offer: quantity can't be zero."); return Err(Error::InvalidQuantity); } else { - offer = offer.supported_quantity(Quantity::Bounded(NonZeroU64::new(qty).unwrap())) + offer = offer.supported_quantity(Quantity::Bounded( + NonZeroU64::new(qty).expect("quantity is non-zero"), + )) }; }; @@ -262,7 +264,7 @@ impl Bolt12Payment { &self, offer: &Offer, quantity: Option, payer_note: Option, route_parameters: Option, ) -> Result { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } @@ -405,7 +407,7 @@ impl Bolt12Payment { if let Some(expiry_secs) = expiry_secs { let absolute_expiry = (SystemTime::now() + Duration::from_secs(expiry_secs as u64)) .duration_since(UNIX_EPOCH) - .unwrap(); + .expect("system time must be after Unix epoch"); offer_builder = offer_builder.absolute_expiry(absolute_expiry); } @@ -425,7 +427,7 @@ impl Bolt12Payment { /// [`Refund`]: lightning::offers::refund::Refund /// [`Bolt12Invoice`]: lightning::offers::invoice::Bolt12Invoice pub fn request_refund_payment(&self, refund: &Refund) -> Result { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } @@ -474,7 +476,7 @@ impl Bolt12Payment { let absolute_expiry = (SystemTime::now() + Duration::from_secs(expiry_secs as u64)) .duration_since(UNIX_EPOCH) - .unwrap(); + .expect("system time must be after Unix epoch"); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); let route_parameters = route_parameters.or(self.config.route_parameters).unwrap_or_default(); diff --git a/src/payment/onchain.rs b/src/payment/onchain.rs index cc16690e2..9d00968fc 100644 --- a/src/payment/onchain.rs +++ b/src/payment/onchain.rs @@ -80,7 +80,7 @@ impl OnchainPayment { pub fn send_to_address( &self, address: &bitcoin::Address, amount_sats: u64, fee_rate: Option, ) -> Result { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } @@ -110,7 +110,7 @@ impl OnchainPayment { pub fn send_all_to_address( &self, address: &bitcoin::Address, retain_reserves: bool, fee_rate: Option, ) -> Result { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } diff --git a/src/payment/spontaneous.rs b/src/payment/spontaneous.rs index 74fa84c0e..1c819582e 100644 --- a/src/payment/spontaneous.rs +++ b/src/payment/spontaneous.rs @@ -56,7 +56,7 @@ impl SpontaneousPayment { route_parameters: Option, custom_tlvs: Option>, preimage: Option, ) -> Result { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } @@ -206,7 +206,7 @@ impl SpontaneousPayment { /// /// [`Bolt11Payment::send_probes`]: crate::payment::Bolt11Payment pub fn send_probes(&self, amount_msat: u64, node_id: PublicKey) -> Result<(), Error> { - if !*self.is_running.read().unwrap() { + if !*self.is_running.read().expect("lock") { return Err(Error::NotRunning); } diff --git a/src/payment/unified.rs b/src/payment/unified.rs index 8681dbf6e..3708afe8e 100644 --- a/src/payment/unified.rs +++ b/src/payment/unified.rs @@ -25,8 +25,7 @@ use bitcoin::{Amount, Txid}; use 
bitcoin_payment_instructions::amount::Amount as BPIAmount; use bitcoin_payment_instructions::{PaymentInstructions, PaymentMethod}; use lightning::ln::channelmanager::PaymentId; -use lightning::offers::offer::Offer; -use lightning::onion_message::dns_resolution::HumanReadableName; +use lightning::offers::offer::Offer as LdkOffer; use lightning::routing::router::RouteParametersConfig; use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, Description}; @@ -40,6 +39,16 @@ use crate::Config; type Uri<'a> = bip21::Uri<'a, NetworkChecked, Extras>; +#[cfg(not(feature = "uniffi"))] +type HumanReadableName = lightning::onion_message::dns_resolution::HumanReadableName; +#[cfg(feature = "uniffi")] +type HumanReadableName = crate::ffi::HumanReadableName; + +#[cfg(not(feature = "uniffi"))] +type Offer = LdkOffer; +#[cfg(feature = "uniffi")] +type Offer = Arc; + #[derive(Debug, Clone)] struct Extras { bolt11_invoice: Option, @@ -65,16 +74,27 @@ pub struct UnifiedPayment { bolt12_payment: Arc, config: Arc, logger: Arc, - hrn_resolver: Arc, + hrn_resolver: HRNResolver, + #[cfg(hrn_tests)] + test_offer: std::sync::Mutex>, } impl UnifiedPayment { pub(crate) fn new( onchain_payment: Arc, bolt11_invoice: Arc, bolt12_payment: Arc, config: Arc, logger: Arc, - hrn_resolver: Arc, + hrn_resolver: HRNResolver, ) -> Self { - Self { onchain_payment, bolt11_invoice, bolt12_payment, config, logger, hrn_resolver } + Self { + onchain_payment, + bolt11_invoice, + bolt12_payment, + config, + logger, + hrn_resolver, + #[cfg(hrn_tests)] + test_offer: std::sync::Mutex::new(None), + } } } @@ -115,7 +135,7 @@ impl UnifiedPayment { let bolt12_offer = match self.bolt12_payment.receive_inner(amount_msats, description, None, None) { - Ok(offer) => Some(offer), + Ok(offer) => Some(maybe_wrap(offer)), Err(e) => { log_error!(self.logger, "Failed to create offer: {}", e); None @@ -165,12 +185,19 @@ impl UnifiedPayment { &self, uri_str: &str, amount_msat: Option, route_parameters: Option, ) -> Result { - let parse_fut = PaymentInstructions::parse( - uri_str, - self.config.network, - self.hrn_resolver.as_ref(), - false, - ); + let target_network; + + #[cfg(hrn_tests)] + { + target_network = bitcoin::Network::Bitcoin; + } + #[cfg(not(hrn_tests))] + { + target_network = self.config.network; + } + + let parse_fut = + PaymentInstructions::parse(uri_str, target_network, &self.hrn_resolver, false); let instructions = tokio::time::timeout(Duration::from_secs(HRN_RESOLUTION_TIMEOUT_SECS), parse_fut) @@ -196,7 +223,7 @@ impl UnifiedPayment { Error::InvalidAmount })?; - let fut = instr.set_amount(amt, self.hrn_resolver.as_ref()); + let fut = instr.set_amount(amt, &self.hrn_resolver); tokio::time::timeout(Duration::from_secs(HRN_RESOLUTION_TIMEOUT_SECS), fut) .await @@ -233,8 +260,30 @@ impl UnifiedPayment { for method in sorted_payment_methods { match method { - PaymentMethod::LightningBolt12(offer) => { - let offer = maybe_wrap(offer.clone()); + PaymentMethod::LightningBolt12(_offer) => { + #[cfg(not(hrn_tests))] + let offer = maybe_wrap(_offer.clone()); + + #[cfg(hrn_tests)] + // We inject a test-only offer here because full DNSSEC validation is + // currently infeasible in regtest environments. This allows us to + // bypass the validation requirements that would otherwise fail + // without a functional global DNSSEC root in the test runner. 
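Aside: `hrn_tests` above is a custom cfg flag, so the injected test offer and the network override compile away entirely in normal builds. A minimal sketch of that pattern follows; the flag wiring and names here are hypothetical, and the hunk continues right below.

// Hypothetical: build with RUSTFLAGS="--cfg hrn_tests" to flip the branch.
fn target_network(configured: &'static str) -> &'static str {
	#[cfg(hrn_tests)]
	let network = {
		// HRN tests resolve real BIP 353 DNS records, which only exist on mainnet.
		let _ = configured;
		"bitcoin"
	};
	#[cfg(not(hrn_tests))]
	let network = configured;
	network
}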
+ let offer = { + let test_offer_guard = self.test_offer.lock().map_err(|e| { + log_error!( + self.logger, + "Failed to lock test_offer due to poisoning: {:?}", + e + ); + Error::PaymentSendingFailed + })?; + + match &*test_offer_guard { + Some(o) => o.clone(), + None => maybe_wrap(_offer.clone()), + } + }; let payment_result = if let Ok(hrn) = HumanReadableName::from_encoded(uri_str) { let hrn = maybe_wrap(hrn.clone()); @@ -290,6 +339,24 @@ impl UnifiedPayment { } } +#[cfg(hrn_tests)] +#[cfg_attr(feature = "uniffi", uniffi::export)] +impl UnifiedPayment { + /// Sets a test offer to be used in the `send` method when the `hrn_tests` config flag is enabled. + /// + /// This is necessary for Bolt12 payments in HRN tests because we typically resolve offers + /// via [BIP 353] DNS addresses. Since full DNSSEC validation is infeasible in regtest + /// environments, the automated resolution of an offer from a URI will fail. Injected + /// offers allow us to bypass this resolution step and test the subsequent payment flow. + /// + /// [BIP 353]: https://github.com/bitcoin/bips/blob/master/bip-0353.mediawiki + pub fn set_test_offer(&self, offer: Offer) { + let _ = self.test_offer.lock().map(|mut guard| *guard = Some(offer)).map_err(|e| { + log_error!(self.logger, "Failed to set test offer due to poisoned lock: {:?}", e) + }); + } +} + /// Represents the result of a payment made using a [BIP 21] URI or a [BIP 353] Human-Readable Name. /// /// After a successful on-chain transaction, the transaction ID ([`Txid`]) is returned. @@ -395,9 +462,10 @@ impl<'a> bip21::de::DeserializationState<'a> for DeserializationState { "lno" => { let bolt12_value = String::try_from(value).map_err(|_| Error::UriParameterParsingFailed)?; - let offer = - bolt12_value.parse::().map_err(|_| Error::UriParameterParsingFailed)?; - self.bolt12_offer = Some(offer); + let offer = bolt12_value + .parse::() + .map_err(|_| Error::UriParameterParsingFailed)?; + self.bolt12_offer = Some(maybe_wrap(offer)); Ok(bip21::de::ParamKind::Known) }, _ => Ok(bip21::de::ParamKind::Unknown), @@ -420,7 +488,7 @@ mod tests { use bitcoin::address::NetworkUnchecked; use bitcoin::{Address, Network}; - use super::{Amount, Bolt11Invoice, Extras, Offer}; + use super::{maybe_wrap, Amount, Bolt11Invoice, Extras, LdkOffer}; #[test] fn parse_uri() { @@ -474,7 +542,7 @@ mod tests { } if let Some(offer) = parsed_uri_with_offer.extras.bolt12_offer { - assert_eq!(offer, Offer::from_str(expected_bolt12_offer_2).unwrap()); + assert_eq!(offer, maybe_wrap(LdkOffer::from_str(expected_bolt12_offer_2).unwrap())); } else { panic!("No offer found."); } diff --git a/src/peer_store.rs b/src/peer_store.rs index ce8a9810e..307fb6929 100644 --- a/src/peer_store.rs +++ b/src/peer_store.rs @@ -41,7 +41,7 @@ where } pub(crate) fn add_peer(&self, peer_info: PeerInfo) -> Result<(), Error> { - let mut locked_peers = self.peers.write().unwrap(); + let mut locked_peers = self.peers.write().expect("lock"); if locked_peers.contains_key(&peer_info.node_id) { return Ok(()); @@ -52,18 +52,18 @@ where } pub(crate) fn remove_peer(&self, node_id: &PublicKey) -> Result<(), Error> { - let mut locked_peers = self.peers.write().unwrap(); + let mut locked_peers = self.peers.write().expect("lock"); locked_peers.remove(node_id); self.persist_peers(&*locked_peers) } pub(crate) fn list_peers(&self) -> Vec { - self.peers.read().unwrap().values().cloned().collect() + self.peers.read().expect("lock").values().cloned().collect() } pub(crate) fn get_peer(&self, node_id: &PublicKey) -> Option { - 
self.peers.read().unwrap().get(node_id).cloned() + self.peers.read().expect("lock").get(node_id).cloned() } fn persist_peers(&self, locked_peers: &HashMap) -> Result<(), Error> { diff --git a/src/runtime.rs b/src/runtime.rs index 39a34ddfe..1d8eb32b0 100644 --- a/src/runtime.rs +++ b/src/runtime.rs @@ -66,7 +66,7 @@ impl Runtime { where F: Future + Send + 'static, { - let mut background_tasks = self.background_tasks.lock().unwrap(); + let mut background_tasks = self.background_tasks.lock().expect("lock"); let runtime_handle = self.handle(); // Since it seems to make a difference to `tokio` (see // https://docs.rs/tokio/latest/tokio/time/fn.timeout.html#panics) we make sure the futures @@ -78,7 +78,8 @@ impl Runtime { where F: Future + Send + 'static, { - let mut cancellable_background_tasks = self.cancellable_background_tasks.lock().unwrap(); + let mut cancellable_background_tasks = + self.cancellable_background_tasks.lock().expect("lock"); let runtime_handle = self.handle(); // Since it seems to make a difference to `tokio` (see // https://docs.rs/tokio/latest/tokio/time/fn.timeout.html#panics) we make sure the futures @@ -90,7 +91,7 @@ impl Runtime { where F: Future + Send + 'static, { - let mut background_processor_task = self.background_processor_task.lock().unwrap(); + let mut background_processor_task = self.background_processor_task.lock().expect("lock"); debug_assert!(background_processor_task.is_none(), "Expected no background processor_task"); let runtime_handle = self.handle(); @@ -121,14 +122,15 @@ impl Runtime { } pub fn abort_cancellable_background_tasks(&self) { - let mut tasks = core::mem::take(&mut *self.cancellable_background_tasks.lock().unwrap()); + let mut tasks = + core::mem::take(&mut *self.cancellable_background_tasks.lock().expect("lock")); debug_assert!(tasks.len() > 0, "Expected some cancellable background_tasks"); tasks.abort_all(); self.block_on(async { while let Some(_) = tasks.join_next().await {} }) } pub fn wait_on_background_tasks(&self) { - let mut tasks = core::mem::take(&mut *self.background_tasks.lock().unwrap()); + let mut tasks = core::mem::take(&mut *self.background_tasks.lock().expect("lock")); debug_assert!(tasks.len() > 0, "Expected some background_tasks"); self.block_on(async { loop { @@ -161,7 +163,7 @@ impl Runtime { pub fn wait_on_background_processor_task(&self) { if let Some(background_processor_task) = - self.background_processor_task.lock().unwrap().take() + self.background_processor_task.lock().expect("lock").take() { let abort_handle = background_processor_task.abort_handle(); // Since it seems to make a difference to `tokio` (see @@ -208,7 +210,7 @@ impl Runtime { ); } - fn handle(&self) -> &tokio::runtime::Handle { + pub(crate) fn handle(&self) -> &tokio::runtime::Handle { match &self.mode { RuntimeMode::Owned(rt) => rt.handle(), RuntimeMode::Handle(handle) => handle, diff --git a/src/scoring.rs b/src/scoring.rs index 3ed7b9d1e..8abc4eab6 100644 --- a/src/scoring.rs +++ b/src/scoring.rs @@ -13,7 +13,7 @@ use crate::io::utils::write_external_pathfinding_scores_to_cache; use crate::logger::LdkLogger; use crate::runtime::Runtime; use crate::types::DynStore; -use crate::{write_node_metrics, Logger, NodeMetrics, Scorer}; +use crate::{update_and_persist_node_metrics, Logger, NodeMetrics, Scorer}; /// Start a background task that periodically downloads scores via an external url and merges them into the local /// pathfinding scores. 
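Aside: a sketch of the periodic-merge pattern that `sync_external_scores` below implements, assuming a tokio runtime with the `time` feature enabled; the download step and the score type are placeholders, not the real scorer API.

use std::sync::{Arc, Mutex};
use std::time::Duration;

async fn sync_external_scores_loop(scores: Arc<Mutex<Vec<u8>>>) {
	let mut interval = tokio::time::interval(Duration::from_secs(60 * 60));
	loop {
		interval.tick().await;
		// Placeholder for the HTTP download of serialized scores.
		let downloaded: Vec<u8> = Vec::new();
		// Merge under the lock, treating poisoning as fatal, matching the
		// `.expect("lock")` convention this diff adopts.
		scores.lock().expect("lock").extend_from_slice(&downloaded);
	}
}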
@@ -82,13 +82,14 @@ async fn sync_external_scores( log_error!(logger, "Failed to persist external scores to cache: {}", e); } - let duration_since_epoch = - SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap(); - scorer.lock().unwrap().merge(liquidities, duration_since_epoch); - let mut locked_node_metrics = node_metrics.write().unwrap(); - locked_node_metrics.latest_pathfinding_scores_sync_timestamp = - Some(duration_since_epoch.as_secs()); - write_node_metrics(&*locked_node_metrics, &*kv_store, logger).unwrap_or_else(|e| { + let duration_since_epoch = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("system time must be after Unix epoch"); + scorer.lock().expect("lock").merge(liquidities, duration_since_epoch); + update_and_persist_node_metrics(&node_metrics, &*kv_store, logger, |m| { + m.latest_pathfinding_scores_sync_timestamp = Some(duration_since_epoch.as_secs()); + }) + .unwrap_or_else(|e| { log_error!(logger, "Persisting node metrics failed: {}", e); }); log_trace!(logger, "External scores merged successfully"); diff --git a/src/types.rs b/src/types.rs index dae315ae0..aec3967b1 100644 --- a/src/types.rs +++ b/src/types.rs @@ -12,13 +12,19 @@ use std::sync::{Arc, Mutex}; use bitcoin::secp256k1::PublicKey; use bitcoin::{OutPoint, ScriptBuf}; +use bitcoin_payment_instructions::amount::Amount as BPIAmount; +use bitcoin_payment_instructions::dns_resolver::DNSHrnResolver; +use bitcoin_payment_instructions::hrn_resolution::{ + HrnResolutionFuture, HrnResolver, HumanReadableName, LNURLResolutionFuture, +}; use bitcoin_payment_instructions::onion_message_resolver::LDKOnionMessageDNSSECHrnResolver; use lightning::chain::chainmonitor; use lightning::impl_writeable_tlv_based; -use lightning::ln::channel_state::ChannelDetails as LdkChannelDetails; +use lightning::ln::channel_state::{ChannelDetails as LdkChannelDetails, ChannelShutdownState}; use lightning::ln::msgs::{RoutingMessageHandler, SocketAddress}; use lightning::ln::peer_handler::IgnoringMessageHandler; use lightning::ln::types::ChannelId; +use lightning::onion_message::dns_resolution::DNSResolverMessageHandler; use lightning::routing::gossip; use lightning::routing::router::DefaultRouter; use lightning::routing::scoring::{CombinedScorer, ProbabilisticScoringFeeParameters}; @@ -51,7 +57,7 @@ where { } -pub(crate) trait DynStoreTrait: Send + Sync { +pub trait DynStoreTrait: Send + Sync { fn read_async( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> Pin, bitcoin::io::Error>> + Send + 'static>>; @@ -318,11 +324,44 @@ pub(crate) type OnionMessenger = lightning::onion_message::messenger::OnionMesse Arc, Arc, Arc, - Arc, + Arc, IgnoringMessageHandler, >; -pub(crate) type HRNResolver = LDKOnionMessageDNSSECHrnResolver, Arc>; +#[derive(Clone)] +pub enum HRNResolver { + Onion(Arc, Arc>>), + Local(Arc), +} + +impl HrnResolver for HRNResolver { + fn resolve_hrn<'a>(&'a self, hrn: &'a HumanReadableName) -> HrnResolutionFuture<'a> { + match self { + HRNResolver::Onion(inner) => inner.resolve_hrn(hrn), + HRNResolver::Local(inner) => inner.resolve_hrn(hrn), + } + } + + fn resolve_lnurl<'a>(&'a self, url: &'a str) -> HrnResolutionFuture<'a> { + match self { + HRNResolver::Onion(inner) => inner.resolve_lnurl(url), + HRNResolver::Local(inner) => inner.resolve_lnurl(url), + } + } + + fn resolve_lnurl_to_invoice<'a>( + &'a self, callback_url: String, amount: BPIAmount, expected_description_hash: [u8; 32], + ) -> LNURLResolutionFuture<'a> { + match self { + HRNResolver::Onion(inner) => { + 
inner.resolve_lnurl_to_invoice(callback_url, amount, expected_description_hash) + }, + HRNResolver::Local(inner) => { + inner.resolve_lnurl_to_invoice(callback_url, amount, expected_description_hash) + }, + } + } +} pub(crate) type MessageRouter = lightning::onion_message::messenger::DefaultMessageRouter< Arc, @@ -558,6 +597,10 @@ pub struct ChannelDetails { pub inbound_htlc_maximum_msat: Option, /// Set of configurable parameters that affect channel operation. pub config: ChannelConfig, + /// The current shutdown state of the channel, if any. + /// + /// Will be `None` for objects serialized with LDK Node v0.1 and earlier. + pub channel_shutdown_state: Option, } impl From for ChannelDetails { @@ -573,9 +616,9 @@ impl From for ChannelDetails { channel_value_sats: value.channel_value_satoshis, unspendable_punishment_reserve: value.unspendable_punishment_reserve, user_channel_id: UserChannelId(value.user_channel_id), - // unwrap safety: This value will be `None` for objects serialized with LDK versions - // prior to 0.0.115. - feerate_sat_per_1000_weight: value.feerate_sat_per_1000_weight.unwrap(), + feerate_sat_per_1000_weight: value + .feerate_sat_per_1000_weight + .expect("value is set for objects serialized with LDK v0.0.115+"), outbound_capacity_msat: value.outbound_capacity_msat, inbound_capacity_msat: value.inbound_capacity_msat, confirmations_required: value.confirmations_required, @@ -608,11 +651,15 @@ impl From for ChannelDetails { next_outbound_htlc_limit_msat: value.next_outbound_htlc_limit_msat, next_outbound_htlc_minimum_msat: value.next_outbound_htlc_minimum_msat, force_close_spend_delay: value.force_close_spend_delay, - // unwrap safety: This field is only `None` for objects serialized prior to LDK 0.0.107 - inbound_htlc_minimum_msat: value.inbound_htlc_minimum_msat.unwrap_or(0), + inbound_htlc_minimum_msat: value + .inbound_htlc_minimum_msat + .expect("value is set for objects serialized with LDK v0.0.107+"), inbound_htlc_maximum_msat: value.inbound_htlc_maximum_msat, - // unwrap safety: `config` is only `None` for LDK objects serialized prior to 0.0.109. 
- config: value.config.map(|c| c.into()).unwrap(), + config: value + .config + .map(|c| c.into()) + .expect("value is set for objects serialized with LDK v0.0.109+"), + channel_shutdown_state: value.channel_shutdown_state, } } } diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs index 0e80a46db..daeb7becb 100644 --- a/src/wallet/mod.rs +++ b/src/wallet/mod.rs @@ -35,7 +35,7 @@ use lightning::chain::chaininterface::{ BroadcasterInterface, INCREMENTAL_RELAY_FEE_SAT_PER_1000_WEIGHT, }; use lightning::chain::channelmonitor::ANTI_REORG_DELAY; -use lightning::chain::{BestBlock, ClaimId, Listen}; +use lightning::chain::{BestBlock as BlockLocator, ClaimId, Listen}; use lightning::ln::channelmanager::PaymentId; use lightning::ln::funding::FundingTxInput; use lightning::ln::inbound_payment::ExpandedKey; @@ -115,34 +115,42 @@ impl Wallet { } pub(crate) fn get_full_scan_request(&self) -> FullScanRequest { - self.inner.lock().unwrap().start_full_scan().build() + self.inner.lock().expect("lock").start_full_scan().build() } pub(crate) fn get_incremental_sync_request(&self) -> SyncRequest<(KeychainKind, u32)> { - self.inner.lock().unwrap().start_sync_with_revealed_spks().build() + self.inner.lock().expect("lock").start_sync_with_revealed_spks().build() } pub(crate) fn get_cached_txs(&self) -> Vec> { - self.inner.lock().unwrap().tx_graph().full_txs().map(|tx_node| tx_node.tx).collect() + self.inner.lock().expect("lock").tx_graph().full_txs().map(|tx_node| tx_node.tx).collect() } pub(crate) fn get_unconfirmed_txids(&self) -> Vec { self.inner .lock() - .unwrap() + .expect("lock") .transactions() .filter(|t| t.chain_position.is_unconfirmed()) .map(|t| t.tx_node.txid) .collect() } - pub(crate) fn current_best_block(&self) -> BestBlock { - let checkpoint = self.inner.lock().unwrap().latest_checkpoint(); - BestBlock { block_hash: checkpoint.hash(), height: checkpoint.height() } + pub(crate) fn current_best_block(&self) -> BlockLocator { + let checkpoint = self.inner.lock().expect("lock").latest_checkpoint(); + let mut current_block = Some(checkpoint.clone()); + let previous_blocks = std::array::from_fn(|_| { + let child = current_block.take()?; + // BDK's checkpoint chain may be sparse; only accept contiguous parents. 
+ let parent = child.prev().filter(|cp| cp.height() + 1 == child.height())?; + current_block = Some(parent.clone()); + Some(parent.hash()) + }); + BlockLocator { block_hash: checkpoint.hash(), height: checkpoint.height(), previous_blocks } } pub(crate) fn apply_update(&self, update: impl Into) -> Result<(), Error> { - let mut locked_wallet = self.inner.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); match locked_wallet.apply_update_events(update) { Ok(events) => { self.update_payment_store(&mut *locked_wallet, events).map_err(|e| { @@ -150,7 +158,7 @@ impl Wallet { Error::PersistenceFailed })?; - let mut locked_persister = self.persister.lock().unwrap(); + let mut locked_persister = self.persister.lock().expect("lock"); locked_wallet.persist(&mut locked_persister).map_err(|e| { log_error!(self.logger, "Failed to persist wallet: {}", e); Error::PersistenceFailed @@ -172,7 +180,7 @@ impl Wallet { return Ok(()); } - let mut locked_wallet = self.inner.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); let chain_tip1 = locked_wallet.latest_checkpoint().block_id(); let wallet_txs1 = locked_wallet @@ -203,7 +211,7 @@ impl Wallet { Error::PersistenceFailed })?; - let mut locked_persister = self.persister.lock().unwrap(); + let mut locked_persister = self.persister.lock().expect("lock"); locked_wallet.persist(&mut locked_persister).map_err(|e| { log_error!(self.logger, "Failed to persist wallet: {}", e); Error::PersistenceFailed @@ -426,7 +434,7 @@ impl Wallet { ) -> Result { let fee_rate = self.fee_estimator.estimate_fee_rate(confirmation_target); - let mut locked_wallet = self.inner.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); let mut tx_builder = locked_wallet.build_tx(); tx_builder.add_recipient(output_script, amount).fee_rate(fee_rate).nlocktime(locktime); @@ -454,7 +462,7 @@ impl Wallet { }, } - let mut locked_persister = self.persister.lock().unwrap(); + let mut locked_persister = self.persister.lock().expect("lock"); locked_wallet.persist(&mut locked_persister).map_err(|e| { log_error!(self.logger, "Failed to persist wallet: {}", e); Error::PersistenceFailed @@ -469,8 +477,8 @@ impl Wallet { } pub(crate) fn get_new_address(&self) -> Result { - let mut locked_wallet = self.inner.lock().unwrap(); - let mut locked_persister = self.persister.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); + let mut locked_persister = self.persister.lock().expect("lock"); let address_info = locked_wallet.reveal_next_address(KeychainKind::External); locked_wallet.persist(&mut locked_persister).map_err(|e| { @@ -481,8 +489,8 @@ impl Wallet { } pub(crate) fn get_new_internal_address(&self) -> Result { - let mut locked_wallet = self.inner.lock().unwrap(); - let mut locked_persister = self.persister.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); + let mut locked_persister = self.persister.lock().expect("lock"); let address_info = locked_wallet.next_unused_address(KeychainKind::Internal); locked_wallet.persist(&mut locked_persister).map_err(|e| { @@ -493,8 +501,8 @@ impl Wallet { } pub(crate) fn cancel_tx(&self, tx: &Transaction) -> Result<(), Error> { - let mut locked_wallet = self.inner.lock().unwrap(); - let mut locked_persister = self.persister.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); + let mut locked_persister = self.persister.lock().expect("lock"); locked_wallet.cancel_tx(tx); locked_wallet.persist(&mut locked_persister).map_err(|e| 
{ @@ -508,7 +516,7 @@ impl Wallet { pub(crate) fn get_balances( &self, total_anchor_channels_reserve_sats: u64, ) -> Result<(u64, u64), Error> { - let balance = self.inner.lock().unwrap().balance(); + let balance = self.inner.lock().expect("lock").balance(); // Make sure `list_confirmed_utxos` returns at least one `Utxo` we could use to spend/bump // Anchors if we have any confirmed amounts. @@ -644,7 +652,7 @@ impl Wallet { pub(crate) fn get_max_funding_amount( &self, cur_anchor_reserve_sats: u64, fee_rate: FeeRate, ) -> Result { - let mut locked_wallet = self.inner.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); // Use a dummy P2WSH script (34 bytes) to match the size of a real funding output. let dummy_p2wsh_script = ScriptBuf::new().to_p2wsh(); @@ -668,7 +676,7 @@ impl Wallet { &self, shared_input: Input, shared_output_script: ScriptBuf, cur_anchor_reserve_sats: u64, fee_rate: FeeRate, ) -> Result { - let mut locked_wallet = self.inner.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); debug_assert!(matches!( locked_wallet.public_descriptor(KeychainKind::External), @@ -712,7 +720,7 @@ impl Wallet { fee_rate.unwrap_or_else(|| self.fee_estimator.estimate_fee_rate(confirmation_target)); let tx = { - let mut locked_wallet = self.inner.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); // Prepare the tx_builder. We properly check the reserve requirements (again) further down. let tx_builder = match send_amount { @@ -834,7 +842,7 @@ impl Wallet { }, } - let mut locked_persister = self.persister.lock().unwrap(); + let mut locked_persister = self.persister.lock().expect("lock"); locked_wallet.persist(&mut locked_persister).map_err(|e| { log_error!(self.logger, "Failed to persist wallet: {}", e); Error::PersistenceFailed @@ -888,8 +896,8 @@ impl Wallet { pub(crate) fn select_confirmed_utxos( &self, must_spend: Vec, must_pay_to: &[TxOut], fee_rate: FeeRate, ) -> Result { - let mut locked_wallet = self.inner.lock().unwrap(); - let mut locked_persister = self.persister.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); + let mut locked_persister = self.persister.lock().expect("lock"); debug_assert!(matches!( locked_wallet.public_descriptor(KeychainKind::External), @@ -964,7 +972,7 @@ impl Wallet { } fn list_confirmed_utxos_inner(&self) -> Result, ()> { - let locked_wallet = self.inner.lock().unwrap(); + let locked_wallet = self.inner.lock().expect("lock"); let mut utxos = Vec::new(); let confirmed_txs: Vec = locked_wallet .transactions() @@ -1058,8 +1066,8 @@ impl Wallet { #[allow(deprecated)] fn get_change_script_inner(&self) -> Result { - let mut locked_wallet = self.inner.lock().unwrap(); - let mut locked_persister = self.persister.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); + let mut locked_persister = self.persister.lock().expect("lock"); let address_info = locked_wallet.next_unused_address(KeychainKind::Internal); locked_wallet.persist(&mut locked_persister).map_err(|e| { @@ -1071,7 +1079,7 @@ impl Wallet { #[allow(deprecated)] pub(crate) fn sign_owned_inputs(&self, unsigned_tx: Transaction) -> Result { - let locked_wallet = self.inner.lock().unwrap(); + let locked_wallet = self.inner.lock().expect("lock"); let mut psbt = Psbt::from_unsigned_tx(unsigned_tx).map_err(|e| { log_error!(self.logger, "Failed to construct PSBT: {}", e); @@ -1108,7 +1116,7 @@ impl Wallet { #[allow(deprecated)] fn sign_psbt_inner(&self, mut psbt: Psbt) -> Result { - let 
locked_wallet = self.inner.lock().unwrap(); + let locked_wallet = self.inner.lock().expect("lock"); // While BDK populates both `witness_utxo` and `non_witness_utxo` fields, LDK does not. As // BDK by default doesn't trust the witness UTXO to account for the Segwit bug, we must @@ -1256,7 +1264,7 @@ impl Wallet { }, }; - let mut locked_wallet = self.inner.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); debug_assert!( locked_wallet.tx_details(txid).is_some(), @@ -1319,7 +1327,7 @@ impl Wallet { log_error!( self.logger, "Provided fee rate {} is too low for RBF fee bump of txid {}, required minimum fee rate: {}", - fee_rate.unwrap(), + fee_rate.expect("fee rate is set"), txid, required_fee_rate ); @@ -1380,7 +1388,7 @@ impl Wallet { }, } - let mut locked_persister = self.persister.lock().unwrap(); + let mut locked_persister = self.persister.lock().expect("lock"); locked_wallet.persist(&mut locked_persister).map_err(|e| { log_error!(self.logger, "Failed to persist wallet after fee bump of {}: {}", txid, e); Error::PersistenceFailed @@ -1431,7 +1439,7 @@ impl Listen for Wallet { } fn block_connected(&self, block: &bitcoin::Block, height: u32) { - let mut locked_wallet = self.inner.lock().unwrap(); + let mut locked_wallet = self.inner.lock().expect("lock"); let pre_checkpoint = locked_wallet.latest_checkpoint(); if pre_checkpoint.height() != height - 1 @@ -1481,7 +1489,7 @@ impl Listen for Wallet { }, }; - let mut locked_persister = self.persister.lock().unwrap(); + let mut locked_persister = self.persister.lock().expect("lock"); match locked_wallet.persist(&mut locked_persister) { Ok(_) => (), Err(e) => { @@ -1491,7 +1499,7 @@ impl Listen for Wallet { }; } - fn blocks_disconnected(&self, _fork_point_block: BestBlock) { + fn blocks_disconnected(&self, _fork_point_block: BlockLocator) { // This is a no-op as we don't have to tell BDK about disconnections. According to the BDK // team, it's sufficient in case of a reorg to always connect blocks starting from the last // point of disagreement. 
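Aside: a simplified sketch of the contiguity check `current_best_block` performs above when assembling `previous_blocks`. BDK's checkpoint chain may skip heights, so a parent hash is only recorded when the parent sits exactly one block below its child; the types here are stand-ins for BDK's `CheckPoint`.

#[derive(Clone)]
struct Checkpoint {
	height: u32,
	hash: [u8; 32],
	prev: Option<Box<Checkpoint>>,
}

/// Collect up to N parent hashes, stopping at the first height gap.
fn contiguous_parents<const N: usize>(tip: &Checkpoint) -> [Option<[u8; 32]>; N] {
	let mut current = Some(tip.clone());
	std::array::from_fn(|_| {
		let child = current.take()?;
		let child_height = child.height;
		// Only accept a parent that sits exactly one block below its child.
		let parent = child.prev.filter(|p| p.height + 1 == child_height)?;
		let hash = parent.hash;
		current = Some(*parent);
		Some(hash)
	})
}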
@@ -1513,7 +1521,7 @@ impl WalletSource for Wallet { &'a self, outpoint: OutPoint, ) -> impl Future> + Send + 'a { async move { - let locked_wallet = self.inner.lock().unwrap(); + let locked_wallet = self.inner.lock().expect("lock"); locked_wallet .tx_details(outpoint.txid) .map(|tx_details| tx_details.tx.deref().clone()) diff --git a/src/wallet/ser.rs b/src/wallet/ser.rs index c1ad984e6..c6a707bcd 100644 --- a/src/wallet/ser.rs +++ b/src/wallet/ser.rs @@ -94,7 +94,9 @@ impl Readable for ChangeSetDeserWrapper { decode_tlv_stream!(reader, { (0, blocks, required), }); - Ok(Self(BdkLocalChainChangeSet { blocks: blocks.0.unwrap() })) + Ok(Self(BdkLocalChainChangeSet { + blocks: blocks.0.expect("required blocks TLV field should be present"), + })) } } @@ -141,10 +143,10 @@ impl Readable for ChangeSetDeserWrapper> (0, time, required), (2, txid, required), }); - set.insert((time.0.unwrap().0, txid.0.unwrap())); + set.insert(( + time.0.expect("required confirmation time TLV field should be present").0, + txid.0.expect("required txid TLV field should be present"), + )); } Ok(Self(set)) } @@ -205,7 +210,7 @@ impl Readable for ChangeSetDeserWrapper>> { read_tlv_fields!(reader, { (0, tx, required), }); - set.insert(Arc::new(tx.0.unwrap())); + set.insert(Arc::new(tx.0.expect("required transaction TLV field should be present"))); } Ok(Self(set)) } @@ -232,8 +237,10 @@ impl Readable for ChangeSetDeserWrapper { }); Ok(Self(ConfirmationBlockTime { - block_id: block_id.0.unwrap().0, - confirmation_time: confirmation_time.0.unwrap(), + block_id: block_id.0.expect("required block_id TLV field should be present").0, + confirmation_time: confirmation_time + .0 + .expect("required confirmation_time TLV field should be present"), })) } } @@ -257,7 +264,10 @@ impl Readable for ChangeSetDeserWrapper { (2, hash, required), }); - Ok(Self(BlockId { height: height.0.unwrap(), hash: hash.0.unwrap() })) + Ok(Self(BlockId { + height: height.0.expect("required height TLV field should be present"), + hash: hash.0.expect("required hash TLV field should be present"), + })) } } @@ -285,7 +295,10 @@ impl Readable for ChangeSetDeserWrapper { decode_tlv_stream!(reader, { (0, last_revealed, required) }); Ok(Self(BdkIndexerChangeSet { - last_revealed: last_revealed.0.unwrap().0, + last_revealed: last_revealed + .0 + .expect("required last_revealed TLV field should be present") + .0, spk_cache: Default::default(), })) } @@ -317,7 +330,10 @@ impl Readable for ChangeSetDeserWrapper> { (0, descriptor_id, required), (2, last_index, required), }); - set.insert(descriptor_id.0.unwrap().0, last_index.0.unwrap()); + set.insert( + descriptor_id.0.expect("required descriptor_id TLV field should be present").0, + last_index.0.expect("required last_index TLV field should be present"), + ); } Ok(Self(set)) } @@ -336,7 +352,9 @@ impl Readable for ChangeSetDeserWrapper { decode_tlv_stream!(reader, { (0, hash, required) }); - Ok(Self(DescriptorId(hash.0.unwrap().0))) + Ok(Self(DescriptorId( + hash.0.expect("required descriptor hash TLV field should be present").0, + ))) } } @@ -351,6 +369,9 @@ impl Readable for ChangeSetDeserWrapper { use bitcoin::hashes::Hash; let buf: [u8; 32] = Readable::read(reader)?; - Ok(Self(Sha256Hash::from_slice(&buf[..]).unwrap())) + Ok(Self( + Sha256Hash::from_slice(&buf[..]) + .expect("a 32-byte buffer should decode into a sha256 hash"), + )) } } diff --git a/tests/common/cln.rs b/tests/common/cln.rs new file mode 100644 index 000000000..8b1d38d13 --- /dev/null +++ b/tests/common/cln.rs @@ -0,0 +1,329 @@ +// This file 
is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + +use std::str::FromStr; +use std::sync::Arc; + +use async_trait::async_trait; +use clightningrpc::lightningrpc::{LightningRPC, PayOptions}; +use ldk_node::bitcoin::secp256k1::PublicKey; +use ldk_node::lightning::ln::msgs::SocketAddress; +use serde_json::json; + +use super::external_node::{ExternalChannel, ExternalNode, TestFailure}; + +pub(crate) struct TestClnNode { + client: Arc, + listen_addr: SocketAddress, +} + +impl TestClnNode { + pub(crate) fn new(socket_path: &str, listen_addr: SocketAddress) -> Self { + Self { client: Arc::new(LightningRPC::new(socket_path)), listen_addr } + } + + pub(crate) fn from_env() -> Self { + let sock = + std::env::var("CLN_SOCKET_PATH").unwrap_or_else(|_| "/tmp/lightning-rpc".to_string()); + let listen_addr: SocketAddress = std::env::var("CLN_P2P_ADDR") + .unwrap_or_else(|_| "127.0.0.1:19846".to_string()) + .parse() + .unwrap(); + Self::new(&sock, listen_addr) + } + + /// Run a synchronous CLN RPC call on a dedicated blocking thread. + /// + /// `clightningrpc` is sync-only; calling it on the tokio runtime would block + /// the worker (we run with `worker_threads = 1`) and deadlock LDK's tasks. + async fn rpc(&self, f: F) -> T + where + F: FnOnce(&LightningRPC) -> T + Send + 'static, + T: Send + 'static, + { + let client = Arc::clone(&self.client); + tokio::task::spawn_blocking(move || f(&*client)).await.expect("CLN RPC task panicked") + } + + /// Repeatedly call `splice_update` until `commitments_secured` is true. + /// Returns the final PSBT. Gives up after 10 attempts. + async fn splice_update_loop( + &self, channel_id: &str, mut psbt: String, + ) -> Result { + const MAX_ATTEMPTS: u32 = 10; + for _ in 0..MAX_ATTEMPTS { + let ch_id = channel_id.to_string(); + let psbt_arg = psbt.clone(); + let update_result: serde_json::Value = self + .rpc(move |c| { + c.call("splice_update", &json!({"channel_id": ch_id, "psbt": psbt_arg})) + }) + .await + .map_err(|e| self.make_error(format!("splice_update: {}", e)))?; + psbt = update_result["psbt"] + .as_str() + .ok_or_else(|| self.make_error("splice_update did not return psbt"))? + .to_string(); + if update_result["commitments_secured"].as_bool() == Some(true) { + return Ok(psbt); + } + } + Err(self.make_error(format!( + "splice_update did not reach commitments_secured after {} attempts", + MAX_ATTEMPTS + ))) + } +} + +/// Parse a CLN msat value which may be either a plain integer or a string like "1000000msat". +/// Returns `None` if the value is missing or malformed; callers should propagate as an error. 
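Aside: a usage sketch for the `parse_msat` helper defined in the hunk that follows, covering both encodings CLN emits. The assertions are hypothetical test code, not part of the patch.

#[test]
fn parse_msat_accepts_both_encodings() {
	assert_eq!(parse_msat(&serde_json::json!(1_000_000u64)), Some(1_000_000));
	assert_eq!(parse_msat(&serde_json::json!("1000000msat")), Some(1_000_000));
	assert_eq!(parse_msat(&serde_json::json!("not-an-amount")), None);
}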
+fn parse_msat(v: &serde_json::Value) -> Option { + v.as_u64().or_else(|| v.as_str()?.strip_suffix("msat")?.parse().ok()) +} + +#[async_trait] +impl ExternalNode for TestClnNode { + fn name(&self) -> &str { + "CLN" + } + + async fn get_node_id(&self) -> Result { + let info = self + .rpc(|c| c.getinfo()) + .await + .map_err(|e| self.make_error(format!("getinfo: {}", e)))?; + PublicKey::from_str(&info.id).map_err(|e| self.make_error(format!("parse node id: {}", e))) + } + + async fn get_listening_address(&self) -> Result { + Ok(self.listen_addr.clone()) + } + + async fn connect_peer( + &self, peer_id: PublicKey, addr: SocketAddress, + ) -> Result<(), TestFailure> { + let uri = format!("{}@{}", peer_id, addr); + let _: serde_json::Value = self + .rpc(move |c| c.call("connect", &json!({"id": uri}))) + .await + .map_err(|e| self.make_error(format!("connect: {}", e)))?; + Ok(()) + } + + async fn disconnect_peer(&self, peer_id: PublicKey) -> Result<(), TestFailure> { + let id = peer_id.to_string(); + let _: serde_json::Value = self + .rpc(move |c| c.call("disconnect", &json!({"id": id, "force": true}))) + .await + .map_err(|e| self.make_error(format!("disconnect: {}", e)))?; + Ok(()) + } + + async fn open_channel( + &self, peer_id: PublicKey, _addr: SocketAddress, capacity_sat: u64, push_msat: Option, + ) -> Result { + // Use the generic `call` method to include `push_msat`, which the + // typed `fundchannel` method does not support. + let mut params = json!({ + "id": peer_id.to_string(), + "amount": capacity_sat, + }); + if let Some(push) = push_msat { + params["push_msat"] = json!(push); + } + + let result: serde_json::Value = self + .rpc(move |c| c.call("fundchannel", ¶ms)) + .await + .map_err(|e| self.make_error(format!("fundchannel: {}", e)))?; + + Ok(result["channel_id"].as_str().unwrap_or("").to_string()) + } + + async fn close_channel(&self, channel_id: &str) -> Result<(), TestFailure> { + let ch_id = channel_id.to_string(); + self.rpc(move |c| c.close(&ch_id, None, None)) + .await + .map_err(|e| self.make_error(format!("close: {}", e)))?; + Ok(()) + } + + async fn force_close_channel(&self, channel_id: &str) -> Result<(), TestFailure> { + // `unilateraltimeout: 1` triggers an immediate unilateral close. + let ch_id = channel_id.to_string(); + let _: serde_json::Value = self + .rpc(move |c| c.call("close", &json!({"id": ch_id, "unilateraltimeout": 1}))) + .await + .map_err(|e| self.make_error(format!("force close: {}", e)))?; + Ok(()) + } + + async fn create_invoice( + &self, amount_msat: u64, description: &str, + ) -> Result { + let desc = description.to_string(); + let label = format!( + "{}-{}", + desc, + std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap_or_default() + .as_nanos() + ); + let invoice = self + .rpc(move |c| c.invoice(Some(amount_msat), &label, &desc, None, None, None)) + .await + .map_err(|e| self.make_error(format!("invoice: {}", e)))?; + Ok(invoice.bolt11) + } + + async fn pay_invoice(&self, invoice: &str) -> Result { + let inv = invoice.to_string(); + let result = self + .rpc(move |c| c.pay(&inv, PayOptions::default())) + .await + .map_err(|e| self.make_error(format!("pay: {}", e)))?; + Ok(result.payment_preimage) + } + + async fn send_keysend( + &self, peer_id: PublicKey, amount_msat: u64, + ) -> Result { + let dest = peer_id.to_string(); + let result: serde_json::Value = self + .rpc(move |c| { + c.call( + "keysend", + // maxdelay=288: CLN's default is too low for LDK's required final CLTV. 
+ &json!({"destination": dest, "amount_msat": amount_msat, "maxdelay": 288}), + ) + }) + .await + .map_err(|e| self.make_error(format!("keysend: {}", e)))?; + let preimage = result["payment_preimage"] + .as_str() + .ok_or_else(|| self.make_error("keysend did not return payment_preimage"))?; + Ok(preimage.to_string()) + } + + async fn get_funding_address(&self) -> Result { + let addr = self + .rpc(|c| c.newaddr(None)) + .await + .map_err(|e| self.make_error(format!("newaddr: {}", e)))?; + addr.bech32.ok_or_else(|| self.make_error("no bech32 address returned")) + } + + async fn get_block_height(&self) -> Result { + let info = self + .rpc(|c| c.getinfo()) + .await + .map_err(|e| self.make_error(format!("getinfo: {}", e)))?; + Ok(info.blockheight as u64) + } + + async fn list_channels(&self) -> Result, TestFailure> { + let response: serde_json::Value = self + .rpc(|c| c.call("listpeerchannels", &serde_json::Map::new())) + .await + .map_err(|e| self.make_error(format!("listpeerchannels: {}", e)))?; + let mut channels = Vec::new(); + + for ch in response["channels"].as_array().into_iter().flatten() { + let peer_id_str = ch["peer_id"] + .as_str() + .ok_or_else(|| self.make_error("list_channels: missing peer_id"))?; + let peer_id = PublicKey::from_str(peer_id_str).map_err(|e| { + self.make_error(format!("list_channels: invalid peer_id '{}': {}", peer_id_str, e)) + })?; + let channel_id = ch["channel_id"] + .as_str() + .ok_or_else(|| self.make_error("list_channels: missing channel_id"))? + .to_string(); + let total_msat = parse_msat(&ch["total_msat"]) + .ok_or_else(|| self.make_error("list_channels: missing/invalid total_msat"))?; + let to_us_msat = parse_msat(&ch["to_us_msat"]) + .ok_or_else(|| self.make_error("list_channels: missing/invalid to_us_msat"))?; + let funding_txid = ch["funding_txid"].as_str().map(String::from); + let state = ch["state"].as_str().unwrap_or(""); + let pending_htlcs_count = ch["htlcs"].as_array().map(|a| a.len()).unwrap_or(0); + channels.push(ExternalChannel { + channel_id, + peer_id, + capacity_sat: total_msat / 1000, + local_balance_msat: to_us_msat, + remote_balance_msat: total_msat.saturating_sub(to_us_msat), + funding_txid, + is_active: state == "CHANNELD_NORMAL", + pending_htlcs_count, + }); + } + Ok(channels) + } + + async fn splice_in(&self, channel_id: &str, amount_sat: u64) -> Result<(), TestFailure> { + let ch_id = channel_id.to_string(); + let amount: i64 = amount_sat.try_into().map_err(|_| { + self.make_error(format!("splice_in: amount_sat overflow: {}", amount_sat)) + })?; + let init_result: serde_json::Value = self + .rpc(move |c| { + c.call("splice_init", &json!({"channel_id": ch_id, "relative_amount": amount})) + }) + .await + .map_err(|e| self.make_error(format!("splice_init: {}", e)))?; + let mut psbt = init_result["psbt"] + .as_str() + .ok_or_else(|| self.make_error("splice_init did not return psbt"))? + .to_string(); + + psbt = self.splice_update_loop(channel_id, psbt).await?; + + let ch_id = channel_id.to_string(); + let _: serde_json::Value = self + .rpc(move |c| c.call("splice_signed", &json!({"channel_id": ch_id, "psbt": psbt}))) + .await + .map_err(|e| self.make_error(format!("splice_signed: {}", e)))?; + Ok(()) + } + + async fn splice_out( + &self, channel_id: &str, amount_sat: u64, address: Option<&str>, + ) -> Result<(), TestFailure> { + // Funds always go to CLN's own wallet; specifying a custom address + // would require manual PSBT manipulation which is out of scope. 
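Aside: both `splice_in` and `splice_out` above drive the same three-step CLN flow, differing only in the sign of `relative_amount`. A condensed sketch of that state machine follows, with `rpc` as a stand-in for the JSON-RPC plumbing shown in the patch; the `splice_out` hunk continues right below.

async fn rpc(method: &str, params: serde_json::Value) -> Result<serde_json::Value, String> {
	// Stand-in for the blocking JSON-RPC transport; always fails in this sketch.
	Err(format!("{} not wired up in this sketch ({})", method, params))
}

async fn splice(channel_id: &str, relative_amount_sat: i64) -> Result<(), String> {
	let init = rpc(
		"splice_init",
		serde_json::json!({ "channel_id": channel_id, "relative_amount": relative_amount_sat }),
	)
	.await?;
	let mut psbt = init["psbt"].as_str().ok_or("splice_init: missing psbt")?.to_string();
	// Negotiate until CLN reports the new commitments as secured.
	for _ in 0..10 {
		let upd = rpc(
			"splice_update",
			serde_json::json!({ "channel_id": channel_id, "psbt": psbt }),
		)
		.await?;
		psbt = upd["psbt"].as_str().ok_or("splice_update: missing psbt")?.to_string();
		if upd["commitments_secured"].as_bool() == Some(true) {
			rpc("splice_signed", serde_json::json!({ "channel_id": channel_id, "psbt": psbt }))
				.await?;
			return Ok(());
		}
	}
	Err("commitments_secured never became true".to_string())
}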
+ if address.is_some() { + return Err(self.make_error( + "splice_out with custom address is not supported by CLN adapter".to_string(), + )); + } + let ch_id = channel_id.to_string(); + let positive: i64 = amount_sat.try_into().map_err(|_| { + self.make_error(format!("splice_out: amount_sat overflow: {}", amount_sat)) + })?; + let amount = -positive; + let init_result: serde_json::Value = self + .rpc(move |c| { + c.call("splice_init", &json!({"channel_id": ch_id, "relative_amount": amount})) + }) + .await + .map_err(|e| self.make_error(format!("splice_init: {}", e)))?; + let mut psbt = init_result["psbt"] + .as_str() + .ok_or_else(|| self.make_error("splice_init did not return psbt"))? + .to_string(); + + psbt = self.splice_update_loop(channel_id, psbt).await?; + + let ch_id = channel_id.to_string(); + let _: serde_json::Value = self + .rpc(move |c| c.call("splice_signed", &json!({"channel_id": ch_id, "psbt": psbt}))) + .await + .map_err(|e| self.make_error(format!("splice_signed: {}", e)))?; + Ok(()) + } +} diff --git a/tests/common/eclair.rs b/tests/common/eclair.rs new file mode 100644 index 000000000..c1864261d --- /dev/null +++ b/tests/common/eclair.rs @@ -0,0 +1,340 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + +use std::str::FromStr; + +use async_trait::async_trait; +use base64::prelude::{Engine as _, BASE64_STANDARD}; +use ldk_node::bitcoin::secp256k1::PublicKey; +use ldk_node::lightning::ln::msgs::SocketAddress; +use serde_json::Value; + +use super::external_node::{ExternalChannel, ExternalNode, TestFailure}; + +/// Percent-encode a string for `application/x-www-form-urlencoded` form values. +fn form_encode(s: &str) -> String { + let mut out = String::with_capacity(s.len()); + for b in s.bytes() { + match b { + b'A'..=b'Z' | b'a'..=b'z' | b'0'..=b'9' | b'-' | b'_' | b'.' 
| b'~' => { + out.push(b as char); + }, + b' ' => out.push('+'), + _ => out.push_str(&format!("%{:02X}", b)), + } + } + out +} + +pub(crate) struct TestEclairNode { + base_url: String, + auth_header: String, + listen_addr: SocketAddress, +} + +impl TestEclairNode { + pub(crate) fn new(base_url: &str, password: &str, listen_addr: SocketAddress) -> Self { + let credentials = BASE64_STANDARD.encode(format!(":{}", password)); + Self { + base_url: base_url.to_string(), + auth_header: format!("Basic {}", credentials), + listen_addr, + } + } + + pub(crate) fn from_env() -> Self { + let base_url = + std::env::var("ECLAIR_API_URL").unwrap_or_else(|_| "http://127.0.0.1:8080".to_string()); + let password = + std::env::var("ECLAIR_API_PASSWORD").unwrap_or_else(|_| "eclairpassword".to_string()); + let listen_addr: SocketAddress = std::env::var("ECLAIR_P2P_ADDR") + .unwrap_or_else(|_| "127.0.0.1:9736".to_string()) + .parse() + .unwrap(); + Self::new(&base_url, &password, listen_addr) + } + + async fn post(&self, endpoint: &str, params: &[(&str, &str)]) -> Result { + let url = format!("{}{}", self.base_url, endpoint); + let body = params + .iter() + .map(|(k, v)| format!("{}={}", form_encode(k), form_encode(v))) + .collect::>() + .join("&"); + + let request = bitreq::post(&url) + .with_header("Authorization", &self.auth_header) + .with_header("Content-Type", "application/x-www-form-urlencoded") + .with_body(body) + .with_timeout(30); + + let response = request + .send_async() + .await + .map_err(|e| self.make_error(format!("request to {} failed: {}", endpoint, e)))?; + + if response.status_code < 200 || response.status_code >= 300 { + let body_str = response.as_str().unwrap_or("(non-utf8 body)"); + return Err(self.make_error(format!( + "{} returned {}: {}", + endpoint, response.status_code, body_str + ))); + } + + let body_str = response + .as_str() + .map_err(|e| self.make_error(format!("reading response from {}: {}", endpoint, e)))?; + + serde_json::from_str(body_str).map_err(|e| { + self.make_error(format!( + "parsing response from {}: {} (body: {})", + endpoint, e, body_str + )) + }) + } + + /// Poll /getsentinfo until the payment settles or fails. Surfaces Eclair-side + /// failure reasons rather than waiting for an opaque LDK event timeout. 
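Aside: a usage sketch for the `form_encode` helper above, one assertion per encoding case (unreserved bytes kept, spaces as '+', everything else percent-encoded). These assertions are hypothetical test code; the settlement-polling helper continues after this sketch.

#[test]
fn form_encode_covers_all_cases() {
	assert_eq!(form_encode("abc-123_.~"), "abc-123_.~");
	assert_eq!(form_encode("hello world"), "hello+world");
	assert_eq!(form_encode("a&b=c"), "a%26b%3Dc");
}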
+ async fn poll_payment_settlement( + &self, payment_id: &str, label: &str, + ) -> Result { + let timeout_secs = super::INTEROP_TIMEOUT_SECS; + let deadline = tokio::time::Instant::now() + tokio::time::Duration::from_secs(timeout_secs); + loop { + if tokio::time::Instant::now() >= deadline { + return Err(self.make_error(format!( + "{} {} did not settle within {}s", + label, payment_id, timeout_secs + ))); + } + tokio::time::sleep(tokio::time::Duration::from_millis(500)).await; + let info = self.post("/getsentinfo", &[("id", payment_id)]).await?; + if let Some(attempts) = info.as_array() { + if let Some(last) = attempts.last() { + let status = last["status"]["type"].as_str().unwrap_or(""); + if status == "sent" { + return Ok(payment_id.to_string()); + } else if status == "failed" { + let failure = last["status"]["failures"] + .as_array() + .and_then(|f| f.last()) + .and_then(|f| f["failureMessage"].as_str()) + .unwrap_or("unknown"); + return Err(self + .make_error(format!("{} {} failed: {}", label, payment_id, failure))); + } + } + } + } + } +} + +#[async_trait] +impl ExternalNode for TestEclairNode { + fn name(&self) -> &str { + "Eclair" + } + + async fn get_node_id(&self) -> Result { + let info = self.post("/getinfo", &[]).await?; + let node_id_str = info["nodeId"] + .as_str() + .ok_or_else(|| self.make_error("missing nodeId in getinfo response"))?; + PublicKey::from_str(node_id_str) + .map_err(|e| self.make_error(format!("parse nodeId: {}", e))) + } + + async fn get_listening_address(&self) -> Result { + Ok(self.listen_addr.clone()) + } + + async fn connect_peer( + &self, peer_id: PublicKey, addr: SocketAddress, + ) -> Result<(), TestFailure> { + let uri = format!("{}@{}", peer_id, addr); + self.post("/connect", &[("uri", &uri)]).await?; + Ok(()) + } + + async fn disconnect_peer(&self, peer_id: PublicKey) -> Result<(), TestFailure> { + self.post("/disconnect", &[("nodeId", &peer_id.to_string())]).await?; + Ok(()) + } + + async fn open_channel( + &self, peer_id: PublicKey, _addr: SocketAddress, capacity_sat: u64, push_msat: Option, + ) -> Result { + let node_id = peer_id.to_string(); + let capacity = capacity_sat.to_string(); + let push_str = push_msat.map(|m| m.to_string()); + + let mut params = vec![("nodeId", node_id.as_str()), ("fundingSatoshis", capacity.as_str())]; + if let Some(ref push) = push_str { + params.push(("pushMsat", push.as_str())); + } + + let result = self.post("/open", ¶ms).await?; + let channel_id = result + .as_str() + .map(String::from) + .or_else(|| result["channelId"].as_str().map(String::from)) + .ok_or_else(|| { + self.make_error(format!("open did not return channel id: {}", result)) + })?; + Ok(channel_id) + } + + async fn close_channel(&self, channel_id: &str) -> Result<(), TestFailure> { + self.post("/close", &[("channelId", channel_id)]).await?; + Ok(()) + } + + async fn force_close_channel(&self, channel_id: &str) -> Result<(), TestFailure> { + self.post("/forceclose", &[("channelId", channel_id)]).await?; + Ok(()) + } + + async fn create_invoice( + &self, amount_msat: u64, description: &str, + ) -> Result { + let amount_str = amount_msat.to_string(); + let result = self + .post("/createinvoice", &[("amountMsat", &amount_str), ("description", description)]) + .await?; + let invoice = result["serialized"] + .as_str() + .ok_or_else(|| self.make_error("missing serialized in invoice response"))?; + Ok(invoice.to_string()) + } + + async fn pay_invoice(&self, invoice: &str) -> Result { + let result = self.post("/payinvoice", &[("invoice", invoice)]).await?; + 
let payment_id = result + .as_str() + .ok_or_else(|| self.make_error("payinvoice did not return payment id"))? + .to_string(); + self.poll_payment_settlement(&payment_id, "payment").await + } + + async fn send_keysend( + &self, peer_id: PublicKey, amount_msat: u64, + ) -> Result { + let amount_str = amount_msat.to_string(); + let node_id_str = peer_id.to_string(); + let result = self + .post("/sendtonode", &[("nodeId", &node_id_str), ("amountMsat", &amount_str)]) + .await?; + let payment_id = result + .as_str() + .ok_or_else(|| self.make_error("sendtonode did not return payment id"))? + .to_string(); + self.poll_payment_settlement(&payment_id, "keysend").await + } + + async fn get_funding_address(&self) -> Result { + let result = self.post("/getnewaddress", &[]).await?; + result + .as_str() + .map(String::from) + .ok_or_else(|| self.make_error("getnewaddress did not return string")) + } + + async fn get_block_height(&self) -> Result { + let info = self.post("/getinfo", &[]).await?; + info["blockHeight"] + .as_u64() + .ok_or_else(|| self.make_error("missing blockHeight in getinfo response")) + } + + async fn list_channels(&self) -> Result, TestFailure> { + let result = self.post("/channels", &[]).await?; + let channels_arr = + result.as_array().ok_or_else(|| self.make_error("/channels did not return array"))?; + + let mut channels = Vec::new(); + for ch in channels_arr { + let channel_id = ch["channelId"] + .as_str() + .ok_or_else(|| self.make_error("list_channels: missing channelId"))? + .to_string(); + let node_id_str = ch["nodeId"] + .as_str() + .ok_or_else(|| self.make_error("list_channels: missing nodeId"))?; + let peer_id = PublicKey::from_str(node_id_str).map_err(|e| { + self.make_error(format!("list_channels: invalid nodeId '{}': {}", node_id_str, e)) + })?; + let state_str = ch["state"].as_str().unwrap_or(""); + let commitments = &ch["data"]["commitments"]; + + // Closed/closing channels may lack active commitments -- skip them. 
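Aside: a sketch of the defensive `serde_json` navigation used throughout these adapters: indexing a `Value` never panics (missing keys yield `Null`), so each `.as_*()` call returns an `Option` that is either skipped or turned into a typed error. The `list_channels` hunk continues right below.

use serde_json::json;

fn first_active_funding(ch: &serde_json::Value) -> Option<u64> {
	// Missing intermediate keys produce `Null` rather than panicking.
	ch["data"]["commitments"]["active"].as_array()?.first()?["fundingAmount"].as_u64()
}

#[test]
fn skips_channels_without_active_commitments() {
	let ch = json!({ "data": { "commitments": { "active": [{ "fundingAmount": 250_000 }] } } });
	assert_eq!(first_active_funding(&ch), Some(250_000));
	assert_eq!(first_active_funding(&json!({})), None);
}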
+ let active_commitment = match commitments["active"].as_array().and_then(|a| a.first()) {
+ Some(c) => c,
+ None => continue,
+ };
+
+ let capacity_sat = active_commitment["fundingAmount"]
+ .as_u64()
+ .ok_or_else(|| self.make_error("list_channels: missing fundingAmount"))?;
+ let funding_txid = active_commitment["fundingInput"]
+ .as_str()
+ .and_then(|s| s.split(':').next())
+ .map(String::from);
+ let local_balance_msat =
+ active_commitment["localCommit"]["spec"]["toLocal"].as_u64().ok_or_else(|| {
+ self.make_error("list_channels: missing localCommit.spec.toLocal")
+ })?;
+ let remote_balance_msat =
+ active_commitment["localCommit"]["spec"]["toRemote"].as_u64().ok_or_else(|| {
+ self.make_error("list_channels: missing localCommit.spec.toRemote")
+ })?;
+
+ let pending_htlcs_count = active_commitment["localCommit"]["spec"]["htlcs"]
+ .as_array()
+ .map(|a| a.len())
+ .unwrap_or(0);
+ channels.push(ExternalChannel {
+ channel_id,
+ peer_id,
+ capacity_sat,
+ local_balance_msat,
+ remote_balance_msat,
+ funding_txid,
+ is_active: state_str == "NORMAL",
+ pending_htlcs_count,
+ });
+ }
+ Ok(channels)
+ }
+
+ async fn splice_in(&self, channel_id: &str, amount_sat: u64) -> Result<(), TestFailure> {
+ let amount_str = amount_sat.to_string();
+ self.post("/splicein", &[("channelId", channel_id), ("amountIn", &amount_str)]).await?;
+ Ok(())
+ }
+
+ async fn splice_out(
+ &self, channel_id: &str, amount_sat: u64, address: Option<&str>,
+ ) -> Result<(), TestFailure> {
+ // Eclair's /spliceout requires an address; if caller passes None, generate one
+ // from Eclair's own wallet so the trait contract is symmetric with CLN.
+ let owned_addr;
+ let addr = match address {
+ Some(a) => a,
+ None => {
+ owned_addr = self.get_funding_address().await?;
+ owned_addr.as_str()
+ },
+ };
+ let amount_str = amount_sat.to_string();
+ self.post(
+ "/spliceout",
+ &[("channelId", channel_id), ("amountOut", &amount_str), ("address", addr)],
+ )
+ .await?;
+ Ok(())
+ }
+}
diff --git a/tests/common/external_node.rs b/tests/common/external_node.rs
new file mode 100644
index 000000000..d4c93a650
--- /dev/null
+++ b/tests/common/external_node.rs
@@ -0,0 +1,148 @@
+// This file is Copyright its original authors, visible in version control history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
+// accordance with one or both of these licenses.
+
+use std::fmt;
+
+use async_trait::async_trait;
+use ldk_node::bitcoin::secp256k1::PublicKey;
+use ldk_node::lightning::ln::msgs::SocketAddress;
+
+/// Represents a channel opened to or from an external Lightning node.
+#[derive(Debug, Clone)]
+pub(crate) struct ExternalChannel {
+ /// Implementation-specific channel identifier; treat as opaque.
+ pub channel_id: String,
+ pub peer_id: PublicKey,
+ pub capacity_sat: u64,
+ pub local_balance_msat: u64,
+ pub remote_balance_msat: u64,
+ pub funding_txid: Option<String>,
+ pub is_active: bool,
+ /// In-flight HTLCs on the peer's view of the channel.
+ pub pending_htlcs_count: usize,
+}
+
+/// Errors that can occur during interop test operations.
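+///
+/// `ExternalNodeError` wraps failures reported by (or while talking to) the
+/// external implementation; `NotSupported` flags operations an implementation
+/// does not expose (e.g. splicing).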
+#[derive(Debug)]
+pub(crate) enum TestFailure {
+ ExternalNodeError { node: String, detail: String },
+ NotSupported { node: String, operation: String },
+}
+
+impl fmt::Display for TestFailure {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ TestFailure::ExternalNodeError { node, detail } => {
+ write!(f, "External node '{}' error: {}", node, detail)
+ },
+ TestFailure::NotSupported { node, operation } => {
+ write!(f, "'{}' does not support '{}'", node, operation)
+ },
+ }
+ }
+}
+
+impl std::error::Error for TestFailure {}
+
+/// Abstraction over an external Lightning node used in interop tests.
+#[async_trait]
+pub(crate) trait ExternalNode: Send + Sync {
+ /// Human-readable name for this node (e.g. "eclair", "lnd", "cln").
+ fn name(&self) -> &str;
+
+ /// Returns the node's public key.
+ async fn get_node_id(&self) -> Result<PublicKey, TestFailure>;
+
+ /// Returns an address on which this node is listening.
+ async fn get_listening_address(&self) -> Result<SocketAddress, TestFailure>;
+
+ /// Connect to a peer by public key and address.
+ async fn connect_peer(
+ &self, peer_id: PublicKey, addr: SocketAddress,
+ ) -> Result<(), TestFailure>;
+
+ /// Disconnect from a peer by public key.
+ async fn disconnect_peer(&self, peer_id: PublicKey) -> Result<(), TestFailure>;
+
+ /// Open a channel to a peer.
+ ///
+ /// Returns a channel id string that the implementation may use
+ /// to correlate with subsequent close/query calls.
+ async fn open_channel(
+ &self, peer_id: PublicKey, addr: SocketAddress, capacity_sat: u64, push_msat: Option<u64>,
+ ) -> Result<String, TestFailure>;
+
+ /// Cooperatively close a channel by its implementation-defined channel id.
+ async fn close_channel(&self, channel_id: &str) -> Result<(), TestFailure>;
+
+ /// Force-close a channel by its implementation-defined channel id.
+ async fn force_close_channel(&self, channel_id: &str) -> Result<(), TestFailure>;
+
+ /// Create a BOLT11 invoice for the given amount.
+ async fn create_invoice(
+ &self, amount_msat: u64, description: &str,
+ ) -> Result<String, TestFailure>;
+
+ /// Pay a BOLT11 invoice; returns an implementation-specific payment identifier on success.
+ async fn pay_invoice(&self, invoice: &str) -> Result<String, TestFailure>;
+
+ /// Send a keysend payment to a peer.
+ async fn send_keysend(
+ &self, peer_id: PublicKey, amount_msat: u64,
+ ) -> Result<String, TestFailure>;
+
+ /// Get an on-chain address that can be used to fund this node.
+ async fn get_funding_address(&self) -> Result<String, TestFailure>;
+
+ /// Returns the current blockchain height as seen by this node.
+ async fn get_block_height(&self) -> Result<u64, TestFailure>;
+
+ /// List all channels known to this node.
+ async fn list_channels(&self) -> Result<Vec<ExternalChannel>, TestFailure>;
+
+ /// Construct a `TestFailure::ExternalNodeError` for this node.
+ fn make_error(&self, detail: impl Into<String>) -> TestFailure {
+ TestFailure::ExternalNodeError { node: self.name().to_string(), detail: detail.into() }
+ }
+
+ /// Wait until this node has synced to at least `min_height`. Polls for up to 60 seconds.
+ async fn wait_for_block_sync(&self, min_height: u64) -> Result<(), TestFailure> {
+ for _ in 0..60 {
+ if let Ok(h) = self.get_block_height().await {
+ if h >= min_height {
+ return Ok(());
+ }
+ }
+ tokio::time::sleep(std::time::Duration::from_secs(1)).await;
+ }
+ Err(self.make_error(format!("did not reach height {} after 60s", min_height)))
+ }
+
+ /// Splice additional funds into an existing channel.
+ ///
+ /// Not all implementations support splicing. The default returns `NotSupported`.
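+ /// (For instance, `TestEclairNode` overrides both splice hooks, while other
+ /// implementations can simply inherit these defaults.)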
+ async fn splice_in(&self, _channel_id: &str, _amount_sat: u64) -> Result<(), TestFailure> {
+ Err(TestFailure::NotSupported {
+ node: self.name().to_string(),
+ operation: "splice_in".to_string(),
+ })
+ }
+
+ /// Splice funds out of an existing channel.
+ ///
+ /// If `address` is provided, funds are sent to that on-chain address;
+ /// otherwise the implementation decides the destination (e.g. own wallet).
+ /// Not all implementations support splicing. The default returns `NotSupported`.
+ async fn splice_out(
+ &self, _channel_id: &str, _amount_sat: u64, _address: Option<&str>,
+ ) -> Result<(), TestFailure> {
+ Err(TestFailure::NotSupported {
+ node: self.name().to_string(),
+ operation: "splice_out".to_string(),
+ })
+ }
+}
diff --git a/tests/common/lnd.rs b/tests/common/lnd.rs
new file mode 100644
index 000000000..4b9acb83a
--- /dev/null
+++ b/tests/common/lnd.rs
@@ -0,0 +1,406 @@
+// This file is Copyright its original authors, visible in version control history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
+// accordance with one or both of these licenses.
+
+use std::str::FromStr;
+
+use async_trait::async_trait;
+use bitcoin::hashes::{sha256, Hash};
+use bitcoin::hex::DisplayHex;
+use ldk_node::bitcoin::secp256k1::PublicKey;
+use ldk_node::lightning::ln::msgs::SocketAddress;
+use lnd_grpc_rust::lnrpc::payment::PaymentStatus;
+use lnd_grpc_rust::lnrpc::{
+ CloseChannelRequest as LndCloseChannelRequest, ConnectPeerRequest as LndConnectPeerRequest,
+ DisconnectPeerRequest as LndDisconnectPeerRequest, GetInfoRequest as LndGetInfoRequest,
+ Invoice as LndInvoice, LightningAddress as LndLightningAddress,
+ ListChannelsRequest as LndListChannelsRequest, OpenChannelRequest as LndOpenChannelRequest,
+};
+use lnd_grpc_rust::routerrpc::SendPaymentRequest;
+use lnd_grpc_rust::{connect, LndClient};
+use tokio::fs;
+use tokio::sync::Mutex;
+
+use super::external_node::{ExternalChannel, ExternalNode, TestFailure};
+
+pub(crate) struct TestLndNode {
+ client: Mutex<LndClient>,
+ listen_addr: SocketAddress,
+}
+
+impl TestLndNode {
+ pub(crate) async fn new(
+ cert_path: String, macaroon_path: String, endpoint: String, listen_addr: SocketAddress,
+ ) -> Self {
+ let cert_bytes = fs::read(&cert_path).await.expect("Failed to read TLS cert file");
+ let mac_bytes = fs::read(&macaroon_path).await.expect("Failed to read macaroon file");
+ let cert = cert_bytes.as_hex().to_string();
+ let macaroon = mac_bytes.as_hex().to_string();
+ let client = connect(cert, macaroon, endpoint).await.expect("Failed to connect to LND");
+ Self { client: Mutex::new(client), listen_addr }
+ }
+
+ pub(crate) async fn from_env() -> Self {
+ let cert_path = std::env::var("LND_CERT_PATH").expect("LND_CERT_PATH not set");
+ let macaroon_path = std::env::var("LND_MACAROON_PATH").expect("LND_MACAROON_PATH not set");
+ let endpoint =
+ std::env::var("LND_ENDPOINT").unwrap_or_else(|_| "127.0.0.1:8081".to_string());
+ let listen_addr: SocketAddress = std::env::var("LND_P2P_ADDR")
+ .unwrap_or_else(|_| "127.0.0.1:9735".to_string())
+ .parse()
+ .unwrap();
+ Self::new(cert_path, macaroon_path, endpoint, listen_addr).await
+ }
+}
+
+#[async_trait]
+impl ExternalNode for TestLndNode {
+ fn name(&self) -> &str {
+ "LND"
+ }
+
+ async fn get_node_id(&self) -> Result<PublicKey, TestFailure> {
+ let mut client = self.client.lock().await;
+ let response = client
+ .lightning()
+ .get_info(LndGetInfoRequest {})
+ .await
+ .map_err(|e| self.make_error(format!("get_info: {}",
e)))?
+ .into_inner();
+ PublicKey::from_str(&response.identity_pubkey)
+ .map_err(|e| self.make_error(format!("parse pubkey: {}", e)))
+ }
+
+ async fn get_listening_address(&self) -> Result<SocketAddress, TestFailure> {
+ Ok(self.listen_addr.clone())
+ }
+
+ async fn connect_peer(
+ &self, peer_id: PublicKey, addr: SocketAddress,
+ ) -> Result<(), TestFailure> {
+ let mut client = self.client.lock().await;
+ let request = LndConnectPeerRequest {
+ addr: Some(LndLightningAddress { pubkey: peer_id.to_string(), host: addr.to_string() }),
+ ..Default::default()
+ };
+ client
+ .lightning()
+ .connect_peer(request)
+ .await
+ .map_err(|e| self.make_error(format!("connect_peer: {}", e)))?;
+ Ok(())
+ }
+
+ async fn disconnect_peer(&self, peer_id: PublicKey) -> Result<(), TestFailure> {
+ let mut client = self.client.lock().await;
+ let request = LndDisconnectPeerRequest { pub_key: peer_id.to_string() };
+ client
+ .lightning()
+ .disconnect_peer(request)
+ .await
+ .map_err(|e| self.make_error(format!("disconnect_peer: {}", e)))?;
+ Ok(())
+ }
+
+ async fn open_channel(
+ &self, peer_id: PublicKey, _addr: SocketAddress, capacity_sat: u64, push_msat: Option<u64>,
+ ) -> Result<String, TestFailure> {
+ let mut client = self.client.lock().await;
+ let local_funding_amount: i64 = capacity_sat
+ .try_into()
+ .map_err(|_| self.make_error(format!("capacity_sat overflow: {}", capacity_sat)))?;
+ // LND's OpenChannelRequest accepts push amount in sats only.
+ // Sub-satoshi precision is truncated (e.g. 1500 msat → 1 sat).
+ let push_sat: i64 = push_msat
+ .map(|m| (m / 1000).try_into())
+ .transpose()
+ .map_err(|_| {
+ self.make_error(format!("push_msat overflow: {}", push_msat.unwrap_or(0)))
+ })?
+ .unwrap_or(0);
+
+ let request = LndOpenChannelRequest {
+ node_pubkey: peer_id.serialize().to_vec(),
+ local_funding_amount,
+ push_sat,
+ ..Default::default()
+ };
+
+ let response = client
+ .lightning()
+ .open_channel_sync(request)
+ .await
+ .map_err(|e| self.make_error(format!("open_channel: {}", e)))?
+ .into_inner();
+
+ let txid_bytes = match response.funding_txid {
+ Some(lnd_grpc_rust::lnrpc::channel_point::FundingTxid::FundingTxidBytes(bytes)) => {
+ bytes
+ },
+ Some(lnd_grpc_rust::lnrpc::channel_point::FundingTxid::FundingTxidStr(s)) => {
+ bitcoin::Txid::from_str(&s)
+ .map_err(|e| {
+ self.make_error(format!("open_channel: invalid txid string '{}': {}", s, e))
+ })?
+ .as_byte_array()
+ .to_vec()
+ },
+ None => return Err(self.make_error("No funding txid in response")),
+ };
+
+ // LND returns txid bytes in reversed order
+ let mut txid_arr: [u8; 32] = txid_bytes.try_into().map_err(|b: Vec<u8>| {
+ self.make_error(format!("open_channel: expected 32-byte txid, got {} bytes", b.len()))
+ })?;
+ txid_arr.reverse();
+ let txid_hex = txid_arr.as_hex().to_string();
+ Ok(format!("{}:{}", txid_hex, response.output_index))
+ }
+
+ async fn close_channel(&self, channel_id: &str) -> Result<(), TestFailure> {
+ let mut client = self.client.lock().await;
+ let (txid_bytes, output_index) = parse_channel_point(channel_id)?;
+ let request = LndCloseChannelRequest {
+ channel_point: Some(lnd_grpc_rust::lnrpc::ChannelPoint {
+ funding_txid: Some(
+ lnd_grpc_rust::lnrpc::channel_point::FundingTxid::FundingTxidBytes(txid_bytes),
+ ),
+ output_index,
+ }),
+ sat_per_vbyte: 1,
+ ..Default::default()
+ };
+ // CloseChannel is a server-streaming RPC that blocks until the close tx
+ // is confirmed. We spawn the stream in the background so the caller can
+ // mine blocks and wait for the ChannelClosed event separately.
+ let stream = client
+ .lightning()
+ .close_channel(request)
+ .await
+ .map_err(|e| self.make_error(format!("close_channel: {}", e)))?
+ .into_inner();
+ tokio::spawn(async move {
+ let mut s = stream;
+ while let Some(msg) = s.message().await.transpose() {
+ if let Err(e) = msg {
+ eprintln!("close_channel stream error: {}", e);
+ break;
+ }
+ }
+ });
+ Ok(())
+ }
+
+ async fn force_close_channel(&self, channel_id: &str) -> Result<(), TestFailure> {
+ let mut client = self.client.lock().await;
+ let (txid_bytes, output_index) = parse_channel_point(channel_id)?;
+ let request = LndCloseChannelRequest {
+ channel_point: Some(lnd_grpc_rust::lnrpc::ChannelPoint {
+ funding_txid: Some(
+ lnd_grpc_rust::lnrpc::channel_point::FundingTxid::FundingTxidBytes(txid_bytes),
+ ),
+ output_index,
+ }),
+ force: true,
+ ..Default::default()
+ };
+ let stream = client
+ .lightning()
+ .close_channel(request)
+ .await
+ .map_err(|e| self.make_error(format!("force_close_channel: {}", e)))?
+ .into_inner();
+ tokio::spawn(async move {
+ let mut s = stream;
+ while let Some(msg) = s.message().await.transpose() {
+ if let Err(e) = msg {
+ eprintln!("force_close_channel stream error: {}", e);
+ break;
+ }
+ }
+ });
+ Ok(())
+ }
+
+ async fn create_invoice(
+ &self, amount_msat: u64, description: &str,
+ ) -> Result<String, TestFailure> {
+ let mut client = self.client.lock().await;
+ let value_msat: i64 = amount_msat
+ .try_into()
+ .map_err(|_| self.make_error(format!("amount_msat overflow: {}", amount_msat)))?;
+ let invoice =
+ LndInvoice { value_msat, memo: description.to_string(), ..Default::default() };
+ let response = client
+ .lightning()
+ .add_invoice(invoice)
+ .await
+ .map_err(|e| self.make_error(format!("create_invoice: {}", e)))?
+ .into_inner();
+ Ok(response.payment_request)
+ }
+
+ async fn pay_invoice(&self, invoice: &str) -> Result<String, TestFailure> {
+ let mut client = self.client.lock().await;
+ let request = SendPaymentRequest {
+ payment_request: invoice.to_string(),
+ timeout_seconds: 60,
+ no_inflight_updates: true,
+ ..Default::default()
+ };
+
+ let mut stream = client
+ .router()
+ .send_payment_v2(request)
+ .await
+ .map_err(|e| self.make_error(format!("pay_invoice: {}", e)))?
+ .into_inner();
+
+ while let Some(payment) = stream
+ .message()
+ .await
+ .map_err(|e| self.make_error(format!("pay_invoice stream: {}", e)))?
+ {
+ match PaymentStatus::try_from(payment.status) {
+ Ok(PaymentStatus::Succeeded) => {
+ return Ok(payment.payment_preimage);
+ },
+ Ok(PaymentStatus::Failed) => {
+ return Err(
+ self.make_error(format!("payment failed: {:?}", payment.failure_reason))
+ );
+ },
+ _ => continue,
+ }
+ }
+
+ Err(self.make_error("payment stream ended without terminal status"))
+ }
+
+ async fn send_keysend(
+ &self, peer_id: PublicKey, amount_msat: u64,
+ ) -> Result<String, TestFailure> {
+ let mut client = self.client.lock().await;
+
+ let mut preimage = [0u8; 32];
+ rand::Rng::fill(&mut rand::rng(), &mut preimage);
+ let payment_hash = sha256::Hash::hash(&preimage).to_byte_array().to_vec();
+
+ // Keysend requires inserting the preimage as TLV record 5482373484.
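+ // The recipient recomputes sha256(preimage) and checks it against the payment
+ // hash, so keysend needs no invoice -- only the destination node id.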
+ let mut dest_custom_records = std::collections::HashMap::new();
+ dest_custom_records.insert(5482373484, preimage.to_vec());
+ let amt_msat: i64 = amount_msat
+ .try_into()
+ .map_err(|_| self.make_error(format!("amount_msat overflow: {}", amount_msat)))?;
+
+ let request = SendPaymentRequest {
+ dest: peer_id.serialize().to_vec(),
+ amt_msat,
+ payment_hash,
+ dest_custom_records,
+ timeout_seconds: 60,
+ no_inflight_updates: true,
+ ..Default::default()
+ };
+
+ let mut stream = client
+ .router()
+ .send_payment_v2(request)
+ .await
+ .map_err(|e| self.make_error(format!("send_keysend: {}", e)))?
+ .into_inner();
+
+ while let Some(payment) =
+ stream.message().await.map_err(|e| self.make_error(format!("keysend stream: {}", e)))?
+ {
+ match PaymentStatus::try_from(payment.status) {
+ Ok(PaymentStatus::Succeeded) => {
+ return Ok(payment.payment_preimage);
+ },
+ Ok(PaymentStatus::Failed) => {
+ return Err(
+ self.make_error(format!("keysend failed: {:?}", payment.failure_reason))
+ );
+ },
+ _ => continue,
+ }
+ }
+
+ Err(self.make_error("keysend stream ended without terminal status"))
+ }
+
+ async fn get_funding_address(&self) -> Result<String, TestFailure> {
+ let mut client = self.client.lock().await;
+ let response = client
+ .lightning()
+ .new_address(lnd_grpc_rust::lnrpc::NewAddressRequest {
+ r#type: 4, // TAPROOT_PUBKEY
+ ..Default::default()
+ })
+ .await
+ .map_err(|e| self.make_error(format!("get_funding_address: {}", e)))?
+ .into_inner();
+ Ok(response.address)
+ }
+
+ async fn get_block_height(&self) -> Result<u64, TestFailure> {
+ let mut client = self.client.lock().await;
+ let response = client
+ .lightning()
+ .get_info(LndGetInfoRequest {})
+ .await
+ .map_err(|e| self.make_error(format!("get_info: {}", e)))?
+ .into_inner();
+ Ok(response.block_height as u64)
+ }
+
+ async fn list_channels(&self) -> Result<Vec<ExternalChannel>, TestFailure> {
+ let mut client = self.client.lock().await;
+ let response = client
+ .lightning()
+ .list_channels(LndListChannelsRequest { ..Default::default() })
+ .await
+ .map_err(|e| self.make_error(format!("list_channels: {}", e)))?
+ .into_inner();
+
+ let channels = response
+ .channels
+ .into_iter()
+ .map(|ch| {
+ let peer_id = PublicKey::from_str(&ch.remote_pubkey).map_err(|e| {
+ self.make_error(format!(
+ "list_channels: invalid remote_pubkey '{}': {}",
+ ch.remote_pubkey, e
+ ))
+ })?;
+ // LND reports balances in satoshis; convert to msat (sub-sat precision lost).
+ Ok(ExternalChannel {
+ channel_id: ch.channel_point.clone(),
+ peer_id,
+ capacity_sat: ch.capacity as u64,
+ local_balance_msat: ch.local_balance as u64 * 1000,
+ remote_balance_msat: ch.remote_balance as u64 * 1000,
+ funding_txid: ch.channel_point.split(':').next().map(String::from),
+ is_active: ch.active,
+ pending_htlcs_count: ch.pending_htlcs.len(),
+ })
+ })
+ .collect::<Result<Vec<_>, _>>()?;
+
+ Ok(channels)
+ }
+}
+
+/// Parse a channel point string "txid:output_index" into (txid_bytes, output_index).
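+/// The txid bytes come back in internal (display-reversed) order via
+/// `Txid::as_byte_array`, matching the byte order LND hands out in
+/// `open_channel` above (which reverses them before rendering the display hex).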
+fn parse_channel_point(channel_point: &str) -> Result<(Vec<u8>, u32), TestFailure> {
+ let err = |msg: String| TestFailure::ExternalNodeError { node: "LND".to_string(), detail: msg };
+ let (txid_str, idx_str) = channel_point
+ .split_once(':')
+ .ok_or_else(|| err(format!("invalid format: {}", channel_point)))?;
+ let txid = bitcoin::Txid::from_str(txid_str).map_err(|e| err(format!("bad txid: {}", e)))?;
+ let output_index: u32 = idx_str.parse().map_err(|e| err(format!("bad output index: {}", e)))?;
+ Ok((txid.as_byte_array().to_vec(), output_index))
+}
diff --git a/tests/common/mod.rs b/tests/common/mod.rs
index 4f68f9825..a80b692ed 100644
--- a/tests/common/mod.rs
+++ b/tests/common/mod.rs
@@ -5,15 +5,24 @@
 // http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
 // accordance with one or both of these licenses.
 
-#![cfg(any(test, cln_test, lnd_test, vss_test))]
+#![cfg(any(test, cln_test, lnd_test, eclair_test, vss_test))]
 #![allow(dead_code)]
 
+pub(crate) mod external_node;
 pub(crate) mod logging;
 
+#[cfg(cln_test)]
+pub(crate) mod cln;
+#[cfg(eclair_test)]
+pub(crate) mod eclair;
+#[cfg(lnd_test)]
+pub(crate) mod lnd;
+
 use std::collections::{HashMap, HashSet};
 use std::env;
 use std::future::Future;
 use std::path::PathBuf;
+use std::str::FromStr;
 use std::sync::atomic::{AtomicU16, Ordering};
 use std::sync::{Arc, RwLock};
 use std::time::Duration;
@@ -27,13 +36,16 @@ use bitcoin::{
 use electrsd::corepc_node::{Client as BitcoindClient, Node as BitcoinD};
 use electrsd::{corepc_node, ElectrsD};
 use electrum_client::ElectrumApi;
-use ldk_node::config::{AsyncPaymentsRole, Config, ElectrumSyncConfig, EsploraSyncConfig};
+use ldk_node::config::{
+ AsyncPaymentsRole, Config, ElectrumSyncConfig, EsploraSyncConfig, HRNResolverConfig,
+ HumanReadableNamesConfig,
+};
 use ldk_node::entropy::{generate_entropy_mnemonic, NodeEntropy};
 use ldk_node::io::sqlite_store::SqliteStore;
 use ldk_node::payment::{PaymentDirection, PaymentKind, PaymentStatus};
 use ldk_node::{
- Builder, CustomTlvRecord, Event, LightningBalance, Node, NodeError, PendingSweepBalance,
- UserChannelId,
+ Builder, ChannelShutdownState, CustomTlvRecord, Event, LightningBalance, Node, NodeError,
+ PendingSweepBalance, UserChannelId,
 };
 use lightning::io;
 use lightning::ln::msgs::SocketAddress;
@@ -48,9 +60,24 @@ use rand::distr::Alphanumeric;
 use rand::{rng, Rng};
 use serde_json::{json, Value};
 
+/// Shared timeout (in seconds) for waiting on LDK events and external node operations.
+pub(crate) const INTEROP_TIMEOUT_SECS: u64 = 60;
+
 macro_rules! expect_event {
 ($node:expr, $event_type:ident) => {{
- match $node.next_event_async().await {
+ let event = tokio::time::timeout(
+ std::time::Duration::from_secs(crate::common::INTEROP_TIMEOUT_SECS),
+ $node.next_event_async(),
+ )
+ .await
+ .unwrap_or_else(|_| {
+ panic!(
+ "{} timed out waiting for {} event after 60s",
+ $node.node_id(),
+ std::stringify!($event_type)
+ )
+ });
+ match event {
 ref e @ Event::$event_type { .. } => {
 println!("{} got event {:?}", $node.node_id(), e);
 $node.event_handled().unwrap();
@@ -66,7 +93,15 @@ pub(crate) use expect_event;
 
 macro_rules! 
expect_channel_pending_event { ($node:expr, $counterparty_node_id:expr) => {{ - match $node.next_event_async().await { + let event = tokio::time::timeout( + std::time::Duration::from_secs(crate::common::INTEROP_TIMEOUT_SECS), + $node.next_event_async(), + ) + .await + .unwrap_or_else(|_| { + panic!("{} timed out waiting for ChannelPending event after 60s", $node.node_id()) + }); + match event { ref e @ Event::ChannelPending { funding_txo, counterparty_node_id, .. } => { println!("{} got event {:?}", $node.node_id(), e); assert_eq!(counterparty_node_id, $counterparty_node_id); @@ -84,7 +119,15 @@ pub(crate) use expect_channel_pending_event; macro_rules! expect_channel_ready_event { ($node:expr, $counterparty_node_id:expr) => {{ - match $node.next_event_async().await { + let event = tokio::time::timeout( + std::time::Duration::from_secs(crate::common::INTEROP_TIMEOUT_SECS), + $node.next_event_async(), + ) + .await + .unwrap_or_else(|_| { + panic!("{} timed out waiting for ChannelReady event after 60s", $node.node_id()) + }); + match event { ref e @ Event::ChannelReady { user_channel_id, counterparty_node_id, .. } => { println!("{} got event {:?}", $node.node_id(), e); assert_eq!(counterparty_node_id, Some($counterparty_node_id)); @@ -104,7 +147,15 @@ macro_rules! expect_channel_ready_events { ($node:expr, $counterparty_node_id_a:expr, $counterparty_node_id_b:expr) => {{ let mut ids = Vec::new(); for _ in 0..2 { - match $node.next_event_async().await { + let event = tokio::time::timeout( + std::time::Duration::from_secs(crate::common::INTEROP_TIMEOUT_SECS), + $node.next_event_async(), + ) + .await + .unwrap_or_else(|_| { + panic!("{} timed out waiting for ChannelReady event after 60s", $node.node_id()) + }); + match event { ref e @ Event::ChannelReady { counterparty_node_id, .. } => { println!("{} got event {:?}", $node.node_id(), e); ids.push(counterparty_node_id); @@ -130,7 +181,15 @@ pub(crate) use expect_channel_ready_events; macro_rules! expect_splice_pending_event { ($node:expr, $counterparty_node_id:expr) => {{ - match $node.next_event_async().await { + let event = tokio::time::timeout( + std::time::Duration::from_secs(crate::common::INTEROP_TIMEOUT_SECS), + $node.next_event_async(), + ) + .await + .unwrap_or_else(|_| { + panic!("{} timed out waiting for SplicePending event after 60s", $node.node_id()) + }); + match event { ref e @ Event::SplicePending { new_funding_txo, counterparty_node_id, .. } => { println!("{} got event {:?}", $node.node_id(), e); assert_eq!(counterparty_node_id, $counterparty_node_id); @@ -148,19 +207,27 @@ pub(crate) use expect_splice_pending_event; macro_rules! expect_payment_received_event { ($node:expr, $amount_msat:expr) => {{ - match $node.next_event_async().await { + let event = tokio::time::timeout( + std::time::Duration::from_secs(crate::common::INTEROP_TIMEOUT_SECS), + $node.next_event_async(), + ) + .await + .unwrap_or_else(|_| { + panic!("{} timed out waiting for PaymentReceived event after 60s", $node.node_id()) + }); + match event { ref e @ Event::PaymentReceived { payment_id, amount_msat, .. } => { println!("{} got event {:?}", $node.node_id(), e); assert_eq!(amount_msat, $amount_msat); let payment = $node.payment(&payment_id.unwrap()).unwrap(); - if !matches!(payment.kind, PaymentKind::Onchain { .. }) { + if !matches!(payment.kind, ldk_node::payment::PaymentKind::Onchain { .. 
}) {
 assert_eq!(payment.fee_paid_msat, None);
 }
 $node.event_handled().unwrap();
 payment_id
 },
 ref e => {
- panic!("{} got unexpected event!: {:?}", std::stringify!(node_b), e);
+ panic!("{} got unexpected event!: {:?}", std::stringify!($node), e);
 },
 }
 }};
@@ -170,7 +237,18 @@ pub(crate) use expect_payment_received_event;
 
 macro_rules! expect_payment_claimable_event {
 ($node:expr, $payment_id:expr, $payment_hash:expr, $claimable_amount_msat:expr) => {{
- match $node.next_event_async().await {
+ let event = tokio::time::timeout(
+ std::time::Duration::from_secs(crate::common::INTEROP_TIMEOUT_SECS),
+ $node.next_event_async(),
+ )
+ .await
+ .unwrap_or_else(|_| {
+ panic!(
+ "{} timed out waiting for PaymentClaimable event after 60s",
+ std::stringify!($node)
+ )
+ });
+ match event {
 ref e @ Event::PaymentClaimable {
 payment_id,
 payment_hash,
@@ -195,7 +273,15 @@ pub(crate) use expect_payment_claimable_event;
 
 macro_rules! expect_payment_successful_event {
 ($node:expr, $payment_id:expr, $fee_paid_msat:expr) => {{
- match $node.next_event_async().await {
+ let event = tokio::time::timeout(
+ std::time::Duration::from_secs(crate::common::INTEROP_TIMEOUT_SECS),
+ $node.next_event_async(),
+ )
+ .await
+ .unwrap_or_else(|_| {
+ panic!("{} timed out waiting for PaymentSuccessful event after 60s", $node.node_id())
+ });
+ match event {
 ref e @ Event::PaymentSuccessful { payment_id, fee_paid_msat, .. } => {
 println!("{} got event {:?}", $node.node_id(), e);
 if let Some(fee_msat) = $fee_paid_msat {
@@ -371,15 +457,16 @@ impl Default for TestConfig {
 
 macro_rules! setup_builder {
 ($builder:ident, $config:expr) => {
- #[cfg(feature = "uniffi")]
- let $builder = Builder::from_config($config.clone());
- #[cfg(not(feature = "uniffi"))]
+ #[allow(unused_mut)]
 let mut $builder = Builder::from_config($config.clone());
 };
 }
 
 pub(crate) use setup_builder;
 
+#[cfg(any(cln_test, lnd_test, eclair_test))]
+pub(crate) mod scenarios;
+
 pub(crate) fn setup_two_nodes(
 chain_source: &TestChainSource, allow_0conf: bool, anchor_channels: bool,
 anchors_trusted_no_reserve: bool,
@@ -400,11 +487,27 @@ pub(crate) fn setup_two_nodes_with_store(
 println!("== Node A ==");
 let mut config_a = random_config(anchor_channels);
 config_a.store_type = store_type;
+
+ if cfg!(hrn_tests) {
+ config_a.node_config.hrn_config =
+ HumanReadableNamesConfig { resolution_config: HRNResolverConfig::Blip32 };
+ }
+
 let node_a = setup_node(chain_source, config_a);
 
 println!("\n== Node B ==");
 let mut config_b = random_config(anchor_channels);
 config_b.store_type = store_type;
+
+ if cfg!(hrn_tests) {
+ config_b.node_config.hrn_config = HumanReadableNamesConfig {
+ resolution_config: HRNResolverConfig::Dns {
+ dns_server_address: SocketAddress::from_str("8.8.8.8:53").unwrap(),
+ enable_hrn_resolution_service: true,
+ },
+ };
+ }
+
 if allow_0conf {
 config_b.node_config.trusted_peers_0conf.push(node_a.node_id());
 }
@@ -422,7 +525,17 @@ pub(crate) fn setup_two_nodes_with_store(
 }
 
 pub(crate) fn setup_node(chain_source: &TestChainSource, config: TestConfig) -> TestNode {
+ setup_node_with_builder(chain_source, config, |_| {})
+}
+
+pub(crate) fn setup_node_with_builder<F>(
+ chain_source: &TestChainSource, config: TestConfig, configure_builder: F,
+) -> TestNode
+where
+ F: FnOnce(&mut Builder),
+{
 setup_builder!(builder, config.node_config);
+
 match chain_source {
 TestChainSource::Esplora(electrsd) => {
 let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap());
@@ -481,6 +594,8 @@ pub(crate) fn setup_node(chain_source: &TestChainSource, 
config: TestConfig) ->
 builder.set_wallet_recovery_mode();
 }
 
+ configure_builder(&mut builder);
+
 let node = match config.store_type {
 TestStoreType::TestSyncStore => {
 let kv_store = TestSyncStore::new(config.node_config.storage_dir_path.into());
@@ -489,10 +604,6 @@ pub(crate) fn setup_node(chain_source: &TestChainSource, config: TestConfig) ->
 TestStoreType::Sqlite => builder.build(config.node_entropy.into()).unwrap(),
 };
 
- if config.recovery_mode {
- builder.set_wallet_recovery_mode();
- }
-
 node.start().unwrap();
 assert!(node.status().is_running);
 assert!(node.status().latest_fee_rate_cache_update_timestamp.is_some());
@@ -790,7 +901,7 @@ pub async fn splice_in_with_all(
 pub(crate) async fn do_channel_full_cycle<E: ElectrumApi>(
 node_a: TestNode, node_b: TestNode, bitcoind: &BitcoindClient, electrsd: &E, allow_0conf: bool,
- expect_anchor_channel: bool, force_close: bool,
+ disable_node_b_reserve: bool, expect_anchor_channel: bool, force_close: bool,
 ) {
 let addr_a = node_a.onchain_payment().new_address().unwrap();
 let addr_b = node_b.onchain_payment().new_address().unwrap();
@@ -846,15 +957,27 @@ pub(crate) async fn do_channel_full_cycle(
 println!("\nA -- open_channel -> B");
 let funding_amount_sat = 2_080_000;
 let push_msat = (funding_amount_sat / 2) * 1000; // balance the channel
- node_a
- .open_announced_channel(
- node_b.node_id(),
- node_b.listening_addresses().unwrap().first().unwrap().clone(),
- funding_amount_sat,
- Some(push_msat),
- None,
- )
- .unwrap();
+ if disable_node_b_reserve {
+ node_a
+ .open_0reserve_channel(
+ node_b.node_id(),
+ node_b.listening_addresses().unwrap().first().unwrap().clone(),
+ funding_amount_sat,
+ Some(push_msat),
+ None,
+ )
+ .unwrap();
+ } else {
+ node_a
+ .open_announced_channel(
+ node_b.node_id(),
+ node_b.listening_addresses().unwrap().first().unwrap().clone(),
+ funding_amount_sat,
+ Some(push_msat),
+ None,
+ )
+ .unwrap();
+ }
 
 assert_eq!(node_a.list_peers().first().unwrap().node_id, node_b.node_id());
 assert!(node_a.list_peers().first().unwrap().is_persisted);
@@ -913,9 +1036,31 @@ pub(crate) async fn do_channel_full_cycle(
 node_b_anchor_reserve_sat
 );
 
+ // Note that only node B has 0-reserve; we don't yet have an API to allow the opener of
+ // the channel to have 0-reserve.
+ if disable_node_b_reserve {
+ assert_eq!(node_b.list_channels()[0].unspendable_punishment_reserve, Some(0));
+ assert_eq!(node_b.list_channels()[0].outbound_capacity_msat, push_msat);
+ assert_eq!(node_b.list_channels()[0].next_outbound_htlc_limit_msat, push_msat);
+
+ assert_eq!(node_b.list_balances().total_lightning_balance_sats * 1000, push_msat);
+ let LightningBalance::ClaimableOnChannelClose { amount_satoshis, .. } =
+ node_b.list_balances().lightning_balances[0]
+ else {
+ panic!("Unexpected `LightningBalance` variant");
+ };
+ assert_eq!(amount_satoshis * 1000, push_msat);
+ }
+
 let user_channel_id_a = expect_channel_ready_event!(node_a, node_b.node_id());
 let user_channel_id_b = expect_channel_ready_event!(node_b, node_a.node_id());
 
+ // After channel_ready, no shutdown should be in progress.
+ assert!(node_a.list_channels().iter().all(|c| matches!( + c.channel_shutdown_state, + None | Some(ChannelShutdownState::NotShuttingDown) + ))); + println!("\nB receive"); let invoice_amount_1_msat = 2500_000; let invoice_description: Bolt11InvoiceDescription = @@ -1261,12 +1406,59 @@ pub(crate) async fn do_channel_full_cycle( 2 ); + if disable_node_b_reserve { + let node_a_outbound_capacity_msat = node_a.list_channels()[0].outbound_capacity_msat; + let node_a_reserve_msat = + node_a.list_channels()[0].unspendable_punishment_reserve.unwrap() * 1000; + // TODO: Zero-fee commitment channels are anchor channels, but do not allocate any + // funds to the anchor, so this will need to be updated when we ship these channels + // in ldk-node. + let node_a_anchors_msat = if expect_anchor_channel { 2 * 330 * 1000 } else { 0 }; + let funding_amount_msat = node_a.list_channels()[0].channel_value_sats * 1000; + // Node B does not have any reserve, so we only subtract a few items on node A's + // side to arrive at node B's capacity + let node_b_capacity_msat = funding_amount_msat + - node_a_outbound_capacity_msat + - node_a_reserve_msat + - node_a_anchors_msat; + let got_capacity_msat = node_b.list_channels()[0].outbound_capacity_msat; + assert_eq!(got_capacity_msat, node_b_capacity_msat); + assert_ne!(got_capacity_msat, 0); + // Sanity check to make sure this is a non-trivial amount + assert!(got_capacity_msat > 15_000_000); + + // This is a private channel, so node B can send 100% of the value over + assert_eq!(node_b.list_channels()[0].next_outbound_htlc_limit_msat, node_b_capacity_msat); + + node_b.spontaneous_payment().send(node_b_capacity_msat, node_a.node_id(), None).unwrap(); + expect_event!(node_b, PaymentSuccessful); + expect_event!(node_a, PaymentReceived); + + node_a.spontaneous_payment().send(node_b_capacity_msat, node_b.node_id(), None).unwrap(); + expect_event!(node_a, PaymentSuccessful); + expect_event!(node_b, PaymentReceived); + } + println!("\nB close_channel (force: {})", force_close); tokio::time::sleep(Duration::from_secs(1)).await; if force_close { node_a.force_close_channel(&user_channel_id_a, node_b.node_id(), None).unwrap(); } else { node_a.close_channel(&user_channel_id_a, node_b.node_id()).unwrap(); + // The cooperative shutdown may complete before we get to check, but if the channel + // is still visible it must already be in a shutdown state. + if let Some(channel) = + node_a.list_channels().into_iter().find(|c| c.user_channel_id == user_channel_id_a) + { + assert!( + !matches!( + channel.channel_shutdown_state, + None | Some(ChannelShutdownState::NotShuttingDown) + ), + "Expected shutdown in progress on node_a, got {:?}", + channel.channel_shutdown_state, + ); + } } expect_event!(node_a, ChannelClosed); @@ -1357,6 +1549,49 @@ pub(crate) async fn do_channel_full_cycle( generate_blocks_and_wait(&bitcoind, electrsd, 5).await; node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); + } else { + assert_eq!(node_a.list_balances().lightning_balances.len(), 1); + assert!(node_a.list_balances().pending_balances_from_channel_closures.is_empty()); + let node_a_blocks_to_go = match node_a.list_balances().lightning_balances[0] { + LightningBalance::ClaimableAwaitingConfirmations { + counterparty_node_id, + confirmation_height, + .. 
+ } => {
+ assert_eq!(counterparty_node_id, node_b.node_id());
+ let cur_height = node_a.status().current_best_block.height;
+ let blocks_to_go = confirmation_height - cur_height;
+ blocks_to_go
+ },
+ _ => panic!("Unexpected balance state!"),
+ };
+
+ assert_eq!(node_b.list_balances().lightning_balances.len(), 1);
+ assert!(node_b.list_balances().pending_balances_from_channel_closures.is_empty());
+ let node_b_blocks_to_go = match node_b.list_balances().lightning_balances[0] {
+ LightningBalance::ClaimableAwaitingConfirmations {
+ counterparty_node_id,
+ confirmation_height,
+ ..
+ } => {
+ assert_eq!(counterparty_node_id, node_a.node_id());
+ let cur_height = node_b.status().current_best_block.height;
+ let blocks_to_go = confirmation_height - cur_height;
+ blocks_to_go
+ },
+ _ => panic!("Unexpected balance state!"),
+ };
+
+ assert_eq!(node_a_blocks_to_go, node_b_blocks_to_go);
+
+ generate_blocks_and_wait(&bitcoind, electrsd, node_a_blocks_to_go as usize).await;
+ node_a.sync_wallets().unwrap();
+ node_b.sync_wallets().unwrap();
+
+ assert!(node_a.list_balances().lightning_balances.is_empty());
+ assert!(node_a.list_balances().pending_balances_from_channel_closures.is_empty());
+ assert!(node_b.list_balances().lightning_balances.is_empty());
+ assert!(node_b.list_balances().pending_balances_from_channel_closures.is_empty());
 }
 
 let sum_of_all_payments_sat = (push_msat
diff --git a/tests/common/scenarios/channel.rs b/tests/common/scenarios/channel.rs
new file mode 100644
index 000000000..da968b469
--- /dev/null
+++ b/tests/common/scenarios/channel.rs
@@ -0,0 +1,103 @@
+// This file is Copyright its original authors, visible in version control history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
+// accordance with one or both of these licenses.
+
+use std::time::Duration;
+
+use electrsd::corepc_node::Client as BitcoindClient;
+use electrum_client::ElectrumApi;
+use ldk_node::{Event, Node};
+
+use super::super::external_node::ExternalNode;
+use super::super::generate_blocks_and_wait;
+use super::Side;
+
+/// Open a channel from LDK to peer; returns (user_channel_id, external_channel_id).
+pub(crate) async fn open_channel_to_external<E: ElectrumApi>(
+ node: &Node, peer: &(impl ExternalNode + ?Sized), bitcoind: &BitcoindClient, electrs: &E,
+ funding_amount_sat: u64, push_msat: Option<u64>,
+) -> (ldk_node::UserChannelId, String) {
+ let ext_node_id = peer.get_node_id().await.unwrap();
+ let ext_addr = peer.get_listening_address().await.unwrap();
+
+ node.open_channel(ext_node_id, ext_addr, funding_amount_sat, push_msat, None).unwrap();
+
+ let funding_txo = expect_channel_pending_event!(node, ext_node_id);
+ super::super::wait_for_tx(electrs, funding_txo.txid).await;
+ generate_blocks_and_wait(bitcoind, electrs, 10).await;
+ super::sync_wallets_with_retry(node).await;
+ let user_channel_id = expect_channel_ready_event!(node, ext_node_id);
+
+ let ext_channels = peer.list_channels().await.unwrap();
+ let funding_txid_str = funding_txo.txid.to_string();
+ let ext_channel_id = ext_channels
+ .iter()
+ .find(|ch| ch.funding_txid.as_deref() == Some(&funding_txid_str))
+ // Fallback to active channel by peer_id; avoids picking up closing channels from prior scenarios.
+ .or_else(|| ext_channels.iter().find(|ch| ch.peer_id == node.node_id() && ch.is_active))
+ .map(|ch| ch.channel_id.clone())
+ .unwrap_or_else(|| panic!("Could not find channel on external node {}", peer.name()));
+
+ (user_channel_id, ext_channel_id)
+}
+
+/// Cooperative close from the chosen side. Mines 1 block and asserts ChannelClosed.
+pub(crate) async fn cooperative_close<E: ElectrumApi>(
+ node: &Node, peer: &(impl ExternalNode + ?Sized), bitcoind: &BitcoindClient, electrs: &E,
+ user_channel_id: &ldk_node::UserChannelId, ext_channel_id: &str, initiator: Side,
+) {
+ tokio::time::sleep(Duration::from_secs(2)).await;
+ match initiator {
+ Side::Ldk => {
+ let ext_node_id = peer.get_node_id().await.unwrap();
+ node.close_channel(user_channel_id, ext_node_id).unwrap();
+ },
+ Side::External => {
+ peer.close_channel(ext_channel_id).await.unwrap();
+ },
+ }
+ generate_blocks_and_wait(bitcoind, electrs, 1).await;
+ super::sync_wallets_with_retry(node).await;
+ expect_event!(node, ChannelClosed);
+}
+
+/// Force close from the chosen side. Mines 6 blocks and asserts ChannelClosed.
+///
+/// External-initiated path additionally polls the mempool because the peer's
+/// commitment-broadcast can lag the force-close RPC return.
+pub(crate) async fn force_close<E: ElectrumApi>(
+ node: &Node, peer: &(impl ExternalNode + ?Sized), bitcoind: &BitcoindClient, electrs: &E,
+ user_channel_id: &ldk_node::UserChannelId, ext_channel_id: &str, initiator: Side,
+) {
+ match initiator {
+ Side::Ldk => {
+ let ext_node_id = peer.get_node_id().await.unwrap();
+ node.force_close_channel(user_channel_id, ext_node_id, None).unwrap();
+ expect_event!(node, ChannelClosed);
+ generate_blocks_and_wait(bitcoind, electrs, 6).await;
+ super::sync_wallets_with_retry(node).await;
+ },
+ Side::External => {
+ peer.force_close_channel(ext_channel_id).await.unwrap();
+ // External peer's force-close RPC may return before commitment tx is broadcast.
+ let before =
+ bitcoind.call::<Vec<String>>("getrawmempool", &[]).unwrap_or_default().len();
+ for _ in 0..30 {
+ tokio::time::sleep(Duration::from_secs(1)).await;
+ let now =
+ bitcoind.call::<Vec<String>>("getrawmempool", &[]).unwrap_or_default().len();
+ if now > before {
+ break;
+ }
+ }
+ generate_blocks_and_wait(bitcoind, electrs, 6).await;
+ super::sync_wallets_with_retry(node).await;
+ tokio::time::sleep(Duration::from_secs(2)).await;
+ super::sync_wallets_with_retry(node).await;
+ expect_event!(node, ChannelClosed);
+ },
+ }
+}
diff --git a/tests/common/scenarios/connectivity.rs b/tests/common/scenarios/connectivity.rs
new file mode 100644
index 000000000..e24419c76
--- /dev/null
+++ b/tests/common/scenarios/connectivity.rs
@@ -0,0 +1,86 @@
+// This file is Copyright its original authors, visible in version control history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
+// accordance with one or both of these licenses.
+
+use std::str::FromStr;
+use std::time::Duration;
+
+use ldk_node::bitcoin::secp256k1::PublicKey;
+use ldk_node::lightning::ln::msgs::SocketAddress;
+use ldk_node::{Event, Node};
+use lightning_invoice::Bolt11Invoice;
+
+use super::super::external_node::ExternalNode;
+use super::Side;
+
+/// Disconnect a peer from the chosen side. Returns the dispatch result so callers
+/// can tolerate races (mid-payment) by ignoring the error.
+pub(crate) async fn disconnect_by_side(
+ node: &Node, peer: &(impl ExternalNode + ?Sized), side: &Side,
+) -> Result<(), String> {
+ let ext_node_id = peer.get_node_id().await.unwrap();
+ match side {
+ Side::Ldk => node.disconnect(ext_node_id).map_err(|e| format!("{:?}", e)),
+ Side::External => {
+ peer.disconnect_peer(node.node_id()).await.map_err(|e| format!("{:?}", e))
+ },
+ }
+}
+
+/// Reconnect to a peer and wait until the connection is established.
+pub(crate) async fn reconnect_and_wait(
+ node: &Node, peer_id: PublicKey, addr: SocketAddress, context: &str,
+) {
+ node.connect(peer_id, addr, true).unwrap();
+ let max_attempts = super::super::INTEROP_TIMEOUT_SECS;
+ for i in 0..max_attempts {
+ if node.list_peers().iter().any(|p| p.node_id == peer_id && p.is_connected) {
+ tokio::time::sleep(Duration::from_secs(2)).await;
+ return;
+ }
+ if i + 1 == max_attempts {
+ panic!("Peer did not reconnect within {}s ({})", max_attempts, context);
+ }
+ tokio::time::sleep(Duration::from_secs(1)).await;
+ }
+}
+
+/// Disconnect during payment, reconnect, verify payment resolves.
+pub(crate) async fn disconnect_during_payment(
+ node: &Node, peer: &(impl ExternalNode + ?Sized), disconnect_side: &Side,
+) {
+ let ext_node_id = peer.get_node_id().await.unwrap();
+ let ext_addr = peer.get_listening_address().await.unwrap();
+
+ let invoice_str = peer.create_invoice(10_000_000, "disconnect-payment-test").await.unwrap();
+ let parsed_invoice = Bolt11Invoice::from_str(&invoice_str).unwrap();
+
+ // If send() fails immediately, no event will arrive, so skip event wait below.
+ let send_ok = node.bolt11_payment().send(&parsed_invoice, None).is_ok();
+
+ // Disconnect may race with payment delivery; tolerate failure.
+ let _ = disconnect_by_side(node, peer, disconnect_side).await;
+
+ tokio::time::sleep(Duration::from_secs(2)).await;
+ reconnect_and_wait(node, ext_node_id, ext_addr, "disconnect during payment").await;
+
+ if send_ok {
+ let event = tokio::time::timeout(
+ Duration::from_secs(super::super::INTEROP_TIMEOUT_SECS),
+ node.next_event_async(),
+ )
+ .await
+ .expect("Timed out waiting for payment to resolve after reconnect");
+ match event {
+ Event::PaymentSuccessful { .. } | Event::PaymentFailed { .. } => {
+ node.event_handled().unwrap();
+ },
+ other => {
+ panic!("Expected payment outcome event, got: {:?}", other);
+ },
+ }
+ }
+}
diff --git a/tests/common/scenarios/mod.rs b/tests/common/scenarios/mod.rs
new file mode 100644
index 000000000..1999b7d0c
--- /dev/null
+++ b/tests/common/scenarios/mod.rs
@@ -0,0 +1,251 @@
+// This file is Copyright its original authors, visible in version control history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
+// accordance with one or both of these licenses.
+
+//! Shared interop test scenarios, generic over `ExternalNode`.
+//!
+//! - `channel` / `payment` / `connectivity` -- composable building blocks
+//! - `interop_tests!` macro -- emits one `#[tokio::test]` per scenario
+
+pub(crate) mod channel;
+pub(crate) mod connectivity;
+pub(crate) mod payment;
+
+use std::future::Future;
+use std::time::Duration;
+
+use bitcoin::Amount;
+use electrsd::corepc_node::Client as BitcoindClient;
+use electrum_client::ElectrumApi;
+use ldk_node::{Event, Node};
+
+use super::external_node::ExternalNode;
+use super::{generate_blocks_and_wait, premine_and_distribute_funds};
+
+#[derive(Debug, Clone, Copy)]
+pub(crate) enum Side {
+ Ldk,
+ External,
+}
+
+/// Retry an async operation with 1s delay; used for ops that may fail due to gossip delay.
+pub(crate) async fn retry_until_ok<T, E, F, Fut>(max_attempts: u32, operation: &str, mut f: F) -> T
+where
+ F: FnMut() -> Fut,
+ Fut: Future<Output = Result<T, E>>,
+ E: std::fmt::Display,
+{
+ for attempt in 1..=max_attempts {
+ match f().await {
+ Ok(val) => return val,
+ Err(e) => {
+ if attempt == max_attempts {
+ panic!("{} failed after {} attempts: {}", operation, max_attempts, e);
+ }
+ tokio::time::sleep(Duration::from_secs(1)).await;
+ },
+ }
+ }
+ unreachable!()
+}
+
+/// Sync wallets, retrying on `WalletOperationTimeout`.
+pub(crate) async fn sync_wallets_with_retry(node: &Node) {
+ for attempt in 0..3 {
+ match node.sync_wallets() {
+ Ok(()) => return,
+ Err(ldk_node::NodeError::WalletOperationTimeout) if attempt < 2 => {
+ tokio::time::sleep(Duration::from_secs(5)).await;
+ },
+ Err(e) => panic!("sync_wallets failed: {:?}", e),
+ }
+ }
+}
+
+/// Wait until the peer reports 0 pending HTLCs on the channel; required before close because
+/// `PaymentSuccessful` fires one round-trip before the HTLC is removed from the peer's commitment.
+pub(crate) async fn wait_for_htlcs_settled(
+ peer: &(impl ExternalNode + ?Sized), ext_channel_id: &str,
+) {
+ for _ in 0..30 {
+ let channels = tokio::time::timeout(Duration::from_secs(5), peer.list_channels())
+ .await
+ .ok()
+ .and_then(|r| r.ok());
+ if let Some(channels) = channels {
+ if let Some(ch) = channels.iter().find(|c| c.channel_id == ext_channel_id) {
+ if ch.pending_htlcs_count == 0 {
+ return;
+ }
+ }
+ }
+ tokio::time::sleep(Duration::from_millis(500)).await;
+ }
+ panic!("HTLCs did not settle on {} channel {} within 15s", peer.name(), ext_channel_id);
+}
+
+/// Build a fresh LDK node configured for interop tests. Uses electrum at the
+/// docker-compose default port and bumps sync timeouts for combo stress.
+pub(crate) fn setup_ldk_node() -> Node {
+ let config = crate::common::random_config(true);
+ let mut builder = ldk_node::Builder::from_config(config.node_config);
+ let mut sync_config = ldk_node::config::ElectrumSyncConfig::default();
+ sync_config.timeouts_config.onchain_wallet_sync_timeout_secs = 180;
+ sync_config.timeouts_config.lightning_wallet_sync_timeout_secs = 120;
+ builder.set_chain_source_electrum("tcp://127.0.0.1:50001".to_string(), Some(sync_config));
+ let node = builder.build(config.node_entropy).unwrap();
+ node.start().unwrap();
+ node
+}
+
+/// Fund both LDK node and external node, connect them.
+pub(crate) async fn setup_interop_test<E: ElectrumApi>(
+ node: &Node, peer: &(impl ExternalNode + ?Sized), bitcoind: &BitcoindClient, electrs: &E,
+) {
+ let ldk_address = node.onchain_payment().new_address().unwrap();
+ let premine_amount = Amount::from_sat(50_000_000);
+ premine_and_distribute_funds(bitcoind, electrs, vec![ldk_address], premine_amount).await;
+
+ // Fund the peer via the ldk_node_test wallet loaded by premine_and_distribute_funds.
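+ // (sendmany's first positional argument is Bitcoin Core's legacy, now-unused
+ // "account"/dummy field and must be passed as an empty string.)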
+ let ext_funding_addr_str = peer.get_funding_address().await.unwrap();
+ let ext_amount = Amount::from_sat(50_000_000);
+ let amounts_json = serde_json::json!({&ext_funding_addr_str: ext_amount.to_btc()});
+ let empty_account = serde_json::json!("");
+ bitcoind
+ .call::<serde_json::Value>(
+ "sendmany",
+ &[empty_account, amounts_json, serde_json::json!(0), serde_json::json!("")],
+ )
+ .expect("failed to fund external node");
+ generate_blocks_and_wait(bitcoind, electrs, 1).await;
+
+ // Block until the peer indexes the funding tx, else channel opens time out.
+ let chain_height: u64 = bitcoind.get_blockchain_info().unwrap().blocks.try_into().unwrap();
+ peer.wait_for_block_sync(chain_height).await.unwrap();
+
+ sync_wallets_with_retry(node).await;
+
+ let ext_node_id = peer.get_node_id().await.unwrap();
+ let ext_addr = peer.get_listening_address().await.unwrap();
+ node.connect(ext_node_id, ext_addr, true).unwrap();
+}
+
+/// Drive a scenario end-to-end: fund LDK + peer, run the scenario, stop the node.
+/// Each `#[tokio::test]` in the integration-test files calls this with the
+/// per-impl `setup_clients` future and a scenario fn.
+pub(crate) async fn run_interop_scenario<N, E, F>(
+ setup_fut: impl Future<Output = (BitcoindClient, E, N)>, scenario: F,
+) where
+ N: ExternalNode,
+ E: ElectrumApi,
+ F: AsyncFnOnce(&Node, &N, &BitcoindClient, &E),
+{
+ let (bitcoind, electrs, ext) = setup_fut.await;
+ let node = setup_ldk_node();
+ setup_interop_test(&node, &ext, &bitcoind, &electrs).await;
+ scenario(&node, &ext, &bitcoind, &electrs).await;
+ node.stop().unwrap();
+}
+
+/// Open a channel, send a BOLT11 payment in each direction, then cooperatively close.
+pub(crate) async fn basic_channel_cycle_scenario<E: ElectrumApi>(
+ node: &Node, peer: &(impl ExternalNode + ?Sized), bitcoind: &BitcoindClient, electrs: &E,
+) {
+ let (user_ch, ext_ch) = channel::open_channel_to_external(
+ node,
+ peer,
+ bitcoind,
+ electrs,
+ 1_000_000,
+ Some(500_000_000),
+ )
+ .await;
+
+ payment::send_bolt11_to_peer(node, peer, 10_000_000, "basic-send").await;
+ payment::receive_bolt11_payment(node, peer, 10_000_000).await;
+
+ channel::cooperative_close(node, peer, bitcoind, electrs, &user_ch, &ext_ch, Side::Ldk).await;
+}
+
+/// Open a channel, send keysend in both directions, then cooperatively close.
+pub(crate) async fn keysend_scenario<E: ElectrumApi>(
+ node: &Node, peer: &(impl ExternalNode + ?Sized), bitcoind: &BitcoindClient, electrs: &E,
+) {
+ let (user_ch, ext_ch) = channel::open_channel_to_external(
+ node,
+ peer,
+ bitcoind,
+ electrs,
+ 1_000_000,
+ Some(500_000_000),
+ )
+ .await;
+ payment::send_keysend_to_peer(node, peer, 5_000_000).await;
+ payment::receive_keysend_payment(node, peer, 5_000_000).await;
+ channel::cooperative_close(node, peer, bitcoind, electrs, &user_ch, &ext_ch, Side::Ldk).await;
+}
+
+/// Open a channel, send a payment, then force-close from the LDK side.
+pub(crate) async fn force_close_after_payment_scenario<E: ElectrumApi>(
+ node: &Node, peer: &(impl ExternalNode + ?Sized), bitcoind: &BitcoindClient, electrs: &E,
+) {
+ let (user_ch, ext_ch) = channel::open_channel_to_external(
+ node,
+ peer,
+ bitcoind,
+ electrs,
+ 1_000_000,
+ Some(500_000_000),
+ )
+ .await;
+ payment::send_bolt11_to_peer(node, peer, 5_000_000, "force-close").await;
+ wait_for_htlcs_settled(peer, &ext_ch).await;
+ channel::force_close(node, peer, bitcoind, electrs, &user_ch, &ext_ch, Side::Ldk).await;
+}
+
+/// Open a channel, dispatch a payment with a mid-flight disconnect+reconnect,
+/// then cooperatively close.
+pub(crate) async fn disconnect_during_payment_scenario<E: ElectrumApi>(
+ node: &Node, peer: &(impl ExternalNode + ?Sized), bitcoind: &BitcoindClient, electrs: &E,
+) {
+ let (user_ch, ext_ch) = channel::open_channel_to_external(
+ node,
+ peer,
+ bitcoind,
+ electrs,
+ 1_000_000,
+ Some(500_000_000),
+ )
+ .await;
+ connectivity::disconnect_during_payment(node, peer, &Side::Ldk).await;
+ wait_for_htlcs_settled(peer, &ext_ch).await;
+ channel::cooperative_close(node, peer, bitcoind, electrs, &user_ch, &ext_ch, Side::Ldk).await;
+}
+
+/// Open a channel, splice-in additional funds, send a post-splice payment, then close.
+pub(crate) async fn splice_in_scenario<E: ElectrumApi>(
+ node: &Node, peer: &(impl ExternalNode + ?Sized), bitcoind: &BitcoindClient, electrs: &E,
+) {
+ let (user_ch, ext_ch) = channel::open_channel_to_external(
+ node,
+ peer,
+ bitcoind,
+ electrs,
+ 1_000_000,
+ Some(500_000_000),
+ )
+ .await;
+ let ext_node_id = peer.get_node_id().await.unwrap();
+ node.splice_in(&user_ch, ext_node_id, 500_000).unwrap();
+ expect_splice_pending_event!(node, ext_node_id);
+ generate_blocks_and_wait(bitcoind, electrs, 6).await;
+ sync_wallets_with_retry(node).await;
+ expect_channel_ready_event!(node, ext_node_id);
+
+ payment::send_bolt11_to_peer(node, peer, 5_000_000, "post-splice").await;
+
+ channel::cooperative_close(node, peer, bitcoind, electrs, &user_ch, &ext_ch, Side::Ldk).await;
+}
diff --git a/tests/common/scenarios/payment.rs b/tests/common/scenarios/payment.rs
new file mode 100644
index 000000000..191f60abc
--- /dev/null
+++ b/tests/common/scenarios/payment.rs
@@ -0,0 +1,62 @@
+// This file is Copyright its original authors, visible in version control history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
+// accordance with one or both of these licenses.
+
+use std::str::FromStr;
+
+use ldk_node::{Event, Node};
+use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, Description};
+
+use super::super::external_node::ExternalNode;
+use super::retry_until_ok;
+
+/// LDK pays the peer via a fresh BOLT11 invoice; asserts `PaymentSuccessful`.
+pub(crate) async fn send_bolt11_to_peer(
+ node: &Node, peer: &(impl ExternalNode + ?Sized), amount_msat: u64, label: &str,
+) {
+ let invoice_str = peer.create_invoice(amount_msat, label).await.unwrap();
+ let parsed = Bolt11Invoice::from_str(&invoice_str).unwrap();
+ node.bolt11_payment().send(&parsed, None).unwrap();
+ expect_event!(node, PaymentSuccessful);
+}
+
+/// External node pays LDK via BOLT11 invoice. Retries to absorb gossip-propagation
+/// delay (peer may not yet know a route to LDK right after channel confirmation).
+pub(crate) async fn receive_bolt11_payment(
+ node: &Node, peer: &(impl ExternalNode + ?Sized), amount_msat: u64,
+) {
+ let invoice = node
+ .bolt11_payment()
+ .receive(
+ amount_msat,
+ &Bolt11InvoiceDescription::Direct(
+ Description::new("interop-receive-test".to_string()).unwrap(),
+ ),
+ 3600,
+ )
+ .unwrap();
+ let invoice_str = invoice.to_string();
+ retry_until_ok(10, "receive_bolt11_payment", || peer.pay_invoice(&invoice_str)).await;
+ expect_payment_received_event!(node, amount_msat);
+}
+
+/// LDK keysends to peer; asserts `PaymentSuccessful`.
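+/// (Keysend carries the payment preimage in a TLV record rather than using an
+/// invoice, so only the peer's node id is needed.)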
+pub(crate) async fn send_keysend_to_peer( + node: &Node, peer: &(impl ExternalNode + ?Sized), amount_msat: u64, +) { + let peer_id = peer.get_node_id().await.unwrap(); + node.spontaneous_payment().send(amount_msat, peer_id, None).unwrap(); + expect_event!(node, PaymentSuccessful); +} + +/// External node sends keysend to LDK. Retries to absorb gossip-propagation delay. +pub(crate) async fn receive_keysend_payment( + node: &Node, peer: &(impl ExternalNode + ?Sized), amount_msat: u64, +) { + let node_id = node.node_id(); + retry_until_ok(10, "receive_keysend_payment", || peer.send_keysend(node_id, amount_msat)).await; + expect_payment_received_event!(node, amount_msat); +} diff --git a/tests/docker/Dockerfile.eclair b/tests/docker/Dockerfile.eclair new file mode 100644 index 000000000..522a2ef80 --- /dev/null +++ b/tests/docker/Dockerfile.eclair @@ -0,0 +1,22 @@ +# Repackage acinq/eclair:latest onto a glibc-based runtime. +# The official image uses Alpine (musl libc), which causes SIGSEGV in +# secp256k1-jni because the native library is compiled against glibc. +FROM acinq/eclair:latest AS source + +FROM eclipse-temurin:21-jre-jammy +WORKDIR /app + +RUN apt-get update && apt-get install -y --no-install-recommends \ + bash jq curl unzip && \ + rm -rf /var/lib/apt/lists/* + +COPY --from=source /app/eclair-node /app/eclair-node + +ENV ECLAIR_DATADIR=/data +ENV JAVA_OPTS= + +RUN mkdir -p "$ECLAIR_DATADIR" +VOLUME [ "/data" ] + +# Exec form so the JVM receives SIGTERM directly from `docker stop`. +ENTRYPOINT ["bash", "-c", "exec /app/eclair-node/bin/eclair-node.sh \"-Declair.datadir=${ECLAIR_DATADIR}\""] diff --git a/tests/docker/docker-compose-cln.yml b/tests/docker/docker-compose-cln.yml index ef0efa8d8..7b697eff3 100644 --- a/tests/docker/docker-compose-cln.yml +++ b/tests/docker/docker-compose-cln.yml @@ -11,11 +11,16 @@ services: "-rpcbind=0.0.0.0", "-rpcuser=user", "-rpcpassword=pass", - "-fallbackfee=0.00001" + "-fallbackfee=0.00001", + "-rest", + "-zmqpubrawblock=tcp://0.0.0.0:28332", + "-zmqpubrawtx=tcp://0.0.0.0:28333" ] ports: - "18443:18443" # Regtest RPC port - "18444:18444" # Regtest P2P port + - "28332:28332" # ZMQ block port + - "28333:28333" # ZMQ tx port networks: - bitcoin-electrs healthcheck: @@ -53,6 +58,8 @@ services: depends_on: bitcoin: condition: service_healthy + volumes: + - ${CLN_DATA_DIR:-/tmp/cln-data}:/root/.lightning command: [ "--bitcoin-rpcconnect=bitcoin", @@ -60,6 +67,7 @@ services: "--bitcoin-rpcuser=user", "--bitcoin-rpcpassword=pass", "--regtest", + "--experimental-splicing", ] ports: - "19846:19846" diff --git a/tests/docker/docker-compose-eclair.yml b/tests/docker/docker-compose-eclair.yml new file mode 100644 index 000000000..56a5629f1 --- /dev/null +++ b/tests/docker/docker-compose-eclair.yml @@ -0,0 +1,80 @@ +services: + # All services use host networking because Eclair subscribes to bitcoind + # ZMQ notifications (hashblock/rawtx). ZMQ PUB/SUB over Docker bridge + # networking is unreliable -- the subscriber may silently miss messages, + # causing Eclair to fall behind the chain tip. Host networking avoids + # this by keeping all inter-process communication on localhost. 
+ bitcoin: + image: blockstream/bitcoind:30.2 + platform: linux/amd64 + network_mode: host + command: + [ + "bitcoind", + "-printtoconsole", + "-regtest=1", + "-rpcallowip=0.0.0.0/0", + "-rpcbind=0.0.0.0", + "-rpcuser=user", + "-rpcpassword=pass", + "-fallbackfee=0.00001", + "-rest", + "-txindex=1", + "-zmqpubhashblock=tcp://0.0.0.0:28332", + "-zmqpubrawtx=tcp://0.0.0.0:28333" + ] + healthcheck: + test: ["CMD", "bitcoin-cli", "-regtest", "-rpcuser=user", "-rpcpassword=pass", "getblockchaininfo"] + interval: 5s + timeout: 10s + retries: 5 + + electrs: + image: mempool/electrs:v3.2.0 + platform: linux/amd64 + network_mode: host + depends_on: + bitcoin: + condition: service_healthy + command: + [ + "-vvvv", + "--timestamp", + "--jsonrpc-import", + "--cookie=user:pass", + "--network=regtest", + "--daemon-rpc-addr=127.0.0.1:18443", + "--http-addr=0.0.0.0:3002", + "--electrum-rpc-addr=0.0.0.0:50001" + ] + + eclair: + build: + context: . + dockerfile: Dockerfile.eclair + image: ldk-node-eclair:local + platform: linux/amd64 + network_mode: host + depends_on: + bitcoin: + condition: service_healthy + environment: + JAVA_OPTS: >- + -Xmx512m + -Declair.allow-unsafe-startup=true + -Declair.chain=regtest + -Declair.server.port=9736 + -Declair.api.enabled=true + -Declair.api.binding-ip=0.0.0.0 + -Declair.api.port=8080 + -Declair.api.password=eclairpassword + -Declair.bitcoind.host=127.0.0.1 + -Declair.bitcoind.rpcport=18443 + -Declair.bitcoind.rpcuser=user + -Declair.bitcoind.rpcpassword=pass + -Declair.bitcoind.wallet=eclair + -Declair.bitcoind.zmqblock=tcp://127.0.0.1:28332 + -Declair.bitcoind.zmqtx=tcp://127.0.0.1:28333 + -Declair.features.keysend=optional + -Declair.on-chain-fees.confirmation-priority.funding=slow + -Declair.printToConsole diff --git a/tests/docker/docker-compose-lnd.yml b/tests/docker/docker-compose-lnd.yml old mode 100755 new mode 100644 index 304c4eb78..957030e78 --- a/tests/docker/docker-compose-lnd.yml +++ b/tests/docker/docker-compose-lnd.yml @@ -12,6 +12,7 @@ services: "-rpcuser=user", "-rpcpassword=pass", "-fallbackfee=0.00001", + "-rest", "-zmqpubrawblock=tcp://0.0.0.0:28332", "-zmqpubrawtx=tcp://0.0.0.0:28333" ] @@ -55,9 +56,10 @@ services: image: lightninglabs/lnd:v0.20.1-beta container_name: ldk-node-lnd depends_on: - - bitcoin + bitcoin: + condition: service_healthy volumes: - - ${LND_DATA_DIR}:/root/.lnd + - ${LND_DATA_DIR:-/tmp/lnd-data}:/root/.lnd ports: - "8081:8081" - "9735:9735" diff --git a/docker-compose.yml b/tests/docker/docker-compose.yml similarity index 100% rename from docker-compose.yml rename to tests/docker/docker-compose.yml diff --git a/tests/integration_tests_cln.rs b/tests/integration_tests_cln.rs index 6eea7b067..1c90920ff 100644 --- a/tests/integration_tests_cln.rs +++ b/tests/integration_tests_cln.rs @@ -9,130 +9,48 @@ mod common; -use std::default::Default; -use std::str::FromStr; - -use clightningrpc::lightningrpc::LightningRPC; -use clightningrpc::responses::NetworkAddress; +use common::cln::TestClnNode; +use common::scenarios::{ + basic_channel_cycle_scenario, disconnect_during_payment_scenario, + force_close_after_payment_scenario, keysend_scenario, run_interop_scenario, splice_in_scenario, +}; use electrsd::corepc_client::client_sync::Auth; use electrsd::corepc_node::Client as BitcoindClient; use electrum_client::Client as ElectrumClient; -use ldk_node::bitcoin::secp256k1::PublicKey; -use ldk_node::bitcoin::Amount; -use ldk_node::lightning::ln::msgs::SocketAddress; -use ldk_node::{Builder, Event}; -use lightning_invoice::{Bolt11Invoice, 
Bolt11InvoiceDescription, Description}; -use rand::distr::Alphanumeric; -use rand::{rng, Rng}; -#[tokio::test(flavor = "multi_thread", worker_threads = 1)] -async fn test_cln() { - // Setup bitcoind / electrs clients - let bitcoind_client = BitcoindClient::new_with_auth( +async fn setup_clients() -> (BitcoindClient, ElectrumClient, TestClnNode) { + let bitcoind = BitcoindClient::new_with_auth( "http://127.0.0.1:18443", Auth::UserPass("user".to_string(), "pass".to_string()), ) .unwrap(); - let electrs_client = ElectrumClient::new("tcp://127.0.0.1:50001").unwrap(); - - // Give electrs a kick. - common::generate_blocks_and_wait(&bitcoind_client, &electrs_client, 1).await; - - // Setup LDK Node - let config = common::random_config(true); - let mut builder = Builder::from_config(config.node_config); - builder.set_chain_source_esplora("http://127.0.0.1:3002".to_string(), None); - - let node = builder.build(config.node_entropy).unwrap(); - node.start().unwrap(); - - // Premine some funds and distribute - let address = node.onchain_payment().new_address().unwrap(); - let premine_amount = Amount::from_sat(5_000_000); - common::premine_and_distribute_funds( - &bitcoind_client, - &electrs_client, - vec![address], - premine_amount, - ) - .await; - - // Setup CLN - let sock = "/tmp/lightning-rpc"; - let cln_client = LightningRPC::new(&sock); - let cln_info = { - loop { - let info = cln_client.getinfo().unwrap(); - // Wait for CLN to sync block height before channel open. - // Prevents crash due to unset blockheight (see LDK Node issue #527). - if info.blockheight > 0 { - break info; - } - tokio::time::sleep(std::time::Duration::from_millis(250)).await; - } - }; - let cln_node_id = PublicKey::from_str(&cln_info.id).unwrap(); - let cln_address: SocketAddress = match cln_info.binding.first().unwrap() { - NetworkAddress::Ipv4 { address, port } => { - std::net::SocketAddrV4::new(*address, *port).into() - }, - NetworkAddress::Ipv6 { address, port } => { - std::net::SocketAddrV6::new(*address, *port, 0, 0).into() - }, - _ => { - panic!() - }, - }; - - node.sync_wallets().unwrap(); - - // Open the channel - let funding_amount_sat = 1_000_000; - - node.open_channel(cln_node_id, cln_address, funding_amount_sat, Some(500_000_000), None) - .unwrap(); + let electrs = ElectrumClient::new("tcp://127.0.0.1:50001").unwrap(); + let cln = TestClnNode::from_env(); + (bitcoind, electrs, cln) +} - let funding_txo = common::expect_channel_pending_event!(node, cln_node_id); - common::wait_for_tx(&electrs_client, funding_txo.txid).await; - common::generate_blocks_and_wait(&bitcoind_client, &electrs_client, 6).await; - node.sync_wallets().unwrap(); - let user_channel_id = common::expect_channel_ready_event!(node, cln_node_id); +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_basic_channel_cycle() { + run_interop_scenario(setup_clients(), basic_channel_cycle_scenario).await; +} - // Send a payment to CLN - let mut rng = rng(); - let rand_label: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect(); - let cln_invoice = - cln_client.invoice(Some(10_000_000), &rand_label, &rand_label, None, None, None).unwrap(); - let parsed_invoice = Bolt11Invoice::from_str(&cln_invoice.bolt11).unwrap(); +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +#[ignore = "CLN <=v25.12.x keysend final_cltv=22 < LDK min 42; fixed in master (ElementsProject/lightning#9034), awaiting v26.04 Docker image"] +async fn test_keysend() { + run_interop_scenario(setup_clients(), keysend_scenario).await; +} - 
node.bolt11_payment().send(&parsed_invoice, None).unwrap();
-	common::expect_event!(node, PaymentSuccessful);
-	let cln_listed_invoices =
-		cln_client.listinvoices(Some(&rand_label), None, None, None).unwrap().invoices;
-	assert_eq!(cln_listed_invoices.len(), 1);
-	assert_eq!(cln_listed_invoices.first().unwrap().status, "paid");
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn test_force_close_after_payment() {
+	run_interop_scenario(setup_clients(), force_close_after_payment_scenario).await;
+}
-	// Send a payment to LDK
-	let rand_label: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect();
-	let invoice_description =
-		Bolt11InvoiceDescription::Direct(Description::new(rand_label).unwrap());
-	let ldk_invoice =
-		node.bolt11_payment().receive(10_000_000, &invoice_description, 3600).unwrap();
-	cln_client.pay(&ldk_invoice.to_string(), Default::default()).unwrap();
-	common::expect_event!(node, PaymentReceived);
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn test_disconnect_during_payment() {
+	run_interop_scenario(setup_clients(), disconnect_during_payment_scenario).await;
+}
-	// Retry close until monitor updates settle (avoids flaky sleep).
-	for i in 0..10 {
-		match node.close_channel(&user_channel_id, cln_node_id) {
-			Ok(()) => break,
-			Err(e) => {
-				if i == 9 {
-					panic!("close_channel failed after 10 attempts: {:?}", e);
-				}
-				std::thread::sleep(std::time::Duration::from_secs(1));
-			},
-		}
-	}
-	common::expect_event!(node, ChannelClosed);
-	node.stop().unwrap();
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn test_splice_in() {
+	run_interop_scenario(setup_clients(), splice_in_scenario).await;
 }
diff --git a/tests/integration_tests_eclair.rs b/tests/integration_tests_eclair.rs
new file mode 100644
index 000000000..42d617eec
--- /dev/null
+++ b/tests/integration_tests_eclair.rs
@@ -0,0 +1,74 @@
+// This file is Copyright its original authors, visible in version control history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
+// accordance with one or both of these licenses.
+
+#![cfg(eclair_test)]
+
+mod common;
+
+use base64::prelude::{Engine as _, BASE64_STANDARD};
+use common::eclair::TestEclairNode;
+use common::scenarios::{
+	basic_channel_cycle_scenario, disconnect_during_payment_scenario,
+	force_close_after_payment_scenario, keysend_scenario, run_interop_scenario, splice_in_scenario,
+};
+use electrsd::corepc_client::client_sync::Auth;
+use electrsd::corepc_node::Client as BitcoindClient;
+use electrum_client::Client as ElectrumClient;
+
+/// Unlock all UTXOs in the given bitcoind wallet via JSON-RPC.
+async fn unlock_utxos(wallet_url: &str, user: &str, pass: &str) {
+	let auth = BASE64_STANDARD.encode(format!("{}:{}", user, pass));
+	let body = r#"{"jsonrpc":"1.0","method":"lockunspent","params":[true]}"#;
+	let _ = bitreq::post(wallet_url)
+		.with_header("Authorization", format!("Basic {}", auth))
+		.with_header("Content-Type", "text/plain")
+		.with_body(body)
+		.with_timeout(5)
+		.send_async()
+		.await;
+}
+
+async fn setup_clients() -> (BitcoindClient, ElectrumClient, TestEclairNode) {
+	let bitcoind = BitcoindClient::new_with_auth(
+		"http://127.0.0.1:18443/wallet/ldk_node_test",
+		Auth::UserPass("user".to_string(), "pass".to_string()),
+	)
+	.unwrap();
+	let electrs = ElectrumClient::new("tcp://127.0.0.1:50001").unwrap();
+
+	// Unlock any UTXOs left locked by previous force-close tests.
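+	// (`lockunspent` called with `unlock=true` and no outputs array releases
+	// every locked UTXO in the wallet -- the same effect as running
+	// `bitcoin-cli -rpcwallet=eclair lockunspent true` by hand.)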
+ unlock_utxos("http://127.0.0.1:18443/wallet/eclair", "user", "pass").await; + + let eclair = TestEclairNode::from_env(); + (bitcoind, electrs, eclair) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_basic_channel_cycle() { + run_interop_scenario(setup_clients(), basic_channel_cycle_scenario).await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_keysend() { + run_interop_scenario(setup_clients(), keysend_scenario).await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_force_close_after_payment() { + run_interop_scenario(setup_clients(), force_close_after_payment_scenario).await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_disconnect_during_payment() { + run_interop_scenario(setup_clients(), disconnect_during_payment_scenario).await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +#[ignore = "Eclair advertises splicing via custom bit 154 instead of BOLT bit 62/63; disjoint from LDK until Eclair migrates"] +async fn test_splice_in() { + run_interop_scenario(setup_clients(), splice_in_scenario).await; +} diff --git a/tests/integration_tests_hrn.rs b/tests/integration_tests_hrn.rs new file mode 100644 index 000000000..910240039 --- /dev/null +++ b/tests/integration_tests_hrn.rs @@ -0,0 +1,83 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + +#![cfg(hrn_tests)] + +mod common; + +use bitcoin::Amount; +use common::{ + expect_channel_ready_event, expect_payment_successful_event, generate_blocks_and_wait, + open_channel, premine_and_distribute_funds, random_chain_source, setup_bitcoind_and_electrsd, + setup_two_nodes, TestChainSource, +}; +use ldk_node::payment::UnifiedPaymentResult; +use ldk_node::Event; +use lightning::ln::channelmanager::PaymentId; + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn unified_send_to_hrn() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = random_chain_source(&bitcoind, &electrsd); + + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); + + let address_a = node_a.onchain_payment().new_address().unwrap(); + let premined_sats = 5_000_000; + + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![address_a], + Amount::from_sat(premined_sats), + ) + .await; + + node_a.sync_wallets().unwrap(); + open_channel(&node_a, &node_b, 4_000_000, true, &electrsd).await; + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6).await; + + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + expect_channel_ready_event!(node_a, node_b.node_id()); + expect_channel_ready_event!(node_b, node_a.node_id()); + + // Wait until node_b broadcasts a node announcement + while node_b.status().latest_node_announcement_broadcast_timestamp.is_none() { + tokio::time::sleep(std::time::Duration::from_millis(10)).await; + } + + // Sleep to make sure the node announcement propagates + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + + let test_offer = node_b.bolt12_payment().receive(1000000, "test offer", None, None).unwrap(); + + let hrn_str = "matt@mattcorallo.com"; + + let unified_handler = node_a.unified_payment(); + unified_handler.set_test_offer(test_offer); + + let 
+		match unified_handler.send(&hrn_str, Some(1_000_000), None).await {
+			Ok(UnifiedPaymentResult::Bolt12 { payment_id }) => {
+				println!("\nBolt12 payment sent successfully with PaymentID: {:?}", payment_id);
+				payment_id
+			},
+			Ok(UnifiedPaymentResult::Bolt11 { payment_id: _ }) => {
+				panic!("Expected Bolt12 payment but got Bolt11");
+			},
+			Ok(UnifiedPaymentResult::Onchain { txid: _ }) => {
+				panic!("Expected Bolt12 payment but got On-chain transaction");
+			},
+			Err(e) => {
+				panic!("Expected Bolt12 payment but got error: {:?}", e);
+			},
+		};
+
+	expect_payment_successful_event!(node_a, Some(offer_payment_id), None);
+}
diff --git a/tests/integration_tests_lnd.rs b/tests/integration_tests_lnd.rs
index 8f1d4c868..b3059ac48 100755
--- a/tests/integration_tests_lnd.rs
+++ b/tests/integration_tests_lnd.rs
@@ -1,224 +1,56 @@
+// This file is Copyright its original authors, visible in version control history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
+// accordance with one or both of these licenses.
+
 #![cfg(lnd_test)]
 
 mod common;
 
-use std::default::Default;
-use std::str::FromStr;
-
-use bitcoin::hex::DisplayHex;
+use common::lnd::TestLndNode;
+use common::scenarios::{
+	basic_channel_cycle_scenario, disconnect_during_payment_scenario,
+	force_close_after_payment_scenario, keysend_scenario, run_interop_scenario, splice_in_scenario,
+};
 use electrsd::corepc_client::client_sync::Auth;
 use electrsd::corepc_node::Client as BitcoindClient;
 use electrum_client::Client as ElectrumClient;
-use ldk_node::bitcoin::secp256k1::PublicKey;
-use ldk_node::bitcoin::Amount;
-use ldk_node::lightning::ln::msgs::SocketAddress;
-use ldk_node::{Builder, Event};
-use lightning_invoice::{Bolt11InvoiceDescription, Description};
-use lnd_grpc_rust::lnrpc::invoice::InvoiceState::Settled as LndInvoiceStateSettled;
-use lnd_grpc_rust::lnrpc::{
-	GetInfoRequest as LndGetInfoRequest, GetInfoResponse as LndGetInfoResponse,
-	Invoice as LndInvoice, ListInvoiceRequest as LndListInvoiceRequest,
-	QueryRoutesRequest as LndQueryRoutesRequest, Route as LndRoute, SendRequest as LndSendRequest,
-};
-use lnd_grpc_rust::{connect, LndClient};
-use tokio::fs;
-#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
-async fn test_lnd() {
-	// Setup bitcoind / electrs clients
-	let bitcoind_client = BitcoindClient::new_with_auth(
+async fn setup_clients() -> (BitcoindClient, ElectrumClient, TestLndNode) {
+	let bitcoind = BitcoindClient::new_with_auth(
 		"http://127.0.0.1:18443",
 		Auth::UserPass("user".to_string(), "pass".to_string()),
 	)
 	.unwrap();
-	let electrs_client = ElectrumClient::new("tcp://127.0.0.1:50001").unwrap();
-
-	// Give electrs a kick.
- common::generate_blocks_and_wait(&bitcoind_client, &electrs_client, 1).await; - - // Setup LDK Node - let config = common::random_config(true); - let mut builder = Builder::from_config(config.node_config); - builder.set_chain_source_esplora("http://127.0.0.1:3002".to_string(), None); - - let node = builder.build(config.node_entropy).unwrap(); - node.start().unwrap(); - - // Premine some funds and distribute - let address = node.onchain_payment().new_address().unwrap(); - let premine_amount = Amount::from_sat(5_000_000); - common::premine_and_distribute_funds( - &bitcoind_client, - &electrs_client, - vec![address], - premine_amount, - ) - .await; - - // Setup LND - let endpoint = "127.0.0.1:8081"; - let cert_path = std::env::var("LND_CERT_PATH").expect("LND_CERT_PATH not set"); - let macaroon_path = std::env::var("LND_MACAROON_PATH").expect("LND_MACAROON_PATH not set"); - let mut lnd = TestLndClient::new(cert_path, macaroon_path, endpoint.to_string()).await; - - let lnd_node_info = lnd.get_node_info().await; - let lnd_node_id = PublicKey::from_str(&lnd_node_info.identity_pubkey).unwrap(); - let lnd_address: SocketAddress = "127.0.0.1:9735".parse().unwrap(); - - node.sync_wallets().unwrap(); - - // Open the channel - let funding_amount_sat = 1_000_000; - - node.open_channel(lnd_node_id, lnd_address, funding_amount_sat, Some(500_000_000), None) - .unwrap(); - - let funding_txo = common::expect_channel_pending_event!(node, lnd_node_id); - common::wait_for_tx(&electrs_client, funding_txo.txid).await; - common::generate_blocks_and_wait(&bitcoind_client, &electrs_client, 6).await; - node.sync_wallets().unwrap(); - let user_channel_id = common::expect_channel_ready_event!(node, lnd_node_id); - - // Send a payment to LND - let lnd_invoice = lnd.create_invoice(100_000_000).await; - let parsed_invoice = lightning_invoice::Bolt11Invoice::from_str(&lnd_invoice).unwrap(); - - node.bolt11_payment().send(&parsed_invoice, None).unwrap(); - common::expect_event!(node, PaymentSuccessful); - let lnd_listed_invoices = lnd.list_invoices().await; - assert_eq!(lnd_listed_invoices.len(), 1); - assert_eq!(lnd_listed_invoices.first().unwrap().state, LndInvoiceStateSettled as i32); - - // Check route LND -> LDK - let amount_msat = 9_000_000; - let max_retries = 7; - for attempt in 1..=max_retries { - match lnd.query_routes(&node.node_id().to_string(), amount_msat).await { - Ok(routes) => { - if !routes.is_empty() { - break; - } - }, - Err(err) => { - if attempt == max_retries { - panic!("Failed to find route from LND to LDK: {}", err); - } - }, - }; - // wait for the payment process - tokio::time::sleep(std::time::Duration::from_millis(200)).await; - } - - // Send a payment to LDK - let invoice_description = - Bolt11InvoiceDescription::Direct(Description::new("lndTest".to_string()).unwrap()); - let ldk_invoice = - node.bolt11_payment().receive(amount_msat, &invoice_description, 3600).unwrap(); - lnd.pay_invoice(&ldk_invoice.to_string()).await; - common::expect_event!(node, PaymentReceived); - - node.close_channel(&user_channel_id, lnd_node_id).unwrap(); - common::expect_event!(node, ChannelClosed); - node.stop().unwrap(); + let electrs = ElectrumClient::new("tcp://127.0.0.1:50001").unwrap(); + let lnd = TestLndNode::from_env().await; + (bitcoind, electrs, lnd) } -struct TestLndClient { - client: LndClient, +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn test_basic_channel_cycle() { + run_interop_scenario(setup_clients(), basic_channel_cycle_scenario).await; } -impl TestLndClient { - async fn 
new(cert_path: String, macaroon_path: String, socket: String) -> Self {
-		// Read the contents of the file into a vector of bytes
-		let cert_bytes = fs::read(cert_path).await.expect("Failed to read tls cert file");
-		let mac_bytes = fs::read(macaroon_path).await.expect("Failed to read macaroon file");
-
-		// Convert the bytes to a hex string
-		let cert = cert_bytes.as_hex().to_string();
-		let macaroon = mac_bytes.as_hex().to_string();
-
-		let client = connect(cert, macaroon, socket).await.expect("Failed to connect to Lnd");
-
-		TestLndClient { client }
-	}
-
-	async fn get_node_info(&mut self) -> LndGetInfoResponse {
-		let response = self
-			.client
-			.lightning()
-			.get_info(LndGetInfoRequest {})
-			.await
-			.expect("Failed to fetch node info from LND")
-			.into_inner();
-
-		response
-	}
-
-	async fn create_invoice(&mut self, amount_msat: u64) -> String {
-		let invoice = LndInvoice { value_msat: amount_msat as i64, ..Default::default() };
-
-		self.client
-			.lightning()
-			.add_invoice(invoice)
-			.await
-			.expect("Failed to create invoice on LND")
-			.into_inner()
-			.payment_request
-	}
-
-	async fn list_invoices(&mut self) -> Vec<LndInvoice> {
-		self.client
-			.lightning()
-			.list_invoices(LndListInvoiceRequest { ..Default::default() })
-			.await
-			.expect("Failed to list invoices from LND")
-			.into_inner()
-			.invoices
-	}
-
-	async fn query_routes(
-		&mut self, pubkey: &str, amount_msat: u64,
-	) -> Result<Vec<LndRoute>, String> {
-		let request = LndQueryRoutesRequest {
-			pub_key: pubkey.to_string(),
-			amt_msat: amount_msat as i64,
-			..Default::default()
-		};
-
-		let response = self
-			.client
-			.lightning()
-			.query_routes(request)
-			.await
-			.map_err(|err| format!("Failed to query routes from LND: {:?}", err))?
-			.into_inner();
-
-		if response.routes.is_empty() {
-			return Err(format!("No routes found for pubkey: {}", pubkey));
-		}
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn test_keysend() {
+	run_interop_scenario(setup_clients(), keysend_scenario).await;
+}
-		Ok(response.routes)
-	}
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn test_force_close_after_payment() {
+	run_interop_scenario(setup_clients(), force_close_after_payment_scenario).await;
+}
-	async fn pay_invoice(&mut self, invoice_str: &str) {
-		let send_req =
-			LndSendRequest { payment_request: invoice_str.to_string(), ..Default::default() };
-		let response = self
-			.client
-			.lightning()
-			.send_payment_sync(send_req)
-			.await
-			.expect("Failed to pay invoice on LND")
-			.into_inner();
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+async fn test_disconnect_during_payment() {
+	run_interop_scenario(setup_clients(), disconnect_during_payment_scenario).await;
+}
-		if !response.payment_error.is_empty() || response.payment_preimage.is_empty() {
-			panic!(
-				"LND payment failed: {}",
-				if response.payment_error.is_empty() {
-					"No preimage returned"
-				} else {
-					&response.payment_error
-				}
-			);
-		}
-	}
+#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
+#[ignore = "LND does not implement BOLT splicing"]
+async fn test_splice_in() {
+	run_interop_scenario(setup_clients(), splice_in_scenario).await;
 }
diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs
index 413b2d44a..1b1fc4825 100644
--- a/tests/integration_tests_rust.rs
+++ b/tests/integration_tests_rust.rs
@@ -30,6 +30,7 @@ use electrsd::corepc_node::Node as BitcoinD;
 use electrsd::ElectrsD;
 use ldk_node::config::{AsyncPaymentsRole, EsploraSyncConfig};
 use ldk_node::entropy::NodeEntropy;
+use
ldk_node::io::sqlite_store::SqliteStore; use ldk_node::liquidity::LSPS2ServiceConfig; use ldk_node::payment::{ ConfirmationStatus, PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus, @@ -39,6 +40,7 @@ use ldk_node::{Builder, Event, NodeError}; use lightning::ln::channelmanager::PaymentId; use lightning::routing::gossip::{NodeAlias, NodeId}; use lightning::routing::router::RouteParametersConfig; +use lightning::util::persist::KVStoreSync; use lightning_invoice::{Bolt11InvoiceDescription, Description}; use lightning_types::payment::{PaymentHash, PaymentPreimage}; use log::LevelFilter; @@ -48,8 +50,17 @@ async fn channel_full_cycle() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false) - .await; + do_channel_full_cycle( + node_a, + node_b, + &bitcoind.client, + &electrsd.client, + false, + false, + true, + false, + ) + .await; } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] @@ -57,8 +68,17 @@ async fn channel_full_cycle_force_close() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, true) - .await; + do_channel_full_cycle( + node_a, + node_b, + &bitcoind.client, + &electrsd.client, + false, + false, + true, + true, + ) + .await; } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] @@ -66,8 +86,17 @@ async fn channel_full_cycle_force_close_trusted_no_reserve() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, true); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, true) - .await; + do_channel_full_cycle( + node_a, + node_b, + &bitcoind.client, + &electrsd.client, + false, + false, + true, + true, + ) + .await; } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] @@ -75,8 +104,17 @@ async fn channel_full_cycle_0conf() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, true, true, false); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, true, true, false) - .await; + do_channel_full_cycle( + node_a, + node_b, + &bitcoind.client, + &electrsd.client, + true, + false, + true, + false, + ) + .await; } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] @@ -84,8 +122,53 @@ async fn channel_full_cycle_legacy_staticremotekey() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let chain_source = random_chain_source(&bitcoind, &electrsd); let (node_a, node_b) = setup_two_nodes(&chain_source, false, false, false); - do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, false, false) - .await; + do_channel_full_cycle( + node_a, + node_b, + &bitcoind.client, + &electrsd.client, + false, + false, + false, + false, + ) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_full_cycle_0reserve() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = 
random_chain_source(&bitcoind, &electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); + do_channel_full_cycle( + node_a, + node_b, + &bitcoind.client, + &electrsd.client, + false, + true, + true, + false, + ) + .await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn channel_full_cycle_0conf_0reserve() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = random_chain_source(&bitcoind, &electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, true, true, false); + do_channel_full_cycle( + node_a, + node_b, + &bitcoind.client, + &electrsd.client, + true, + true, + true, + false, + ) + .await; } #[tokio::test(flavor = "multi_thread", worker_threads = 1)] @@ -1705,6 +1788,7 @@ async fn do_lsps2_client_service_integration(client_trusts_lsp: bool) { min_channel_opening_fee_msat: 0, max_client_to_self_delay: 1024, client_trusts_lsp, + disable_client_reserve: false, }; let service_config = random_config(true); @@ -2023,6 +2107,7 @@ async fn lsps2_client_trusts_lsp() { min_channel_opening_fee_msat: 0, max_client_to_self_delay: 1024, client_trusts_lsp: true, + disable_client_reserve: false, }; let service_config = random_config(true); @@ -2197,6 +2282,7 @@ async fn lsps2_lsp_trusts_client_but_client_does_not_claim() { min_channel_opening_fee_msat: 0, max_client_to_self_delay: 1024, client_trusts_lsp: false, + disable_client_reserve: false, }; let service_config = random_config(true); @@ -2873,3 +2959,75 @@ async fn splice_in_with_all_balance() { node_a.stop().unwrap(); node_b.stop().unwrap(); } + +#[tokio::test(flavor = "multi_thread", worker_threads = 1)] +async fn builder_configures_sqlite_backup_store() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = random_chain_source(&bitcoind, &electrsd); + + let mut config_a = random_config(true); + config_a.store_type = TestStoreType::Sqlite; + let primary_dir = config_a.node_config.storage_dir_path.clone(); + let backup_dir = common::random_storage_path(); + let node_a = common::setup_node_with_builder(&chain_source, config_a.clone(), |builder| { + builder.set_backup_storage_dir_path(backup_dir.to_str().unwrap().to_owned()); + }); + + let config_b = random_config(true); + let node_b = setup_node(&chain_source, config_b); + + do_channel_full_cycle( + node_a, + node_b, + &bitcoind.client, + &electrsd.client, + false, + true, + true, + false, + ) + .await; + + let primary_store = SqliteStore::new( + primary_dir.into(), + Some(ldk_node::io::sqlite_store::SQLITE_DB_FILE_NAME.to_string()), + Some(ldk_node::io::sqlite_store::KV_TABLE_NAME.to_string()), + ) + .unwrap(); + + let backup_store = SqliteStore::new( + backup_dir, + Some(ldk_node::io::sqlite_store::SQLITE_BACKUP_DB_FILE_NAME.to_string()), + Some(ldk_node::io::sqlite_store::KV_TABLE_NAME.to_string()), + ) + .unwrap(); + + for (pn, sn, key) in [ + ("bdk_wallet", "", "descriptor"), + ("bdk_wallet", "", "change_descriptor"), + ("bdk_wallet", "", "network"), + ("", "", "node_metrics"), + ("", "", "events"), + ("", "", "peers"), + ] { + let primary = KVStoreSync::read(&primary_store, pn, sn, key).unwrap(); + let backup = KVStoreSync::read(&backup_store, pn, sn, key).unwrap(); + + assert_eq!(backup, primary, "backup mismatch for {pn}/{sn}/{key}"); + } +} + +#[test] +fn sqlite_backup_rejects_primary_storage_path() { + let mut config = random_config(false); + config.store_type = TestStoreType::Sqlite; + + let primary_dir = config.node_config.storage_dir_path.clone(); + + 
setup_builder!(builder, config.node_config); + builder.set_backup_storage_dir_path(primary_dir); + + let res = builder.build(config.node_entropy.into()); + + assert!(matches!(res, Err(ldk_node::BuildError::BackupStorePathConflict))); +} diff --git a/tests/integration_tests_vss.rs b/tests/integration_tests_vss.rs index 32226a8b0..210e9a8b2 100644 --- a/tests/integration_tests_vss.rs +++ b/tests/integration_tests_vss.rs @@ -54,6 +54,7 @@ async fn channel_full_cycle_with_vss_store() { &bitcoind.client, &electrsd.client, false, + false, true, false, )