From f6bb02d666b0d40b0a290b72d40b912bcee32e18 Mon Sep 17 00:00:00 2001 From: Konstantin Akimov Date: Mon, 27 Apr 2026 01:43:18 +0700 Subject: [PATCH] fix: keep sending ISDLOCK invs to non-MN peers that want recsigs PR #6994 made masternodes skip ISDLOCK inv announcements to any peer with m_wants_recsigs set, on the premise that such peers can reconstruct the ISLOCK from the recsig. It works for MN peers but it does not work for quorum observers running with -watchquorums: those nodes also opt in to recsigs via QSENDRECSIGS but they don't have a signer worker running, so they cannot reconstruct an ISDLOCK from a recsig and they still need the inv. nodes[0] runs with -watchquorums and had progressively sent QSENDRECSIGS to all four MN peers; by the third call every MN saw nodes[0].m_wants_recsigs=true and skipped the inv to it. This commit makes the ISDLOCK inv skipped only when the peer is a verified masternode. Move the policy from PushInv (called for every inv type) to the three sites that actually relay MSG_ISDLOCK and have CNode in scope. --- src/net_processing.cpp | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 21a3f7102bdb..a0dfbfae459d 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -1193,6 +1193,16 @@ static uint16_t GetHeadersLimit(const CNode& pfrom, bool compressed) return MAX_HEADERS_UNCOMPRESSED_RESULT; } +// Returns true when peer is a verified masternode that has opted in to receive recsigs. +// Such peers participate in the signing flow that populates creatingInstantSendLocks, so +// they can reconstruct an ISDLOCK locally from the recsig and don't need the ISDLOCK inv. +// Non-MN peers (e.g. nodes running with -watchquorums) also opt in to recsigs via +// QSENDRECSIGS but still need ISDLOCK invs because they don't run the signing flow. 
+static bool PeerReconstructsISLockFromRecsig(const CNode& pnode, const Peer& peer) +{ + return peer.m_wants_recsigs && !pnode.GetVerifiedProRegTxHash().IsNull(); +} + static void PushInv(Peer& peer, const CInv& inv) { auto inv_relay = peer.GetInvRelay(); @@ -1204,13 +1214,6 @@ static void PushInv(Peer& peer, const CInv& inv) return; } - // Skip ISDLOCK inv announcements for peers that want recsigs, as they can reconstruct - // the islock from the recsig - if (inv.type == MSG_ISDLOCK && peer.m_wants_recsigs) { - LogPrint(BCLog::NET, "%s -- skipping ISDLOCK inv (peer wants recsigs): %s peer=%d\n", __func__, inv.ToString(), peer.m_id); - return; - } - LOCK(inv_relay->m_tx_inventory_mutex); if (inv_relay->m_tx_inventory_known_filter.contains(inv.hash)) { LogPrint(BCLog::NET, "%s -- skipping known inv: %s peer=%d\n", __func__, inv.ToString(), peer.m_id); @@ -2541,6 +2544,11 @@ void PeerManagerImpl::RelayInvFiltered(const CInv& inv, const CTransaction& rela return; } } // LOCK(tx_relay->m_bloom_filter_mutex) + if (inv.type == MSG_ISDLOCK && PeerReconstructsISLockFromRecsig(*pnode, *peer)) { + LogPrint(BCLog::NET, "%s -- skipping ISDLOCK inv (peer wants recsigs): %s peer=%d\n", + __func__, inv.ToString(), peer->m_id); + return; + } PushInv(*peer, inv); }); } @@ -2566,6 +2574,11 @@ void PeerManagerImpl::RelayInvFiltered(const CInv& inv, const uint256& relatedTx return; } } // LOCK(tx_relay->m_bloom_filter_mutex) + if (inv.type == MSG_ISDLOCK && PeerReconstructsISLockFromRecsig(*pnode, *peer)) { + LogPrint(BCLog::NET, "%s -- skipping ISDLOCK inv (peer wants recsigs): %s peer=%d\n", + __func__, inv.ToString(), peer->m_id); + return; + } PushInv(*peer, inv); }); } @@ -6348,9 +6361,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) if (islock == nullptr) continue; uint256 isLockHash{::SerializeHash(*islock)}; tx_relay->m_tx_inventory_known_filter.insert(isLockHash); - // Skip ISDLOCK inv announcements for peers that want recsigs, as they can reconstruct - // the islock from 
the recsig - if (!peer->m_wants_recsigs) { + if (!PeerReconstructsISLockFromRecsig(*pto, *peer)) { queueAndMaybePushInv(CInv(MSG_ISDLOCK, isLockHash)); } }