From 01b8c79154d2c7c57d7dcdb52a7d2b61789c28b6 Mon Sep 17 00:00:00 2001
From: UdjinM6 <UdjinM6@users.noreply.github.com>
Date: Fri, 17 Apr 2026 21:18:22 +0300
Subject: [PATCH] refactor: use SteadyClock instead of SystemClock for duration
 measurements in Dash-specific code
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Extends the approach from bitcoin#27405 to Dash-specific code. All six
call sites were purely measuring elapsed time for timeouts, throttling,
or cache expiry — SteadyClock is monotonic and immune to NTP
adjustments, making it the correct choice.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
---
 src/active/dkgsessionhandler.cpp |  4 ++--
 src/instantsend/instantsend.cpp  |  4 ++--
 src/instantsend/instantsend.h    |  2 +-
 src/llmq/dkgsessionmgr.cpp       |  4 ++--
 src/llmq/dkgsessionmgr.h         |  4 ++--
 src/net.cpp                      | 18 +++++++++---------
 src/net.h                        |  2 +-
 7 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/src/active/dkgsessionhandler.cpp b/src/active/dkgsessionhandler.cpp
index 553ab9742be0..5c7044ff81f9 100644
--- a/src/active/dkgsessionhandler.cpp
+++ b/src/active/dkgsessionhandler.cpp
@@ -202,13 +202,13 @@ void ActiveDKGSessionHandler::SleepBeforePhase(QuorumPhase curPhase, const uint2
     double adjustedPhaseSleepTimePerMember = phaseSleepTimePerMember * randomSleepFactor;
     int64_t sleepTime = (int64_t)(adjustedPhaseSleepTimePerMember * curSession->GetMyMemberIndex().value_or(0));
-    int64_t endTime = TicksSinceEpoch<std::chrono::milliseconds>(SystemClock::now()) + sleepTime;
+    const auto endTime = SteadyClock::now() + std::chrono::milliseconds{sleepTime};
     int heightTmp{currentHeight.load()};
     int heightStart{heightTmp};
     LogPrint(BCLog::LLMQ_DKG, "ActiveDKGSessionHandler::%s -- %s qi[%d] - starting sleep for %d ms, curPhase=%d\n", __func__,
              params.name, quorumIndex, sleepTime, std23::to_underlying(curPhase));
 
-    while (TicksSinceEpoch<std::chrono::milliseconds>(SystemClock::now()) < endTime) {
+    while (SteadyClock::now() < endTime) {
         if (stopRequested) {
             LogPrint(BCLog::LLMQ_DKG, "ActiveDKGSessionHandler::%s -- %s qi[%d] - aborting due to stop/shutdown requested\n",
                      __func__, params.name, quorumIndex);
             throw AbortPhaseException();
diff --git a/src/instantsend/instantsend.cpp b/src/instantsend/instantsend.cpp
index 29afd31f9dda..c4f5243eb494 100644
--- a/src/instantsend/instantsend.cpp
+++ b/src/instantsend/instantsend.cpp
@@ -35,7 +35,7 @@ void CInstantSendManager::EnqueueInstantSendLock(NodeId from, const uint256& has
     LOCK(cs_timingsTxSeen);
     if (auto it = timingsTxSeen.find(islock->txid); it != timingsTxSeen.end()) {
         // This is the normal case where we received the TX before the islock
-        auto diff = TicksSinceEpoch<std::chrono::milliseconds>(SystemClock::now()) - it->second;
+        auto diff = Ticks<std::chrono::milliseconds>(SteadyClock::now() - it->second);
         timingsTxSeen.erase(it);
         return diff;
     }
@@ -180,7 +180,7 @@ void CInstantSendManager::AddNonLockedTx(const CTransactionRef& tx, const CBlock
     if (ShouldReportISLockTiming()) {
         LOCK(cs_timingsTxSeen);
         // Only insert the time the first time we see the tx, as we sometimes try to resign
-        timingsTxSeen.try_emplace(tx->GetHash(), TicksSinceEpoch<std::chrono::milliseconds>(SystemClock::now()));
+        timingsTxSeen.try_emplace(tx->GetHash(), SteadyClock::now());
     }
 
     LogPrint(BCLog::INSTANTSEND, "CInstantSendManager::%s -- txid=%s, pindexMined=%s\n", __func__,
diff --git a/src/instantsend/instantsend.h b/src/instantsend/instantsend.h
index a9667ab4384c..e6ccde072cd1 100644
--- a/src/instantsend/instantsend.h
+++ b/src/instantsend/instantsend.h
@@ -78,7 +78,7 @@ class CInstantSendManager
     Uint256HashSet pendingRetryTxs GUARDED_BY(cs_pendingRetry);
 
     mutable Mutex cs_timingsTxSeen;
-    Uint256HashMap<int64_t> timingsTxSeen GUARDED_BY(cs_timingsTxSeen);
+    Uint256HashMap<SteadyClock::time_point> timingsTxSeen GUARDED_BY(cs_timingsTxSeen);
 
     mutable Mutex cs_height_cache;
     static constexpr size_t MAX_BLOCK_HEIGHT_CACHE{16384};
diff --git a/src/llmq/dkgsessionmgr.cpp b/src/llmq/dkgsessionmgr.cpp
index 76384c8e73ed..70a2b60b6784 100644
--- a/src/llmq/dkgsessionmgr.cpp
+++ b/src/llmq/dkgsessionmgr.cpp
@@ -311,7 +311,7 @@ bool CDKGSessionManager::GetVerifiedContributions(Consensus::LLMQType llmqType,
             CBLSSecretKey skContribution;
             db->Read(std::make_tuple(DB_SKCONTRIB, llmqType, pQuorumBaseBlockIndex->GetBlockHash(), proTxHash), skContribution);
 
-            it = contributionsCache.emplace(cacheKey, ContributionsCacheEntry{TicksSinceEpoch<std::chrono::milliseconds>(SystemClock::now()), vvecPtr, skContribution}).first;
+            it = contributionsCache.emplace(cacheKey, ContributionsCacheEntry{SteadyClock::now(), vvecPtr, skContribution}).first;
         }
 
         memberIndexesRet.emplace_back(i);
@@ -358,7 +358,7 @@ bool CDKGSessionManager::GetEncryptedContributions(Consensus::LLMQType llmqType,
 void CDKGSessionManager::CleanupCache() const
 {
     LOCK(contributionsCacheCs);
-    auto curTime = TicksSinceEpoch<std::chrono::milliseconds>(SystemClock::now());
+    const auto curTime = SteadyClock::now();
     for (auto it = contributionsCache.begin(); it != contributionsCache.end(); ) {
         if (curTime - it->second.entryTime > MAX_CONTRIBUTION_CACHE_TIME) {
             it = contributionsCache.erase(it);
diff --git a/src/llmq/dkgsessionmgr.h b/src/llmq/dkgsessionmgr.h
index 23cf14491ddd..0c9221673eb1 100644
--- a/src/llmq/dkgsessionmgr.h
+++ b/src/llmq/dkgsessionmgr.h
@@ -57,7 +57,7 @@ class CDKGSessionManager
     using SessionHandlerMap = std::map<Consensus::LLMQType, std::vector<CDKGSessionHandler>>;
 
 private:
-    static constexpr int64_t MAX_CONTRIBUTION_CACHE_TIME = 60 * 1000;
+    static constexpr auto MAX_CONTRIBUTION_CACHE_TIME = 60s;
 
 private:
     CDeterministicMNManager& m_dmnman;
@@ -84,7 +84,7 @@ class CDKGSessionManager
         }
     };
     struct ContributionsCacheEntry {
-        int64_t entryTime;
+        SteadyClock::time_point entryTime;
         BLSVerificationVectorPtr vvec;
         CBLSSecretKey skContribution;
     };
diff --git a/src/net.cpp b/src/net.cpp
index 21648dbd01c5..d63da2308e31 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -2119,7 +2119,7 @@ void CConnman::DisconnectNodes()
                 // the socket), we can be pretty sure that they are not interested in any pending messages anymore and
                 // thus can immediately close the socket.
                 if (!pnode->fOtherSideDisconnected) {
-                    if (pnode->nDisconnectLingerTime == 0) {
+                    if (pnode->nDisconnectLingerTime.load() == SteadyClock::time_point{}) {
                         // let's not immediately close the socket but instead wait for at least 100ms so that there is a
                         // chance to flush all/some pending data. Otherwise the other side might not receive REJECT messages
                         // that were pushed right before setting fDisconnect=true
@@ -2127,9 +2127,9 @@ void CConnman::DisconnectNodes()
                         // 1. vSendMsg must be empty and all messages sent via send(). This is ensured by SocketHandler()
                         // being called before DisconnectNodes and also by the linger time
                         // 2. Internal socket send buffers must be flushed. This is ensured solely by the linger time
-                        pnode->nDisconnectLingerTime = TicksSinceEpoch<std::chrono::milliseconds>(SystemClock::now()) + 100;
+                        pnode->nDisconnectLingerTime = SteadyClock::now() + 100ms;
                     }
-                    if (TicksSinceEpoch<std::chrono::milliseconds>(SystemClock::now()) < pnode->nDisconnectLingerTime) {
+                    if (SteadyClock::now() < pnode->nDisconnectLingerTime.load()) {
                         // everything flushed to the kernel?
                         const auto& [to_send, more, _msg_type] = pnode->m_transport->GetBytesToSend(pnode->nSendMsgSize != 0);
                         const bool queue_is_empty{to_send.empty() && !more};
@@ -2636,18 +2636,18 @@ void CConnman::ThreadSocketHandler(CMasternodeSync& mn_sync)
 {
     AssertLockNotHeld(m_total_bytes_sent_mutex);
 
-    int64_t nLastCleanupNodes = 0;
+    auto nLastCleanupNodes = SteadyClock::time_point{};
 
     while (!interruptNet) {
         // Handle sockets before we do the next round of disconnects. This allows us to flush send buffers one last time
         // before actually closing sockets. Receiving is however skipped in case a peer is pending to be disconnected
         SocketHandler(mn_sync);
-        if (TicksSinceEpoch<std::chrono::milliseconds>(SystemClock::now()) - nLastCleanupNodes > 1000) {
+        if (SteadyClock::now() - nLastCleanupNodes > 1s) {
             ForEachNode(AllNodes, [&](CNode* pnode) {
                 if (InactivityCheck(*pnode)) pnode->fDisconnect = true;
             });
-            nLastCleanupNodes = TicksSinceEpoch<std::chrono::milliseconds>(SystemClock::now());
+            nLastCleanupNodes = SteadyClock::now();
         }
 
         DisconnectNodes();
         NotifyNumConnectionsChanged(mn_sync);
@@ -3663,7 +3663,7 @@ void CConnman::ThreadMessageHandler()
 {
     LOCK(NetEventsInterface::g_msgproc_mutex);
 
-    int64_t nLastSendMessagesTimeMasternodes = 0;
+    auto nLastSendMessagesTimeMasternodes = SteadyClock::time_point{};
 
     FastRandomContext rng;
     while (!flagInterruptMsgProc)
@@ -3671,9 +3671,9 @@ void CConnman::ThreadMessageHandler()
         bool fMoreWork = false;
 
         bool fSkipSendMessagesForMasternodes = true;
-        if (TicksSinceEpoch<std::chrono::milliseconds>(SystemClock::now()) - nLastSendMessagesTimeMasternodes >= 100) {
+        if (SteadyClock::now() - nLastSendMessagesTimeMasternodes >= 100ms) {
             fSkipSendMessagesForMasternodes = false;
-            nLastSendMessagesTimeMasternodes = TicksSinceEpoch<std::chrono::milliseconds>(SystemClock::now());
+            nLastSendMessagesTimeMasternodes = SteadyClock::now();
         }
 
         // Randomize the order in which we process messages from/to our peers.
diff --git a/src/net.h b/src/net.h
index fbea1d395be8..21119f8675bb 100644
--- a/src/net.h
+++ b/src/net.h
@@ -784,7 +784,7 @@ class CNode
     // Setting fDisconnect to true will cause the node to be disconnected the
     // next time DisconnectNodes() runs
     std::atomic_bool fDisconnect{false};
-    std::atomic<int64_t> nDisconnectLingerTime{0};
+    std::atomic<SteadyClock::time_point> nDisconnectLingerTime{SteadyClock::time_point{}};
    std::atomic_bool fSocketShutdown{false};
    std::atomic_bool fOtherSideDisconnected { false }; // If 'true' this node will be disconnected on CMasternodeMan::ProcessMasternodeConnections()