From 49248b25d417df498bb37094635c605ca8913525 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Wed, 22 Apr 2026 16:29:45 +0200 Subject: [PATCH 01/28] feat: trace relevant http headers (#1982) --- CHANGELOG.md | 4 ++ crates/rpc/src/server/mod.rs | 1 - crates/utils/src/grpc.rs | 1 - crates/utils/src/grpc/connect_info.rs | 18 -------- crates/utils/src/grpc/layers.rs | 59 ++++++++++++++++++++------- crates/utils/src/tracing/grpc.rs | 16 ++++++++ 6 files changed, 65 insertions(+), 34 deletions(-) delete mode 100644 crates/utils/src/grpc/connect_info.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 8bdd77a0d1..fed5d6d5d2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ # Changelog +## v0.14.10 (TBD) + +- Added `accept`, `origin`, `user-agent`, `forwarded`, `x-forwarded-for` and `x-real-ip` headers to telemetry for gRPC requests ([#1982](https://github.com/0xMiden/node/pull/1982). + ## v0.14.9 (2026-04-21) - Simplified network monitor counter script loading by linking the counter module directly via `with_linked_module` instead of assembling a standalone library ([#1957](https://github.com/0xMiden/node/pull/1957)). 
diff --git a/crates/rpc/src/server/mod.rs b/crates/rpc/src/server/mod.rs index 7f0dfae8a0..2e4c1caa79 100644 --- a/crates/rpc/src/server/mod.rs +++ b/crates/rpc/src/server/mod.rs @@ -74,7 +74,6 @@ impl Rpc { .max_connection_age(self.grpc_options.max_connection_age) .timeout(self.grpc_options.request_timeout) .layer(CatchPanicLayer::custom(catch_panic_layer_fn)) - .layer(grpc::connect_info_layer()) .layer( TraceLayer::new(SharedClassifier::new( GrpcErrorsAsFailures::new() diff --git a/crates/utils/src/grpc.rs b/crates/utils/src/grpc.rs index 19e2c90d55..3dc42adf8b 100644 --- a/crates/utils/src/grpc.rs +++ b/crates/utils/src/grpc.rs @@ -22,6 +22,5 @@ mod private { impl Sealed for url::Url {} } -pub mod connect_info; mod layers; pub use layers::*; diff --git a/crates/utils/src/grpc/connect_info.rs b/crates/utils/src/grpc/connect_info.rs deleted file mode 100644 index a5b0345d35..0000000000 --- a/crates/utils/src/grpc/connect_info.rs +++ /dev/null @@ -1,18 +0,0 @@ -use tonic::service::Interceptor; -use tonic::{Request, Status}; - -// Extracts the IP for connection management and rate-limiting requests, called `Governor`. 
-#[derive(Debug, Clone)] -pub struct ConnectInfoInterceptor; - -impl Interceptor for ConnectInfoInterceptor { - fn call(&mut self, mut request: Request<()>) -> Result, Status> { - let addr = request - .remote_addr() - .ok_or_else(|| Status::failed_precondition("Expected TCP connection"))?; - request - .metadata_mut() - .insert("forwarded", format!("for={addr}").try_into().unwrap()); - Ok(request) - } -} diff --git a/crates/utils/src/grpc/layers.rs b/crates/utils/src/grpc/layers.rs index 894412f13c..c94912bcee 100644 --- a/crates/utils/src/grpc/layers.rs +++ b/crates/utils/src/grpc/layers.rs @@ -1,20 +1,15 @@ +use std::net::{IpAddr, SocketAddr}; use std::time::Duration; use anyhow::{Context, ensure}; use governor::middleware::StateInformationMiddleware; -use tonic::service::InterceptorLayer; use tower::limit::GlobalConcurrencyLimitLayer; +use tower_governor::GovernorError; use tower_governor::governor::GovernorConfigBuilder; -use tower_governor::key_extractor::SmartIpKeyExtractor; +use tower_governor::key_extractor::{KeyExtractor, SmartIpKeyExtractor}; -use super::connect_info::ConnectInfoInterceptor; use crate::clap::GrpcOptionsExternal; -/// Creates the gRPC interceptor layer that attaches connection metadata. -pub fn connect_info_layer() -> InterceptorLayer { - InterceptorLayer::new(ConnectInfoInterceptor) -} - /// Builds a global concurrency limit layer using the configured semaphore. 
pub fn rate_limit_concurrent_connections( grpc_options: GrpcOptionsExternal, @@ -26,11 +21,7 @@ pub fn rate_limit_concurrent_connections( pub fn rate_limit_per_ip( grpc_options: GrpcOptionsExternal, ) -> anyhow::Result< - tower_governor::GovernorLayer< - SmartIpKeyExtractor, - StateInformationMiddleware, - tonic::body::Body, - >, + tower_governor::GovernorLayer, > { let nanos_per_replenish = Duration::from_secs(1) .as_nanos() @@ -44,7 +35,7 @@ pub fn rate_limit_per_ip( u64::try_from(nanos_per_replenish).context("invalid gRPC rate limit configuration")?, ); let config = GovernorConfigBuilder::default() - .key_extractor(SmartIpKeyExtractor) + .key_extractor(GrpcIpExtractor::default()) .period(replenish_period) .burst_size(grpc_options.burst_size.into()) .use_headers() @@ -61,3 +52,43 @@ pub fn rate_limit_per_ip( }); Ok(tower_governor::GovernorLayer::new(config)) } + +/// Wraps [`SmartIpKeyExtractor`] by providing a fallback to the client IP address provided by the +/// gRPC transport. +/// +/// [`SmartIpKeyExtractor`]'s own fallback of checking the peer IP directly fails because we are in +/// a gRPC transport and not the typical `SocketAddr` as it expects. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct GrpcIpExtractor(SmartIpKeyExtractor); + +impl Default for GrpcIpExtractor { + fn default() -> Self { + Self(SmartIpKeyExtractor) + } +} + +impl GrpcIpExtractor { + #[expect(clippy::result_large_err, reason = "this is a third party error type")] + fn extract_tonic_address( + request: &http::Request, + ) -> Result<::Key, GovernorError> { + request + .extensions() + .get::() + .and_then(tonic::transport::server::TcpConnectInfo::remote_addr) + .as_ref() + .map(SocketAddr::ip) + .ok_or(GovernorError::UnableToExtractKey) + } +} + +impl KeyExtractor for GrpcIpExtractor { + type Key = IpAddr; + + fn extract( + &self, + request: &http::Request, + ) -> Result { + self.0.extract(request).or_else(|_| Self::extract_tonic_address(request)) + } +} diff --git a/crates/utils/src/tracing/grpc.rs b/crates/utils/src/tracing/grpc.rs index a4516aaad1..6d88e0fa5c 100644 --- a/crates/utils/src/tracing/grpc.rs +++ b/crates/utils/src/tracing/grpc.rs @@ -1,3 +1,4 @@ +use http::header::HeaderName; use tracing::field; use crate::tracing::OpenTelemetrySpanExt; @@ -74,6 +75,21 @@ pub fn grpc_trace_fn(request: &http::Request) -> tracing::Span { } } + for header in [ + http::header::ACCEPT, + http::header::ORIGIN, + http::header::USER_AGENT, + http::header::FORWARDED, + HeaderName::from_static("x-forwarded-for"), + HeaderName::from_static("x-real-ip"), + ] { + if let Some(value) = request.headers().get(&header) { + if let Ok(value) = value.to_str() { + span.set_attribute(format!("http.request.header.{header}"), value); + } + } + } + span } From e08c476b07a125562cfbc07d809a9cc971c4056c Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Wed, 22 Apr 2026 17:55:42 +0200 Subject: [PATCH 02/28] feat: inject basic trace fields for requests (#1983) --- CHANGELOG.md | 1 + crates/proto/src/decode/mod.rs | 3 + crates/proto/src/decode/utils.rs | 74 +++++++++++++++ crates/rpc/src/server/api.rs | 109 
+++++++++++++++++++--- crates/store/src/server/api.rs | 73 +-------------- crates/store/src/server/block_producer.rs | 6 +- crates/store/src/server/ntx_builder.rs | 10 +- crates/store/src/server/rpc_api.rs | 20 ++-- 8 files changed, 188 insertions(+), 108 deletions(-) create mode 100644 crates/proto/src/decode/utils.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index fed5d6d5d2..40bb856610 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ## v0.14.10 (TBD) +- Trace additional RPC request properties e.g. `account.id` in `GetAccount` ([#1983](https://github.com/0xMiden/node/pull/1983)). - Added `accept`, `origin`, `user-agent`, `forwarded`, `x-forwarded-for` and `x-real-ip` headers to telemetry for gRPC requests ([#1982](https://github.com/0xMiden/node/pull/1982). ## v0.14.9 (2026-04-21) diff --git a/crates/proto/src/decode/mod.rs b/crates/proto/src/decode/mod.rs index 63693ce138..a67ab309c6 100644 --- a/crates/proto/src/decode/mod.rs +++ b/crates/proto/src/decode/mod.rs @@ -2,6 +2,9 @@ use std::marker::PhantomData; use miden_protocol::utils::serde::Deserializable; +mod utils; +pub use utils::*; + use crate::errors::ConversionError; // Re-export so callers can import from `conv`. 
pub use crate::errors::ConversionResultExt; diff --git a/crates/proto/src/decode/utils.rs b/crates/proto/src/decode/utils.rs new file mode 100644 index 0000000000..451fbd9d5c --- /dev/null +++ b/crates/proto/src/decode/utils.rs @@ -0,0 +1,74 @@ +use miden_protocol::Word; +use miden_protocol::account::AccountId; + +use crate::decode::{ConversionResultExt, GrpcStructDecoder}; +use crate::errors::ConversionError; +use crate::{decode, generated as proto}; + +/// Reads a block range from a request, returning a specific error type if the field is missing +pub fn read_block_range( + block_range: Option, + entity: &'static str, +) -> Result +where + E: From, +{ + block_range.ok_or_else(|| { + ConversionError::message(format!("{entity}: missing field `block_range`")).into() + }) +} + +/// Reads and converts a root field from a request to Word, returning a specific error type if +/// conversion fails +pub fn read_root( + root: Option, + entity: &'static str, +) -> Result +where + E: From, +{ + root.ok_or_else(|| ConversionError::message(format!("{entity}: missing field `root`")))? 
+ .try_into() + .context("root") + .map_err(|e: ConversionError| e.into()) +} + +/// Converts a collection of proto primitives to Words, returning a specific error type if +/// conversion fails +pub fn convert_digests_to_words(digests: I) -> Result, E> +where + E: From, + I: IntoIterator, + I::Item: TryInto, +{ + digests + .into_iter() + .map(TryInto::try_into) + .collect::, ConversionError>>() + .context("digests") + .map_err(Into::into) +} + +/// Reads account IDs from a request, returning a specific error type if conversion fails +pub fn read_account_ids(account_ids: I) -> Result, E> +where + E: From, + I: IntoIterator, +{ + account_ids + .into_iter() + .map(AccountId::try_from) + .collect::>() + .context("account_ids") + .map_err(Into::into) +} + +pub fn read_account_id( + account_id: Option, +) -> Result +where + E: From, +{ + let decoder = GrpcStructDecoder::::default(); + decode!(decoder, account_id).map_err(|e: ConversionError| e.into()) +} diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index 56386570f8..915e5643ab 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -9,6 +9,8 @@ use miden_node_proto::clients::{ StoreRpcClient, ValidatorClient, }; +use miden_node_proto::decode::{read_account_id, read_account_ids, read_block_range}; +use miden_node_proto::domain::account::{AccountRequest, SlotData}; use miden_node_proto::errors::ConversionError; use miden_node_proto::generated::rpc::MempoolStats; use miden_node_proto::generated::rpc::api_server::{self, Api}; @@ -23,6 +25,7 @@ use miden_node_utils::limiter::{ QueryParamNullifierLimit, QueryParamStorageMapKeyTotalLimit, }; +use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::batch::{ProposedBatch, ProvenBatch}; use miden_protocol::block::{BlockHeader, BlockNumber}; use miden_protocol::transaction::{ @@ -36,7 +39,7 @@ use miden_protocol::{MIN_PROOF_SECURITY_LEVEL, Word}; use miden_tx::TransactionVerifier; use 
miden_tx_batch_prover::LocalBatchProver; use tonic::{IntoRequest, Request, Response, Status}; -use tracing::{debug, info, info_span}; +use tracing::{Span, debug, info, info_span}; use url::Url; use crate::COMPONENT; @@ -214,6 +217,13 @@ impl api_server::Api for RpcService { &self, request: Request, ) -> Result, Status> { + let range = + read_block_range::(request.get_ref().block_range, "SyncNullifiersRequest")?; + + let span = Span::current(); + span.set_attribute("block_range.from", range.block_from); + span.set_attribute("block_range.to", range.block_to()); + debug!(target: COMPONENT, request = ?request.get_ref()); check::(request.get_ref().nullifiers.len())?; @@ -227,7 +237,9 @@ impl api_server::Api for RpcService { &self, request: Request, ) -> Result, Status> { - info!(target: COMPONENT, request = ?request.get_ref()); + debug!(target: COMPONENT, request = ?request.get_ref()); + + Span::current().set_attribute("block.number", request.get_ref().block_num()); self.store.clone().get_block_header_by_number(request).await } @@ -236,6 +248,8 @@ impl api_server::Api for RpcService { &self, request: Request, ) -> Result, Status> { + Span::current().set_attribute("block.number", request.get_ref().block_num); + let request = request.into_inner(); debug!(target: COMPONENT, ?request); @@ -247,6 +261,14 @@ impl api_server::Api for RpcService { &self, request: Request, ) -> Result, Status> { + let range = + read_block_range::(request.get_ref().block_range, "SyncChainMmrRequest")?; + + let span = Span::current(); + span.set_attribute("block_range.from", range.block_from); + span.set_attribute("block_range.to", range.block_to()); + span.set_attribute("finality", request.get_ref().finality().as_str_name()); + debug!(target: COMPONENT, request = ?request.get_ref()); self.store.clone().sync_chain_mmr(request).await @@ -258,6 +280,11 @@ impl api_server::Api for RpcService { &self, request: Request, ) -> Result, Status> { + let range = 
read_block_range::(request.get_ref().block_range, "SyncNotesRequest")?; + + let span = Span::current(); + span.set_attribute("block_range.from", range.block_from); + span.set_attribute("block_range.to", range.block_to()); debug!(target: COMPONENT, request = ?request.get_ref()); check::(request.get_ref().note_tags.len())?; @@ -301,6 +328,19 @@ impl api_server::Api for RpcService { &self, request: Request, ) -> Result, Status> { + let account_id = read_account_id::( + request.get_ref().account_id.clone(), + )?; + let range = read_block_range::( + request.get_ref().block_range, + "SyncAccountStorageMapsRequest", + )?; + + let span = Span::current(); + span.set_attribute("account.id", account_id); + span.set_attribute("block_range.from", range.block_from); + span.set_attribute("block_range.to", range.block_to()); + debug!(target: COMPONENT, request = ?request.get_ref()); self.store.clone().sync_account_storage_maps(request).await @@ -311,6 +351,17 @@ impl api_server::Api for RpcService { request: tonic::Request, ) -> std::result::Result, tonic::Status> { + let account_id = read_account_id::( + request.get_ref().account_id.clone(), + )?; + let range = + read_block_range::(request.get_ref().block_range, "SyncAccountVaultRequest")?; + + let span = Span::current(); + span.set_attribute("account.id", account_id); + span.set_attribute("block_range.from", range.block_from); + span.set_attribute("block_range.to", range.block_to()); + debug!(target: COMPONENT, request = ?request.get_ref()); self.store.clone().sync_account_vault(request).await @@ -319,32 +370,34 @@ impl api_server::Api for RpcService { /// Validates storage map key limits before forwarding the account request to the store. 
async fn get_account( &self, - request: Request, + raw_request: Request, ) -> Result, Status> { - use proto::rpc::account_request::account_detail_request::storage_map_detail_request::{ - SlotData::AllEntries as ProtoMapAllEntries, SlotData::MapKeys as ProtoMapKeys, - }; + let raw_request = raw_request.into_inner(); + debug!(target: COMPONENT, ?raw_request); - let request = request.into_inner(); + let request = AccountRequest::try_from(raw_request.clone())?; - debug!(target: COMPONENT, ?request); + let span = Span::current(); + span.set_attribute("account.id", request.account_id); + if let Some(block) = request.block_num { + span.set_attribute("block.number", block); + } // Validate total storage map key limit before forwarding to store if let Some(details) = &request.details { let _span = info_span!(target: COMPONENT, "validate_storage_map_keys").entered(); let total_keys: usize = details - .storage_maps + .storage_requests .iter() - .filter_map(|m| m.slot_data.as_ref()) - .filter_map(|d| match d { - ProtoMapKeys(keys) => Some(keys.map_keys.len()), - ProtoMapAllEntries(_) => None, + .filter_map(|d| match &d.slot_data { + SlotData::All => None, + SlotData::MapKeys(items) => Some(items.len()), }) .sum(); check::(total_keys)?; } - self.store.clone().get_account(request).await + self.store.clone().get_account(raw_request).await } // -- Transaction submission -------------------------------------------------------------- @@ -370,6 +423,13 @@ impl api_server::Api for RpcService { Status::invalid_argument(err.as_report_context("invalid transaction")) })?; + let span = Span::current(); + span.set_attribute("transaction.id", tx.id()); + span.set_attribute("account.id", tx.account_id()); + span.set_attribute("transaction.expires_at", tx.expiration_block_num()); + span.set_attribute("transaction.reference_block.number", tx.ref_block_num()); + span.set_attribute("transaction.reference_block.commitment", tx.ref_block_commitment()); + // Rebuild a new ProvenTransaction with 
decorators removed from output notes let account_update = TxAccountUpdate::new( tx.account_id(), @@ -441,6 +501,15 @@ impl api_server::Api for RpcService { Status::invalid_argument(err.as_report_context("invalid proven_batch")) })?; + let span = Span::current(); + span.set_attribute("batch.id", proven_batch.id()); + span.set_attribute("batch.expires_at", proven_batch.batch_expiration_block_num()); + span.set_attribute("batch.reference_block.number", proven_batch.reference_block_num()); + span.set_attribute( + "batch.reference_block.commitment", + proven_batch.reference_block_commitment(), + ); + let proposed_batch = request .proposed_batch .as_deref() @@ -532,6 +601,18 @@ impl api_server::Api for RpcService { &self, request: Request, ) -> Result, Status> { + let range = + read_block_range::(request.get_ref().block_range, "SyncTransactionsRequest")?; + let n_accounts = request.get_ref().account_ids.len(); + let account_ids = + read_account_ids::(request.get_ref().account_ids.iter().take(10).cloned())?; + + let span = Span::current(); + span.set_attribute("block_range.from", range.block_from); + span.set_attribute("block_range.to", range.block_to()); + span.set_attribute("account.ids", format!("{account_ids:?}").as_str()); + span.set_attribute("account.ids.count", n_accounts); + debug!(target: COMPONENT, request = ?request); check::(request.get_ref().account_ids.len())?; diff --git a/crates/store/src/server/api.rs b/crates/store/src/server/api.rs index d73cad9c16..5f12fc4278 100644 --- a/crates/store/src/server/api.rs +++ b/crates/store/src/server/api.rs @@ -1,12 +1,11 @@ use std::collections::BTreeSet; use std::sync::Arc; -use miden_node_proto::decode::{ConversionResultExt, GrpcStructDecoder}; +use miden_node_proto::decode::ConversionResultExt; use miden_node_proto::errors::ConversionError; -use miden_node_proto::{decode, generated as proto}; +use miden_node_proto::generated as proto; use miden_node_utils::ErrorReport; use miden_protocol::Word; -use 
miden_protocol::account::AccountId; use miden_protocol::batch::OrderedBatches; use miden_protocol::block::{BlockInputs, BlockNumber}; use miden_protocol::note::Nullifier; @@ -103,74 +102,6 @@ pub fn conversion_error_to_status(value: &ConversionError) -> Status { invalid_argument(value.as_report_context("Invalid nullifier format")) } -/// Reads a block range from a request, returning a specific error type if the field is missing -pub fn read_block_range( - block_range: Option, - entity: &'static str, -) -> Result -where - E: From, -{ - block_range.ok_or_else(|| { - ConversionError::message(format!("{entity}: missing field `block_range`")).into() - }) -} - -/// Reads and converts a root field from a request to Word, returning a specific error type if -/// conversion fails -pub fn read_root( - root: Option, - entity: &'static str, -) -> Result -where - E: From, -{ - root.ok_or_else(|| ConversionError::message(format!("{entity}: missing field `root`")))? - .try_into() - .context("root") - .map_err(|e: ConversionError| e.into()) -} - -/// Converts a collection of proto primitives to Words, returning a specific error type if -/// conversion fails -pub fn convert_digests_to_words(digests: I) -> Result, E> -where - E: From, - I: IntoIterator, - I::Item: TryInto, -{ - digests - .into_iter() - .map(TryInto::try_into) - .collect::, ConversionError>>() - .context("digests") - .map_err(Into::into) -} - -/// Reads account IDs from a request, returning a specific error type if conversion fails -pub fn read_account_ids(account_ids: &[proto::account::AccountId]) -> Result, E> -where - E: From, -{ - account_ids - .iter() - .cloned() - .map(AccountId::try_from) - .collect::>() - .context("account_ids") - .map_err(Into::into) -} - -pub fn read_account_id( - account_id: Option, -) -> Result -where - E: From, -{ - let decoder = GrpcStructDecoder::::default(); - decode!(decoder, account_id).map_err(|e: ConversionError| e.into()) -} - #[instrument( level = "debug", target = COMPONENT, 
diff --git a/crates/store/src/server/block_producer.rs b/crates/store/src/server/block_producer.rs index 16c2ee4886..1ea67c885c 100644 --- a/crates/store/src/server/block_producer.rs +++ b/crates/store/src/server/block_producer.rs @@ -1,7 +1,7 @@ use std::convert::Infallible; use miden_crypto::dsa::ecdsa_k256_keccak::Signature; -use miden_node_proto::decode::GrpcDecodeExt; +use miden_node_proto::decode::{GrpcDecodeExt, read_account_id, read_account_ids}; use miden_node_proto::domain::proof_request::BlockProofRequest; use miden_node_proto::errors::ConversionError; use miden_node_proto::generated::store::block_producer_server; @@ -20,8 +20,6 @@ use crate::errors::ApplyBlockError; use crate::server::api::{ StoreApi, conversion_error_to_status, - read_account_id, - read_account_ids, read_block_numbers, validate_note_commitments, validate_nullifiers, @@ -136,7 +134,7 @@ impl block_producer_server::BlockProducer for StoreApi { ) -> Result, Status> { let request = request.into_inner(); - let account_ids = read_account_ids::(&request.account_ids)?; + let account_ids = read_account_ids::(request.account_ids)?; let nullifiers = validate_nullifiers(&request.nullifiers) .map_err(|err| conversion_error_to_status(&err))?; let unauthenticated_note_commitments = diff --git a/crates/store/src/server/ntx_builder.rs b/crates/store/src/server/ntx_builder.rs index f7973f51fb..0da5c55afd 100644 --- a/crates/store/src/server/ntx_builder.rs +++ b/crates/store/src/server/ntx_builder.rs @@ -2,6 +2,7 @@ use std::collections::BTreeSet; use std::num::{NonZero, TryFromIntError}; use miden_crypto::merkle::smt::SmtProof; +use miden_node_proto::decode::{read_account_id, read_block_range, read_root}; use miden_node_proto::domain::account::AccountInfo; use miden_node_proto::errors::ConversionError; use miden_node_proto::generated as proto; @@ -23,14 +24,7 @@ use crate::errors::{ GetNoteScriptByRootError, GetWitnessesError, }; -use crate::server::api::{ - StoreApi, - internal_error, - 
invalid_argument, - read_account_id, - read_block_range, - read_root, -}; +use crate::server::api::{StoreApi, internal_error, invalid_argument}; // NTX BUILDER ENDPOINTS // ================================================================================================ diff --git a/crates/store/src/server/rpc_api.rs b/crates/store/src/server/rpc_api.rs index e684b20a0e..a595c4123f 100644 --- a/crates/store/src/server/rpc_api.rs +++ b/crates/store/src/server/rpc_api.rs @@ -1,4 +1,11 @@ use miden_node_proto::convert; +use miden_node_proto::decode::{ + convert_digests_to_words, + read_account_id, + read_account_ids, + read_block_range, + read_root, +}; use miden_node_proto::domain::block::InvalidBlockRange; use miden_node_proto::errors::ConversionError; use miden_node_proto::generated::store::rpc_server; @@ -31,16 +38,7 @@ use crate::errors::{ SyncNullifiersError, SyncTransactionsError, }; -use crate::server::api::{ - StoreApi, - convert_digests_to_words, - internal_error, - read_account_id, - read_account_ids, - read_block_range, - read_root, - validate_nullifiers, -}; +use crate::server::api::{StoreApi, internal_error, validate_nullifiers}; // CLIENT ENDPOINTS // ================================================================================================ @@ -423,7 +421,7 @@ impl rpc_server::Rpc for StoreApi { .into_inclusive_range::(&chain_tip)?; let account_ids: Vec = - read_account_ids::(&request.account_ids)?; + read_account_ids::(request.account_ids)?; // Validate account IDs count check::(account_ids.len())?; From 7ef15da74dc5a55f5e38e5fa11bfcb7f82ce1ec6 Mon Sep 17 00:00:00 2001 From: KOVACS Krisztian Date: Fri, 24 Apr 2026 08:23:25 +0200 Subject: [PATCH 03/28] refactor(store): get account vault details from account state forest (#1981) --- CHANGELOG.md | 3 +- crates/store/src/account_state_forest/mod.rs | 25 ++++++- .../store/src/account_state_forest/tests.rs | 64 +++++++++++++++++- crates/store/src/db/mod.rs | 13 ---- 
.../store/src/db/models/queries/accounts.rs | 5 +- .../db/models/queries/accounts/at_block.rs | 65 +------------------ .../db/models/queries/accounts/delta/tests.rs | 2 +- .../src/db/models/queries/accounts/tests.rs | 60 +++++++++++++++++ crates/store/src/state/mod.rs | 23 +++++-- 9 files changed, 165 insertions(+), 95 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 40bb856610..b120372fde 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,8 +2,9 @@ ## v0.14.10 (TBD) +- Optimize `GetAccount` implementation to serve vault assets from `AccountStateForest` ([#1981](https://github.com/0xMiden/node/pull/1981)). - Trace additional RPC request properties e.g. `account.id` in `GetAccount` ([#1983](https://github.com/0xMiden/node/pull/1983)). -- Added `accept`, `origin`, `user-agent`, `forwarded`, `x-forwarded-for` and `x-real-ip` headers to telemetry for gRPC requests ([#1982](https://github.com/0xMiden/node/pull/1982). +- Added `accept`, `origin`, `user-agent`, `forwarded`, `x-forwarded-for` and `x-real-ip` headers to telemetry for gRPC requests ([#1982](https://github.com/0xMiden/node/pull/1982)). 
## v0.14.9 (2026-04-21) diff --git a/crates/store/src/account_state_forest/mod.rs b/crates/store/src/account_state_forest/mod.rs index d20ecdf2d2..0c9adceb3a 100644 --- a/crates/store/src/account_state_forest/mod.rs +++ b/crates/store/src/account_state_forest/mod.rs @@ -2,7 +2,7 @@ use std::collections::BTreeSet; use miden_crypto::hash::rpo::Rpo256; use miden_crypto::merkle::smt::ForestInMemoryBackend; -use miden_node_proto::domain::account::AccountStorageMapDetails; +use miden_node_proto::domain::account::{AccountStorageMapDetails, AccountVaultDetails}; use miden_node_utils::ErrorReport; use miden_protocol::account::delta::{AccountDelta, AccountStorageDelta, AccountVaultDelta}; use miden_protocol::account::{ @@ -12,7 +12,7 @@ use miden_protocol::account::{ StorageMapWitness, StorageSlotName, }; -use miden_protocol::asset::{AssetVaultKey, AssetWitness, FungibleAsset}; +use miden_protocol::asset::{Asset, AssetVaultKey, AssetWitness, FungibleAsset}; use miden_protocol::block::BlockNumber; use miden_protocol::crypto::merkle::smt::{ ForestOperation, @@ -270,6 +270,27 @@ impl AccountStateForest { witnessees } + /// Enumerates vault contents for the specified account at the requested block. + #[instrument(target = COMPONENT, skip_all)] + pub(crate) fn get_vault_details( + &self, + account_id: AccountId, + block_num: BlockNumber, + ) -> Result { + let lineage = Self::vault_lineage_id(account_id); + let tree = self.get_tree_id(lineage, block_num).ok_or(WitnessError::RootNotFound)?; + // TODO: we should be checking `.entry_count()` instead of pulling entries from the tree + // once the optimization making `.entry_count()` cheap lands, i.e. once `miden-crypto` is upgraded to + // > 0.23. 
+ let entries = self.forest.entries(tree).map_err(Self::map_forest_error_to_witness)?; + let assets = entries + .take(AccountVaultDetails::MAX_RETURN_ENTRIES + 1) + .map(|entry| Asset::from_key_value_words(entry.key, entry.value)) + .collect::, _>>()?; + + Ok(AccountVaultDetails::from_assets(assets)) + } + /// Opens a storage map and returns storage map details with SMT proofs for the given keys. /// /// Returns `None` if no storage root is tracked for this account/slot/block combination. diff --git a/crates/store/src/account_state_forest/tests.rs b/crates/store/src/account_state_forest/tests.rs index ed931f03a4..6d5dd7011c 100644 --- a/crates/store/src/account_state_forest/tests.rs +++ b/crates/store/src/account_state_forest/tests.rs @@ -1,13 +1,20 @@ use assert_matches::assert_matches; -use miden_node_proto::domain::account::StorageMapEntries; +use miden_node_proto::domain::account::{AccountVaultDetails, StorageMapEntries}; use miden_protocol::Felt; -use miden_protocol::account::{AccountCode, StorageMapKey}; -use miden_protocol::asset::{Asset, AssetVault, FungibleAsset}; +use miden_protocol::account::{AccountCode, AccountStorageMode, AccountType, StorageMapKey}; +use miden_protocol::asset::{ + Asset, + AssetVault, + FungibleAsset, + NonFungibleAsset, + NonFungibleAssetDetails, +}; use miden_protocol::crypto::merkle::smt::SmtProof; use miden_protocol::testing::account_id::{ ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE, ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2, + AccountIdBuilder, }; use super::*; @@ -161,6 +168,57 @@ fn vault_incremental_updates_with_add_and_remove() { assert_eq!(root_after_120, root_full_state_120); } +#[test] +fn vault_details_returns_latest_and_historical_assets() { + let mut forest = AccountStateForest::new(); + let account_id = dummy_account(); + let faucet_id = dummy_faucet(); + + let block_1 = BlockNumber::GENESIS.child(); + let asset_100 = dummy_fungible_asset(faucet_id, 100); + let 
full_delta = dummy_full_state_delta(account_id, &[asset_100]); + forest.update_account(block_1, &full_delta).unwrap(); + + let block_2 = block_1.child(); + let mut vault_delta_2 = AccountVaultDelta::default(); + vault_delta_2.add_asset(dummy_fungible_asset(faucet_id, 50)).unwrap(); + let delta_2 = dummy_partial_delta(account_id, vault_delta_2, AccountStorageDelta::default()); + forest.update_account(block_2, &delta_2).unwrap(); + + let historical = forest.get_vault_details(account_id, block_1).unwrap(); + assert_eq!(historical, AccountVaultDetails::Assets(vec![asset_100])); + + let latest = forest.get_vault_details(account_id, block_2).unwrap(); + assert_eq!(latest, AccountVaultDetails::Assets(vec![dummy_fungible_asset(faucet_id, 150)])); +} + +#[test] +fn vault_details_limit_exceeded_for_large_vault() { + let mut forest = AccountStateForest::new(); + let account_id = dummy_account(); + let block_num = BlockNumber::GENESIS.child(); + + let faucet_id = AccountIdBuilder::new() + .account_type(AccountType::NonFungibleFaucet) + .storage_mode(AccountStorageMode::Public) + .build_with_seed([7; 32]); + let assets = (0..=AccountVaultDetails::MAX_RETURN_ENTRIES) + .map(|i| { + let details = + NonFungibleAssetDetails::new(faucet_id, vec![i as u8, (i >> 8) as u8]).unwrap(); + Asset::NonFungible(NonFungibleAsset::new(&details).unwrap()) + }) + .collect::>(); + + let full_delta = dummy_full_state_delta(account_id, &assets); + forest.update_account(block_num, &full_delta).unwrap(); + + assert_eq!( + forest.get_vault_details(account_id, block_num).unwrap(), + AccountVaultDetails::LimitExceeded + ); +} + #[test] fn forest_versions_are_continuous_for_sequential_updates() { use std::collections::BTreeMap; diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 86581a9d96..93dec452e6 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -453,19 +453,6 @@ impl Db { .await } - /// Queries vault assets at a specific block - #[instrument(target = 
COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn select_account_vault_at_block( - &self, - account_id: AccountId, - block_num: BlockNumber, - ) -> Result> { - self.transact("Get account vault at block", move |conn| { - queries::select_account_vault_at_block(conn, account_id, block_num) - }) - .await - } - /// Queries the account code by its commitment hash. /// /// Returns `None` if no code exists with that commitment. diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index d41ee09b38..29e068ba55 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -47,10 +47,7 @@ use crate::db::{AccountVaultValue, schema}; use crate::errors::DatabaseError; mod at_block; -pub(crate) use at_block::{ - select_account_header_with_storage_header_at_block, - select_account_vault_at_block, -}; +pub(crate) use at_block::select_account_header_with_storage_header_at_block; mod delta; use delta::{ diff --git a/crates/store/src/db/models/queries/accounts/at_block.rs b/crates/store/src/db/models/queries/accounts/at_block.rs index fc2ddb00e6..cfe91995cd 100644 --- a/crates/store/src/db/models/queries/accounts/at_block.rs +++ b/crates/store/src/db/models/queries/accounts/at_block.rs @@ -1,8 +1,7 @@ -use diesel::prelude::{Queryable, QueryableByName}; +use diesel::prelude::Queryable; use diesel::query_dsl::methods::SelectDsl; use diesel::{ExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl, SqliteConnection}; use miden_protocol::account::{AccountHeader, AccountId, AccountStorageHeader}; -use miden_protocol::asset::Asset; use miden_protocol::block::BlockNumber; use miden_protocol::utils::serde::{Deserializable, Serializable}; use miden_protocol::{Felt, Word}; @@ -100,65 +99,3 @@ pub(crate) fn select_account_header_with_storage_header_at_block( Ok(Some((account_header, storage_header))) } - -// ACCOUNT VAULT -// 
================================================================================================ - -/// Query vault assets at a specific block by finding the most recent update for each `vault_key`. -/// -/// Uses a single raw SQL query with a subquery join: -/// ```sql -/// SELECT a.asset FROM account_vault_assets a -/// INNER JOIN ( -/// SELECT vault_key, MAX(block_num) as max_block -/// FROM account_vault_assets -/// WHERE account_id = ? AND block_num <= ? -/// GROUP BY vault_key -/// ) latest ON a.vault_key = latest.vault_key AND a.block_num = latest.max_block -/// WHERE a.account_id = ? -/// ``` -pub(crate) fn select_account_vault_at_block( - conn: &mut SqliteConnection, - account_id: AccountId, - block_num: BlockNumber, -) -> Result, DatabaseError> { - use diesel::sql_types::{BigInt, Binary}; - - let account_id_bytes = account_id.to_bytes(); - let block_num_sql = block_num.to_raw_sql(); - - let entries: Vec>> = diesel::sql_query( - r" - SELECT a.asset FROM account_vault_assets a - INNER JOIN ( - SELECT vault_key, MAX(block_num) as max_block - FROM account_vault_assets - WHERE account_id = ? AND block_num <= ? - GROUP BY vault_key - ) latest ON a.vault_key = latest.vault_key AND a.block_num = latest.max_block - WHERE a.account_id = ? - ", - ) - .bind::(&account_id_bytes) - .bind::(block_num_sql) - .bind::(&account_id_bytes) - .load::(conn)? 
- .into_iter() - .map(|row| row.asset) - .collect(); - - // Convert to assets, filtering out deletions (None values) - let mut assets = Vec::new(); - for asset_bytes in entries.into_iter().flatten() { - let asset = Asset::read_from_bytes(&asset_bytes)?; - assets.push(asset); - } - - Ok(assets) -} - -#[derive(QueryableByName)] -struct AssetRow { - #[diesel(sql_type = diesel::sql_types::Nullable)] - asset: Option>, -} diff --git a/crates/store/src/db/models/queries/accounts/delta/tests.rs b/crates/store/src/db/models/queries/accounts/delta/tests.rs index 1e73ab4ebe..3e82a5d9a7 100644 --- a/crates/store/src/db/models/queries/accounts/delta/tests.rs +++ b/crates/store/src/db/models/queries/accounts/delta/tests.rs @@ -41,9 +41,9 @@ use miden_standards::account::auth::AuthSingleSig; use miden_standards::code_builder::CodeBuilder; use crate::db::migrations::MIGRATIONS; +use crate::db::models::queries::accounts::tests::select_account_vault_at_block; use crate::db::models::queries::accounts::{ select_account_header_with_storage_header_at_block, - select_account_vault_at_block, select_full_account, upsert_accounts, }; diff --git a/crates/store/src/db/models/queries/accounts/tests.rs b/crates/store/src/db/models/queries/accounts/tests.rs index f660efc4f9..87a1fc5deb 100644 --- a/crates/store/src/db/models/queries/accounts/tests.rs +++ b/crates/store/src/db/models/queries/accounts/tests.rs @@ -252,6 +252,66 @@ fn assert_storage_map_slot_entries( assert_eq!(&entries, expected, "map entries mismatch"); } +/// Test helper: query vault assets at a specific block by finding the most recent +/// update for each `vault_key`. +/// +/// Uses a single raw SQL query with a subquery join: +/// ```sql +/// SELECT a.asset FROM account_vault_assets a +/// INNER JOIN ( +/// SELECT vault_key, MAX(block_num) as max_block +/// FROM account_vault_assets +/// WHERE account_id = ? AND block_num <= ? 
+/// GROUP BY vault_key +/// ) latest ON a.vault_key = latest.vault_key AND a.block_num = latest.max_block +/// WHERE a.account_id = ? +/// ``` +pub(super) fn select_account_vault_at_block( + conn: &mut SqliteConnection, + account_id: AccountId, + block_num: BlockNumber, +) -> Result, DatabaseError> { + use diesel::sql_types::{BigInt, Binary}; + + let account_id_bytes = account_id.to_bytes(); + let block_num_sql = block_num.to_raw_sql(); + + let entries: Vec>> = diesel::sql_query( + r" + SELECT a.asset FROM account_vault_assets a + INNER JOIN ( + SELECT vault_key, MAX(block_num) as max_block + FROM account_vault_assets + WHERE account_id = ? AND block_num <= ? + GROUP BY vault_key + ) latest ON a.vault_key = latest.vault_key AND a.block_num = latest.max_block + WHERE a.account_id = ? + ", + ) + .bind::(&account_id_bytes) + .bind::(block_num_sql) + .bind::(&account_id_bytes) + .load::(conn)? + .into_iter() + .map(|row| row.asset) + .collect(); + + // Convert to assets, filtering out deletions (None values) + let mut assets = Vec::new(); + for asset_bytes in entries.into_iter().flatten() { + let asset = Asset::read_from_bytes(&asset_bytes)?; + assets.push(asset); + } + + Ok(assets) +} + +#[derive(QueryableByName)] +struct AssetRow { + #[diesel(sql_type = diesel::sql_types::Nullable)] + asset: Option>, +} + // ACCOUNT HEADER AT BLOCK TESTS // ================================================================================================ diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index 8d9fe376c1..c53d7ad13e 100644 --- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -744,11 +744,17 @@ impl State { Some(commitment) if commitment == account_header.vault_root() => { AccountVaultDetails::empty() }, - Some(_) => { - let vault_assets = - self.db.select_account_vault_at_block(account_id, block_num).await?; - AccountVaultDetails::from_assets(vault_assets) - }, + Some(_) => self + .forest + .read() + 
.instrument(tracing::info_span!("acquire_forest_for_vault")) + .await + .get_vault_details(account_id, block_num) + .map_err(|err| { + DatabaseError::DataCorrupted(format!( + "failed to reconstruct vault for account {account_id} at block {block_num}: {err}" + )) + })?, None => AccountVaultDetails::empty(), }; @@ -775,8 +781,11 @@ impl State { let mut storage_map_details_by_index = vec![None; storage_request_slots.len()]; if !map_keys_requests.is_empty() { - let forest_guard = - self.forest.read().instrument(tracing::info_span!("acquire_forest")).await; + let forest_guard = self + .forest + .read() + .instrument(tracing::info_span!("acquire_forest_for_storage_map")) + .await; for (index, slot_name, keys) in map_keys_requests { let details = forest_guard .get_storage_map_details_for_keys( From 8a73b9fa6f2d00d35559f99fcd959289ea8f1615 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Fri, 24 Apr 2026 10:08:56 +0200 Subject: [PATCH 04/28] feat(rpc): cache reference block commitments (#1996) --- crates/rpc/src/server/api.rs | 89 +++++++++++++++++++++++++----------- crates/rpc/src/server/mod.rs | 3 ++ crates/rpc/src/tests.rs | 59 ++++++++++++++++++++++-- 3 files changed, 119 insertions(+), 32 deletions(-) diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index 915e5643ab..7be85f0b77 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -1,3 +1,4 @@ +use std::num::NonZeroUsize; use std::sync::LazyLock; use std::time::Duration; @@ -25,6 +26,7 @@ use miden_node_utils::limiter::{ QueryParamNullifierLimit, QueryParamStorageMapKeyTotalLimit, }; +use miden_node_utils::lru_cache::LruCache; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::batch::{ProposedBatch, ProvenBatch}; use miden_protocol::block::{BlockHeader, BlockNumber}; @@ -53,6 +55,7 @@ pub struct RpcService { validator: ValidatorClient, ntx_builder: Option, genesis_commitment: Option, + 
block_commitment_cache: LruCache, } impl RpcService { @@ -61,6 +64,7 @@ impl RpcService { block_producer_url: Option, validator_url: Url, ntx_builder_url: Option, + commitment_cache_capacity: NonZeroUsize, ) -> Self { let store = { info!(target: COMPONENT, store_endpoint = %store_url, "Initializing store client"); @@ -124,6 +128,7 @@ impl RpcService { validator, ntx_builder, genesis_commitment: None, + block_commitment_cache: LruCache::new(commitment_cache_capacity), } } @@ -186,6 +191,52 @@ impl RpcService { } } } + + /// Returns the given block's onchain commitment. + /// + /// This is retrieved from the local LRU cache, or otherwise from the store on cache miss. + #[tracing::instrument(target = COMPONENT, name = "get_block_commitment", skip_all, fields(block.number = %block))] + async fn get_block_commitment(&self, block: BlockNumber) -> Result { + if let Some(commitment) = self.block_commitment_cache.get(&block).await { + return Ok(commitment); + } + + let header = self + .store + .clone() + .get_block_header_by_number(Request::new(proto::rpc::BlockHeaderByNumberRequest { + block_num: Some(block.as_u32()), + include_mmr_proof: false.into(), + })) + .await? + .into_inner() + .block_header + .map(BlockHeader::try_from) + .transpose()? + .ok_or_else(|| Status::invalid_argument(format!("unknown block {block}")))?; + + let commitment = header.commitment(); + self.block_commitment_cache.put(block, commitment).await; + + Ok(commitment) + } + + /// Returns an error if the provided block's commitment does not match the one on chain. 
+ async fn verify_reference_commitment( + &self, + block: BlockNumber, + commitment: Word, + ) -> Result<(), Status> { + let onchain = self.get_block_commitment(block).await?; + + if onchain != commitment { + return Err(Status::invalid_argument(format!( + "reference block's commitment {commitment} at block {block} does not match the chain's commitment of {onchain}", + ))); + } + + Ok(()) + } } // API IMPLEMENTATION @@ -430,6 +481,10 @@ impl api_server::Api for RpcService { span.set_attribute("transaction.reference_block.number", tx.ref_block_num()); span.set_attribute("transaction.reference_block.commitment", tx.ref_block_commitment()); + // Verify the reference block is actually part of the chain. + self.verify_reference_commitment(tx.ref_block_num(), tx.ref_block_commitment()) + .await?; + // Rebuild a new ProvenTransaction with decorators removed from output notes let account_update = TxAccountUpdate::new( tx.account_id(), @@ -465,7 +520,6 @@ impl api_server::Api for RpcService { } let tx_verifier = TransactionVerifier::new(MIN_PROOF_SECURITY_LEVEL); - tx_verifier.verify(&tx).map_err(|err| { Status::invalid_argument(format!( "Invalid proof for transaction {}: {}", @@ -520,6 +574,13 @@ impl api_server::Api for RpcService { })? .ok_or(Status::invalid_argument("missing `proposed_batch` field"))?; + // Verify the reference block is actually part of the chain. + self.verify_reference_commitment( + proven_batch.reference_block_num(), + proven_batch.reference_block_commitment(), + ) + .await?; + // Perform this check here since its cheap. If this passes we can safely zip inputs and // transactions. if request.transaction_inputs.len() != proposed_batch.transactions().len() { @@ -555,32 +616,6 @@ impl api_server::Api for RpcService { return Err(Status::invalid_argument("batch proof did not match proposed batch")); } - // Verify the reference header matches the canonical chain. 
- let reference_header = self - .get_block_header_by_number(Request::new(proto::rpc::BlockHeaderByNumberRequest { - block_num: expected_proof.reference_block_num().as_u32().into(), - include_mmr_proof: false.into(), - })) - .await? - .into_inner() - .block_header - .map(BlockHeader::try_from) - .transpose()? - .ok_or_else(|| { - Status::invalid_argument(format!( - "unknown reference block {}", - expected_proof.reference_block_num() - )) - })?; - if reference_header.commitment() != expected_proof.reference_block_commitment() { - return Err(Status::invalid_argument(format!( - "batch reference commitment {} at block {} does not match canonical chain's commitment of {}", - expected_proof.reference_block_commitment(), - expected_proof.reference_block_num(), - reference_header.commitment() - ))); - } - // Submit each transaction to the validator. // // SAFETY: We checked earlier that the two iterators are the same length. diff --git a/crates/rpc/src/server/mod.rs b/crates/rpc/src/server/mod.rs index 2e4c1caa79..f788a83642 100644 --- a/crates/rpc/src/server/mod.rs +++ b/crates/rpc/src/server/mod.rs @@ -1,3 +1,5 @@ +use std::num::NonZeroUsize; + use accept::AcceptHeaderLayer; use anyhow::Context; use miden_node_proto::generated::rpc::api_server; @@ -48,6 +50,7 @@ impl Rpc { self.block_producer_url.clone(), self.validator_url, self.ntx_builder_url.clone(), + NonZeroUsize::new(1_000_000).unwrap(), ); let genesis = api diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index 794d9b6042..29613be0d6 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -66,7 +66,11 @@ fn build_test_account(seed: [u8; 32]) -> (Account, AccountDelta) { /// /// This uses `ExecutionProof::new_dummy()` and is intended for tests that /// need to test validation logic. 
-fn build_test_proven_tx(account: &Account, delta: &AccountDelta) -> ProvenTransaction { +fn build_test_proven_tx( + account: &Account, + delta: &AccountDelta, + genesis: Word, +) -> ProvenTransaction { let account_id = AccountId::dummy( [0; 15], AccountIdVersion::Version0, @@ -88,7 +92,7 @@ fn build_test_proven_tx(account: &Account, delta: &AccountDelta) -> ProvenTransa Vec::::new(), Vec::::new(), 0.into(), - Word::default(), + genesis, test_fee(), u32::MAX.into(), ExecutionProof::new_dummy(), @@ -305,7 +309,7 @@ async fn rpc_server_rejects_proven_transactions_with_invalid_commitment() { // Build a valid proven transaction let (account, account_delta) = build_test_account([0; 32]); - let tx = build_test_proven_tx(&account, &account_delta); + let tx = build_test_proven_tx(&account, &account_delta, genesis); // Create an incorrect delta commitment from a different account let (other_account, _) = build_test_account([1; 32]); @@ -338,11 +342,56 @@ async fn rpc_server_rejects_proven_transactions_with_invalid_commitment() { shutdown_store(store_runtime).await; } +#[tokio::test] +async fn rpc_server_rejects_proven_transactions_with_invalid_reference_block() { + // Start the RPC. + let (_, rpc_addr, store_listener) = start_rpc().await; + let (store_runtime, _data_directory, genesis, _store_addr) = start_store(store_listener).await; + + // Wait for the store to be ready before sending requests. + tokio::time::sleep(Duration::from_millis(100)).await; + + // Override the client so that the ACCEPT header is not set. + let mut rpc_client = + miden_node_proto::clients::Builder::new(Url::parse(&format!("http://{rpc_addr}")).unwrap()) + .without_tls() + .with_timeout(Duration::from_secs(5)) + .without_metadata_version() + .with_metadata_genesis(genesis.to_hex()) + .without_otel_context_injection() + .connect_lazy::(); + + // Build a valid proven transaction but with the incorrect hash (empty). 
+ let invalid = Word::empty(); + let (account, account_delta) = build_test_account([0; 32]); + let tx = build_test_proven_tx(&account, &account_delta, invalid); + + let request = proto::transaction::ProvenTransaction { + transaction: tx.to_bytes(), + transaction_inputs: None, + }; + + let response = rpc_client.submit_proven_transaction(request).await; + + // Assert that the server rejected our request. + assert!(response.is_err()); + + // Rejection should be from invalid reference block. + let err = response.as_ref().unwrap_err().message(); + assert!( + err.contains("does not match the chain's commitment of"), + "expected error message to contain reference block error but got: {err}" + ); + + // Shutdown to avoid runtime drop error. + shutdown_store(store_runtime).await; +} + #[tokio::test] async fn rpc_server_rejects_tx_submissions_without_genesis() { // Start the RPC. let (_, rpc_addr, store_listener) = start_rpc().await; - let (store_runtime, _data_directory, _genesis, _store_addr) = start_store(store_listener).await; + let (store_runtime, _data_directory, genesis, _store_addr) = start_store(store_listener).await; // Override the client so that the ACCEPT header is not set. 
let mut rpc_client = @@ -355,7 +404,7 @@ async fn rpc_server_rejects_tx_submissions_without_genesis() { .connect_lazy::(); let (account, account_delta) = build_test_account([0; 32]); - let tx = build_test_proven_tx(&account, &account_delta); + let tx = build_test_proven_tx(&account, &account_delta, genesis); let request = proto::transaction::ProvenTransaction { transaction: tx.to_bytes(), From 9fe7ec4b81d25ad1dff017ac4141683a259af48b Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Fri, 24 Apr 2026 10:57:46 +0200 Subject: [PATCH 05/28] fix: mempool occasional panics when store is ahead by a block (#1984) --- CHANGELOG.md | 3 +- crates/block-producer/src/mempool/mod.rs | 42 ++++++++++--------- crates/block-producer/src/mempool/tests.rs | 22 ++++++---- .../src/mempool/tests/add_transaction.rs | 30 ++++++------- 4 files changed, 55 insertions(+), 42 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b120372fde..f5b934563d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,7 +4,8 @@ - Optimize `GetAccount` implementation to serve vault assets from `AccountStateForest` ([#1981](https://github.com/0xMiden/node/pull/1981)). - Trace additional RPC request properties e.g. `account.id` in `GetAccount` ([#1983](https://github.com/0xMiden/node/pull/1983)). -- Added `accept`, `origin`, `user-agent`, `forwarded`, `x-forwarded-for` and `x-real-ip` headers to telemetry for gRPC requests ([#1982](https://github.com/0xMiden/node/pull/1982)). +- Added `accept`, `origin`, `user-agent`, `forwarded`, `x-forwarded-for` and `x-real-ip` headers to telemetry for gRPC requests ([#1982](https://github.com/0xMiden/node/pull/1982). +- Fixed occasional mempool panic during transaction submission, causing the lock to be held for longer than expected ([#1984](https://github.com/0xMiden/node/pull/1984)). 
## v0.14.9 (2026-04-21) diff --git a/crates/block-producer/src/mempool/mod.rs b/crates/block-producer/src/mempool/mod.rs index 734cb9204d..9395a41114 100644 --- a/crates/block-producer/src/mempool/mod.rs +++ b/crates/block-producer/src/mempool/mod.rs @@ -174,7 +174,7 @@ pub struct Mempool { /// committed it is appended here, and the oldest block's state is pruned. committed_blocks: VecDeque, - chain_tip: BlockNumber, + committed_chain_tip: BlockNumber, config: MempoolConfig, subscription: subscription::SubscriptionProvider, @@ -192,7 +192,7 @@ impl Mempool { fn new(chain_tip: BlockNumber, config: MempoolConfig) -> Mempool { Self { config, - chain_tip, + committed_chain_tip: chain_tip, subscription: SubscriptionProvider::new(chain_tip), transactions: graph::TransactionGraph::default(), batches: graph::BatchGraph::default(), @@ -203,9 +203,11 @@ impl Mempool { /// Returns the current chain tip height as seen by the mempool. /// - /// This reflects the latest committed block that the block producer is aware of. + /// This includes the block currently being built, if any. pub fn chain_tip(&self) -> BlockNumber { - self.chain_tip + self.pending_block + .as_ref() + .map_or(self.committed_chain_tip, |pending| pending.block_number) } // TRANSACTION & BATCH LIFECYCLE @@ -246,7 +248,7 @@ impl Mempool { self.subscription.transaction_added(&tx); self.inject_telemetry(); - Ok(self.chain_tip) + Ok(self.committed_chain_tip) } #[instrument(target = COMPONENT, name = "mempool.add_user_batch", skip_all)] @@ -283,7 +285,7 @@ impl Mempool { } self.inject_telemetry(); - Ok(self.chain_tip) + Ok(self.committed_chain_tip) } /// Returns a set of transactions for the next batch. 
@@ -364,7 +366,7 @@ impl Mempool { self.pending_block.as_ref().unwrap().block_number ); - let block_number = self.chain_tip.child(); + let block_number = self.chain_tip().child(); let batches = self.batches.select_block(self.config.block_budget); let block = SelectedBlock { block_number, batches }; self.pending_block = Some(block.clone()); @@ -389,7 +391,7 @@ impl Mempool { /// Panics if there is no matching block in flight. #[instrument(target = COMPONENT, name = "mempool.commit_block", skip_all)] pub fn commit_block(&mut self, block_header: BlockHeader) { - assert_eq!(self.chain_tip.child(), block_header.block_num()); + assert_eq!(self.committed_chain_tip.child(), block_header.block_num()); let block = self .pending_block .take_if(|pending| pending.block_number == block_header.block_num()) @@ -402,7 +404,7 @@ impl Mempool { .map(miden_protocol::transaction::TransactionHeader::id) .collect(); - self.chain_tip = self.chain_tip.child(); + self.committed_chain_tip = self.committed_chain_tip.child(); self.subscription.block_committed(block_header, tx_ids); self.committed_blocks.push_back(block); @@ -542,18 +544,18 @@ impl Mempool { /// Transactions from batches are requeued. Expired transactions and their descendants are then /// reverted as well. fn revert_expired(&mut self) -> HashSet { - let batches = self.batches.revert_expired(self.chain_tip); + let batches = self.batches.revert_expired(self.chain_tip()); for batch in batches { self.transactions.requeue_transactions(&batch); } - self.transactions.revert_expired(self.chain_tip) + self.transactions.revert_expired(self.chain_tip()) } /// Rejects authentication heights that fall outside the overlap guaranteed by the locally /// retained state. /// - /// The acceptable window is `[chain_tip - state_retention + 1, chain_tip]`; values below this - /// range are rejected as stale because the mempool no longer tracks the intermediate history. 
+ /// If our oldest local block is at `N`, then we allow `N-1` and newer since this means we're + /// covering the full blockchain. /// /// # Panics /// @@ -565,9 +567,11 @@ impl Mempool { authentication_height: BlockNumber, ) -> Result<(), MempoolSubmissionError> { let limit = self - .chain_tip - .checked_sub(self.committed_blocks.len() as u32) - .expect("number of committed blocks cannot exceed the chain tip"); + .committed_blocks + .front() + .map_or(self.chain_tip(), |block| block.block_number) + .parent() + .unwrap_or_default(); if authentication_height < limit { return Err(MempoolSubmissionError::StaleInputs { @@ -577,16 +581,16 @@ impl Mempool { } assert!( - authentication_height <= self.chain_tip, + authentication_height <= self.chain_tip(), "Authentication height {authentication_height} exceeded the chain tip {}", - self.chain_tip + self.chain_tip() ); Ok(()) } fn expiration_check(&self, expired_at: BlockNumber) -> Result<(), MempoolSubmissionError> { - let limit = self.chain_tip + self.config.expiration_slack; + let limit = self.chain_tip() + self.config.expiration_slack; if expired_at <= limit { return Err(MempoolSubmissionError::Expired { expired_at, limit }); } diff --git a/crates/block-producer/src/mempool/tests.rs b/crates/block-producer/src/mempool/tests.rs index 6a6e15f971..946891207d 100644 --- a/crates/block-producer/src/mempool/tests.rs +++ b/crates/block-producer/src/mempool/tests.rs @@ -131,31 +131,37 @@ fn failed_batch_transactions_are_requeued() { fn block_commit_reverts_expired_txns() { let (mut uut, _) = Mempool::for_tests(); uut.config.expiration_slack = 0; + let mut reference = uut.clone(); let tx_to_commit = MockProvenTxBuilder::with_account_index(0).build(); let tx_to_commit = Arc::new(AuthenticatedTransaction::from_inner(tx_to_commit)); - // Force the tx into a pending block. + // Force the tx into the next block by batching it. 
uut.add_transaction(tx_to_commit.clone()).unwrap(); uut.select_batch().unwrap(); uut.commit_batch(Arc::new(ProvenBatch::mocked_from_transactions([ tx_to_commit.raw_proven_transaction() ]))); - let block = uut.select_block(); - // A reverted transaction behaves as if it never existed, the current state is the expected - // outcome, plus an extra committed block at the end. - let mut reference = uut.clone(); - // Add a new transaction which will expire when the pending block is committed. + // Add a new transaction which will expire when the block is committed. let tx_to_revert = MockProvenTxBuilder::with_account_index(1) - .expiration_block_num(block.block_number) + .expiration_block_num(uut.chain_tip().child()) .build(); let tx_to_revert = Arc::new(AuthenticatedTransaction::from_inner(tx_to_revert)); uut.add_transaction(tx_to_revert).unwrap(); - // Commit the pending block which should revert the above tx. + // Create and commit the block which should revert the above tx. + let block = uut.select_block(); let arb_header = BlockHeader::mock(block.block_number, None, None, &[], Word::empty()); uut.commit_block(arb_header.clone()); + + // A reverted transaction behaves as if it never existed. 
+ reference.add_transaction(tx_to_commit.clone()).unwrap(); + reference.select_batch().unwrap(); + reference.commit_batch(Arc::new(ProvenBatch::mocked_from_transactions([ + tx_to_commit.raw_proven_transaction() + ]))); + reference.select_block(); reference.commit_block(arb_header); assert_eq!(uut, reference); diff --git a/crates/block-producer/src/mempool/tests/add_transaction.rs b/crates/block-producer/src/mempool/tests/add_transaction.rs index dc73f317dc..6abe596572 100644 --- a/crates/block-producer/src/mempool/tests/add_transaction.rs +++ b/crates/block-producer/src/mempool/tests/add_transaction.rs @@ -79,12 +79,13 @@ mod tx_expiration { #[test] fn expiration_after_slack_limit_is_accepted() { let mut uut = setup(); - let limit = uut.chain_tip + uut.config.expiration_slack; + let limit = uut.chain_tip() + uut.config.expiration_slack; let tx = MockProvenTxBuilder::with_account_index(0) .expiration_block_num(limit.child()) .build(); - let tx = AuthenticatedTransaction::from_inner(tx).with_authentication_height(uut.chain_tip); + let tx = + AuthenticatedTransaction::from_inner(tx).with_authentication_height(uut.chain_tip()); let tx = Arc::new(tx); uut.add_transaction(tx).unwrap(); } @@ -92,14 +93,14 @@ mod tx_expiration { #[test] fn expiration_within_slack_limit_is_rejected() { let mut uut = setup(); - let limit = uut.chain_tip + uut.config.expiration_slack; + let limit = uut.chain_tip() + uut.config.expiration_slack; - for i in uut.chain_tip.child().as_u32()..=limit.as_u32() { + for i in uut.chain_tip().child().as_u32()..=limit.as_u32() { let tx = MockProvenTxBuilder::with_account_index(0) .expiration_block_num(i.into()) .build(); - let tx = - AuthenticatedTransaction::from_inner(tx).with_authentication_height(uut.chain_tip); + let tx = AuthenticatedTransaction::from_inner(tx) + .with_authentication_height(uut.chain_tip()); let tx = Arc::new(tx); let result = uut.add_transaction(tx); @@ -115,9 +116,10 @@ mod tx_expiration { fn already_expired_is_rejected() { let 
mut uut = setup(); let tx = MockProvenTxBuilder::with_account_index(0) - .expiration_block_num(uut.chain_tip) + .expiration_block_num(uut.chain_tip()) .build(); - let tx = AuthenticatedTransaction::from_inner(tx).with_authentication_height(uut.chain_tip); + let tx = + AuthenticatedTransaction::from_inner(tx).with_authentication_height(uut.chain_tip()); let tx = Arc::new(tx); let result = uut.add_transaction(tx); @@ -154,11 +156,11 @@ mod authentication_height { fn stale_inputs_are_rejected() { let mut uut = setup(); - let oldest_local = uut.chain_tip.as_u32() - uut.config.state_retention.get() as u32 + 1; + let oldest_mempool = uut.committed_blocks.front().map(|block| block.block_number).unwrap(); let tx = MockProvenTxBuilder::with_account_index(0).build(); let tx = AuthenticatedTransaction::from_inner(tx) - .with_authentication_height((oldest_local - 2).into()); + .with_authentication_height((oldest_mempool.as_u32() - 2).into()); let tx = Arc::new(tx); uut.add_transaction(tx).unwrap_err(); } @@ -173,7 +175,7 @@ mod authentication_height { let tx = MockProvenTxBuilder::with_account_index(0).build(); let tx = AuthenticatedTransaction::from_inner(tx) - .with_authentication_height(uut.chain_tip.child()); + .with_authentication_height(uut.chain_tip().child()); let tx = Arc::new(tx); let _ = uut.add_transaction(tx); } @@ -186,9 +188,9 @@ mod authentication_height { fn inputs_from_within_overlap_are_accepted() { let mut uut = setup(); - let oldest_local = uut.chain_tip.as_u32() - uut.config.state_retention.get() as u32 + 1; + let oldest_local = uut.chain_tip().as_u32() - uut.config.state_retention.get() as u32 + 1; - for i in oldest_local - 1..=uut.chain_tip.as_u32() { + for i in oldest_local - 1..=uut.chain_tip().as_u32() { let tx = MockProvenTxBuilder::with_account_index(i).build(); let tx = AuthenticatedTransaction::from_inner(tx).with_authentication_height(i.into()); let tx = Arc::new(tx); @@ -199,7 +201,7 @@ mod authentication_height { result, Ok(..), "Failed run 
with authentication height {i}, chain tip {} and oldest local {oldest_local}", - uut.chain_tip + uut.chain_tip() ); } } From 29a7c3456218767d5cf1fb76a4842c3876b1ec65 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Fri, 24 Apr 2026 11:30:31 +0200 Subject: [PATCH 06/28] feat(mempool): Avoid allocating in `SelectedBatch` query (#1991) --- crates/block-producer/src/domain/batch.rs | 21 +++++++++++++++++-- .../block-producer/src/mempool/graph/batch.rs | 11 ++-------- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/crates/block-producer/src/domain/batch.rs b/crates/block-producer/src/domain/batch.rs index 9c3cbb7e17..ca739b796b 100644 --- a/crates/block-producer/src/domain/batch.rs +++ b/crates/block-producer/src/domain/batch.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::sync::Arc; use miden_protocol::Word; @@ -23,6 +23,7 @@ pub(crate) struct SelectedBatch { txs: Vec>, id: BatchId, account_updates: HashMap)>, + unauthenticated_notes: HashSet, } impl SelectedBatch { @@ -55,6 +56,10 @@ impl SelectedBatch { .map(|(account, (from, to, store))| (*account, *from, *to, *store)) } + pub(crate) fn unauthenticated_note_commitments(&self) -> impl Iterator { + self.unauthenticated_notes.iter().copied() + } + pub(crate) fn expires_at(&self) -> BlockNumber { self.txs .iter() @@ -116,6 +121,18 @@ not match the current commitment {}", let Self { txs, account_updates } = self; let id = BatchId::from_ids(txs.iter().map(|tx| (tx.id(), tx.account_id()))); - SelectedBatch { txs, id, account_updates } + let mut unauthenticated_notes: HashSet<_> = + txs.iter().flat_map(|tx| tx.unauthenticated_note_commitments()).collect(); + + for output_note in txs.iter().flat_map(|tx| tx.output_note_commitments()) { + unauthenticated_notes.remove(&output_note); + } + + SelectedBatch { + txs, + id, + account_updates, + unauthenticated_notes, + } } } diff --git 
a/crates/block-producer/src/mempool/graph/batch.rs b/crates/block-producer/src/mempool/graph/batch.rs index e46cb6e563..a3200de168 100644 --- a/crates/block-producer/src/mempool/graph/batch.rs +++ b/crates/block-producer/src/mempool/graph/batch.rs @@ -1,4 +1,4 @@ -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use std::sync::Arc; use miden_protocol::Word; @@ -29,14 +29,7 @@ impl GraphNode for SelectedBatch { } fn unauthenticated_notes(&self) -> Box + '_> { - // Filter notes that are produced within this batch. - let output_notes: HashSet = self.output_notes().collect(); - Box::new( - self.transactions() - .iter() - .flat_map(|tx| tx.unauthenticated_note_commitments()) - .filter(move |note| !output_notes.contains(note)), - ) + Box::new(self.unauthenticated_note_commitments()) } fn account_updates( From 046a728ea4ae7d2df66f0cdb9fd0da42bfa92c48 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Fri, 24 Apr 2026 12:57:21 +0200 Subject: [PATCH 07/28] ci: cache rust builds on `main` (#2000) --- .github/workflows/ci.yml | 50 +++++++++++++++++++++------------------- 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cf3ceddf67..bd265dd935 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -27,6 +27,8 @@ env: # # This provides a convenient way to evict old or corrupted cache. RUST_CACHE_KEY: rust-cache-2026.02.02 + # Shared branch-aware cache namespace for rust-cache. + RUST_CACHE_SHARED_KEY: ${{ github.workflow }}-build-${{ github.base_ref || github.ref_name }} # Reduce cache usage by removing debug information. 
CARGO_PROFILE_DEV_DEBUG: 0 @@ -53,9 +55,9 @@ jobs: run: rustup update --no-self-update - uses: Swatinem/rust-cache@v2 with: - shared-key: ${{ github.workflow }}-build + shared-key: ${{ env.RUST_CACHE_SHARED_KEY }} prefix-key: ${{ env.RUST_CACHE_KEY }} - save-if: ${{ github.ref == 'refs/heads/next' }} + save-if: ${{ github.ref == 'refs/heads/next' || github.ref == 'refs/heads/main' }} - name: cargo build run: cargo build --workspace --all-targets --locked - name: Check static linkage @@ -106,7 +108,7 @@ jobs: run: rustup update --no-self-update - uses: Swatinem/rust-cache@v2 with: - shared-key: ${{ github.workflow }}-build + shared-key: ${{ env.RUST_CACHE_SHARED_KEY }} prefix-key: ${{ env.RUST_CACHE_KEY }} save-if: false - name: clippy @@ -125,7 +127,7 @@ jobs: tool: nextest@0.9.122 - uses: Swatinem/rust-cache@v2 with: - shared-key: ${{ github.workflow }}-build + shared-key: ${{ env.RUST_CACHE_SHARED_KEY }} prefix-key: ${{ env.RUST_CACHE_KEY }} save-if: false - name: Build tests @@ -146,7 +148,7 @@ jobs: run: rustup update --no-self-update - uses: Swatinem/rust-cache@v2 with: - shared-key: ${{ github.workflow }}-build + shared-key: ${{ env.RUST_CACHE_SHARED_KEY }} prefix-key: ${{ env.RUST_CACHE_KEY }} save-if: false - name: Build docs @@ -166,7 +168,7 @@ jobs: run: rustup update --no-self-update - uses: Swatinem/rust-cache@v2 with: - shared-key: ${{ github.workflow }}-build + shared-key: ${{ env.RUST_CACHE_SHARED_KEY }} prefix-key: ${{ env.RUST_CACHE_KEY }} save-if: false - uses: taiki-e/install-action@v2 @@ -181,12 +183,12 @@ jobs: cargo run --bin miden-node-stress-test seed-store \ --data-directory ${{ env.DATA_DIR }} \ --num-accounts 500 --public-accounts-percentage 50 - # TODO re-introduce + # TODO re-introduce # - name: Benchmark state sync - # run: | - # cargo run --bin miden-node-stress-test benchmark-store \ - # --data-directory ${{ env.DATA_DIR }} \ - # --iterations 10 --concurrency 1 sync-state + # run: | + # cargo run --bin miden-node-stress-test 
benchmark-store \ + # --data-directory ${{ env.DATA_DIR }} \ + # --iterations 10 --concurrency 1 sync-state - name: Benchmark notes sync run: | cargo run --bin miden-node-stress-test benchmark-store \ @@ -210,19 +212,19 @@ jobs: name: wasm targets runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@v6 - - name: Rustup - run: rustup update --no-self-update - - name: cargo build - run: | - cargo build --locked -p miden-remote-prover-client \ - --target wasm32-unknown-unknown --no-default-features \ - --features batch-prover,block-prover,tx-prover # no-std compatible build - - name: clippy - run: | - cargo clippy --locked -p miden-remote-prover-client \ - --target wasm32-unknown-unknown --no-default-features \ - --features batch-prover,block-prover,tx-prover -- -D warnings + - uses: actions/checkout@v6 + - name: Rustup + run: rustup update --no-self-update + - name: cargo build + run: | + cargo build --locked -p miden-remote-prover-client \ + --target wasm32-unknown-unknown --no-default-features \ + --features batch-prover,block-prover,tx-prover # no-std compatible build + - name: clippy + run: | + cargo clippy --locked -p miden-remote-prover-client \ + --target wasm32-unknown-unknown --no-default-features \ + --features batch-prover,block-prover,tx-prover -- -D warnings # =============================================================================================== # Jobs that don't require caching to be efficient From 44775689515dd56cd6624abe97848c17e70d6215 Mon Sep 17 00:00:00 2001 From: KOVACS Krisztian Date: Fri, 24 Apr 2026 13:39:19 +0200 Subject: [PATCH 08/28] fix(github): remove prover proxy job from debian publish workflow (#2001) --- .github/workflows/publish-debian-all.yml | 23 ----------------------- 1 file changed, 23 deletions(-) diff --git a/.github/workflows/publish-debian-all.yml b/.github/workflows/publish-debian-all.yml index 3aea36b5c9..1b5ccdb742 100644 --- a/.github/workflows/publish-debian-all.yml +++ 
b/.github/workflows/publish-debian-all.yml @@ -67,29 +67,6 @@ jobs: crate: miden-remote-prover arch: ${{ matrix.arch }} - publish-prover-proxy: - name: Publish Prover Proxy ${{ matrix.arch }} Debian - strategy: - matrix: - arch: [amd64, arm64] - runs-on: - labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-24.04' }} - steps: - - name: Checkout repo - uses: actions/checkout@main - with: - fetch-depth: 0 - - name: Build and Publish Prover Proxy - uses: ./.github/actions/debian - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - gitref: ${{ env.version }} - crate_dir: remote-prover - package: miden-prover-proxy - packaging_dir: prover-proxy - crate: miden-remote-prover - arch: ${{ matrix.arch }} - publish-network-monitor: name: Publish Network Monitor ${{ matrix.arch }} Debian strategy: From 4ac37ea4eb613b77ba062ebbb82af01cad411c11 Mon Sep 17 00:00:00 2001 From: KOVACS Krisztian Date: Fri, 24 Apr 2026 14:01:14 +0200 Subject: [PATCH 09/28] fix(docker): use correct runtime-base image (#1999) --- bin/node/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/node/Dockerfile b/bin/node/Dockerfile index 04cb6783cd..5986451a28 100644 --- a/bin/node/Dockerfile +++ b/bin/node/Dockerfile @@ -28,7 +28,7 @@ COPY . . RUN cargo build --release --locked --bin miden-node # Base line runtime image with runtime dependencies installed. 
-FROM debian:bullseye-slim AS runtime-base +FROM debian:bookworm-slim AS runtime-base RUN apt-get update && \ apt-get -y upgrade && \ apt-get install -y --no-install-recommends sqlite3 \ From a44d980470f4b3a9250e752d0e0c6698d89aeeaa Mon Sep 17 00:00:00 2001 From: KOVACS Krisztian Date: Fri, 24 Apr 2026 14:37:29 +0200 Subject: [PATCH 10/28] perf(db): disable SQLite memory accounting (#1988) Co-authored-by: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> --- .cargo/config.toml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.cargo/config.toml b/.cargo/config.toml index 2e07606d52..003c52b3df 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,2 +1,9 @@ [target.wasm32-unknown-unknown] rustflags = ['--cfg', 'getrandom_backend="wasm_js"'] + +[env] +# These compile-time flags are used to disable SQLite's memory +# accounting features. These introduce global mutexes into SQLite's +# memory allocator which can cause contention and performance +# degradation in high-concurrency scenarios we have in the node. +LIBSQLITE3_FLAGS = "-USQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_DEFAULT_MEMSTATUS=0" From 7d4c33f498f5270b7a2d3ca2f83a1ca7bc3f0f7a Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Tue, 28 Apr 2026 09:59:10 +0200 Subject: [PATCH 11/28] ci: optimise for test runtime execution (#2004) --- .github/workflows/ci.yml | 84 +++++++++++++++++++++++++++++++--------- Cargo.toml | 19 +++++++-- 2 files changed, 81 insertions(+), 22 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bd265dd935..8ed5c6972d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,6 +1,6 @@ # Continuous integration jobs. # -# These get run on every pull-request, with github cache updated on push into `next`. +# These get run on every pull-request. 
name: CI permissions: @@ -29,6 +29,10 @@ env: RUST_CACHE_KEY: rust-cache-2026.02.02 # Shared branch-aware cache namespace for rust-cache. RUST_CACHE_SHARED_KEY: ${{ github.workflow }}-build-${{ github.base_ref || github.ref_name }} + # Per-run cache used to share build outputs across jobs within a single workflow run. + RUN_CACHE_KEY: ${{ github.workflow }}-run-${{ github.run_id }} + # Match rust-cache's compilation mode so restored outputs stay reusable downstream. + CARGO_INCREMENTAL: 0 # Reduce cache usage by removing debug information. CARGO_PROFILE_DEV_DEBUG: 0 @@ -42,7 +46,8 @@ jobs: # Conventional builds, lints and tests that re-use a single cache for efficiency # =============================================================================================== - # Normal cargo build that populates a cache for all subsequent jobs to re-use. + # Normal cargo build that maintains a persistent trunk cache and publishes a + # single per-run cache for downstream jobs to restore immediately. 
build: runs-on: ubuntu-24.04 steps: @@ -97,6 +102,14 @@ jobs: fi done echo "Static linkage check passed for all of ${bin_targets[@]}" + - name: Save run cache + uses: actions/cache/save@v4 + with: + key: ${{ env.RUN_CACHE_KEY }} + path: | + target + ~/.cargo/registry + ~/.cargo/git clippy: name: lint - clippy @@ -106,11 +119,14 @@ jobs: - uses: actions/checkout@v6 - name: Rustup run: rustup update --no-self-update - - uses: Swatinem/rust-cache@v2 + - name: Restore run cache + uses: actions/cache/restore@v4 with: - shared-key: ${{ env.RUST_CACHE_SHARED_KEY }} - prefix-key: ${{ env.RUST_CACHE_KEY }} - save-if: false + key: ${{ env.RUN_CACHE_KEY }} + path: | + target + ~/.cargo/registry + ~/.cargo/git - name: clippy run: cargo clippy --locked --all-targets --all-features --workspace -- -D warnings @@ -125,11 +141,14 @@ jobs: - uses: taiki-e/install-action@v2 with: tool: nextest@0.9.122 - - uses: Swatinem/rust-cache@v2 + - name: Restore run cache + uses: actions/cache/restore@v4 with: - shared-key: ${{ env.RUST_CACHE_SHARED_KEY }} - prefix-key: ${{ env.RUST_CACHE_KEY }} - save-if: false + key: ${{ env.RUN_CACHE_KEY }} + path: | + target + ~/.cargo/registry + ~/.cargo/git - name: Build tests run: cargo nextest run --all-features --workspace --no-run - name: Run tests @@ -146,11 +165,14 @@ jobs: uses: ./.github/actions/cleanup-runner - name: Rustup run: rustup update --no-self-update - - uses: Swatinem/rust-cache@v2 + - name: Restore run cache + uses: actions/cache/restore@v4 with: - shared-key: ${{ env.RUST_CACHE_SHARED_KEY }} - prefix-key: ${{ env.RUST_CACHE_KEY }} - save-if: false + key: ${{ env.RUN_CACHE_KEY }} + path: | + target + ~/.cargo/registry + ~/.cargo/git - name: Build docs run: cargo doc --no-deps --workspace --all-features --locked @@ -166,11 +188,14 @@ jobs: - uses: actions/checkout@v6 - name: Rustup run: rustup update --no-self-update - - uses: Swatinem/rust-cache@v2 + - name: Restore run cache + uses: actions/cache/restore@v4 with: - shared-key: ${{ 
env.RUST_CACHE_SHARED_KEY }} - prefix-key: ${{ env.RUST_CACHE_KEY }} - save-if: false + key: ${{ env.RUN_CACHE_KEY }} + path: | + target + ~/.cargo/registry + ~/.cargo/git - uses: taiki-e/install-action@v2 with: tool: nextest@0.9.122 @@ -200,6 +225,29 @@ jobs: --data-directory ${{ env.DATA_DIR }} \ --iterations 10 --concurrency 1 sync-nullifiers --prefixes 10 + cleanup-run-cache: + name: cleanup run cache + runs-on: ubuntu-24.04 + if: ${{ always() }} + needs: + - build + - clippy + - tests + - doc + - stress-test + permissions: + actions: write + contents: read + steps: + - name: Delete run cache + env: + GH_TOKEN: ${{ github.token }} + run: | + gh api \ + --method DELETE \ + -H "Accept: application/vnd.github+json" \ + "/repos/${{ github.repository }}/actions/caches?key=${{ env.RUN_CACHE_KEY }}" + # =============================================================================================== # WASM related jobs # =============================================================================================== diff --git a/Cargo.toml b/Cargo.toml index 567ca37ca9..949541aa3d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,13 +34,24 @@ repository = "https://github.com/0xMiden/node" rust-version = "1.93" version = "0.14.9" -# Optimize the cryptography for faster tests involving account creation. -[profile.test.package.miden-crypto] -opt-level = 2 - [profile.release] debug = true +# Optimise for test execution speed. +# +# Proving in particular is extremely slow without this. +# +# Unfortunately, we cannot simply optimise specific dependencies because at the +# moment the slow code is monomorphised only at the final executable stage, +# which means it inherits the binaries optimisation level. +# +# This uses the dev profile because we share a single build in CI for all jobs so this +# cannot be set for test only. +[profile.dev.package."*"] +opt-level = 2 +[profile.dev.package.miden-remote-prover] +opt-level = 2 + [workspace.dependencies] # Workspace crates. 
miden-large-smt-backend-rocksdb = { path = "crates/large-smt-backend-rocksdb", version = "0.14" } From dd6556a6dcbaed47dd96c0cfa2dd4e8799b86255 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Tue, 28 Apr 2026 11:57:46 +0200 Subject: [PATCH 12/28] ci: fix profile overrides not observed by cache (#2009) --- .cargo/config.toml | 21 +++++++++++++++++++++ .github/workflows/ci.yml | 1 + Cargo.toml | 18 ------------------ 3 files changed, 22 insertions(+), 18 deletions(-) diff --git a/.cargo/config.toml b/.cargo/config.toml index 003c52b3df..fd6fa0925a 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -7,3 +7,24 @@ rustflags = ['--cfg', 'getrandom_backend="wasm_js"'] # memory allocator which can cause contention and performance # degradation in high-concurrency scenarios we have in the node. LIBSQLITE3_FLAGS = "-USQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_DEFAULT_MEMSTATUS=0" + +[profile.release] +debug = true + +# Optimise for test execution speed. These overrides live here so that CI +# rust-cache picks up on them as part of its key hash. Changes in Cargo.toml +# are ignored because they're part of a virtual manifest and not a package per se. +# +# Proving in particular is extremely slow without this. +# +# Unfortunately, we cannot simply optimise specific dependencies because at the +# moment the slow code is monomorphised only at the final executable stage, +# which means it inherits the binaries optimisation level. +# +# This uses the dev profile because we share a single build in CI for all jobs so this +# cannot be set for test only. 
+[profile.dev.package."*"] +opt-level = 2 + +[profile.dev.package.miden-remote-prover] +opt-level = 2 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8ed5c6972d..52fb35fd19 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -62,6 +62,7 @@ jobs: with: shared-key: ${{ env.RUST_CACHE_SHARED_KEY }} prefix-key: ${{ env.RUST_CACHE_KEY }} + cache-workspace-crates: true save-if: ${{ github.ref == 'refs/heads/next' || github.ref == 'refs/heads/main' }} - name: cargo build run: cargo build --workspace --all-targets --locked diff --git a/Cargo.toml b/Cargo.toml index 949541aa3d..b8024e0eb8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,24 +34,6 @@ repository = "https://github.com/0xMiden/node" rust-version = "1.93" version = "0.14.9" -[profile.release] -debug = true - -# Optimise for test execution speed. -# -# Proving in particular is extremely slow without this. -# -# Unfortunately, we cannot simply optimise specific dependencies because at the -# moment the slow code is monomorphised only at the final executable stage, -# which means it inherits the binaries optimisation level. -# -# This uses the dev profile because we share a single build in CI for all jobs so this -# cannot be set for test only. -[profile.dev.package."*"] -opt-level = 2 -[profile.dev.package.miden-remote-prover] -opt-level = 2 - [workspace.dependencies] # Workspace crates. 
miden-large-smt-backend-rocksdb = { path = "crates/large-smt-backend-rocksdb", version = "0.14" } From d3c030e5c5f0d97586e0eee2c2d2d318a2aa9fed Mon Sep 17 00:00:00 2001 From: Serge Radinovich <47865535+sergerad@users.noreply.github.com> Date: Tue, 28 Apr 2026 23:50:24 +1200 Subject: [PATCH 13/28] chore: Split apply_block and improve instrumentation (#1896) Co-authored-by: Bobbin Threadbare <43513081+bobbinth@users.noreply.github.com> Co-authored-by: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> --- crates/db/src/lib.rs | 11 +- crates/store/src/db/mod.rs | 9 +- crates/store/src/state/apply_block.rs | 326 +++++++++++++++----------- 3 files changed, 201 insertions(+), 145 deletions(-) diff --git a/crates/db/src/lib.rs b/crates/db/src/lib.rs index 7000f131d1..0afc56a1a5 100644 --- a/crates/db/src/lib.rs +++ b/crates/db/src/lib.rs @@ -46,10 +46,13 @@ impl Db { .await .map_err(|e| DatabaseError::ConnectionPoolObtainError(Box::new(e)))?; - conn.interact(|conn| <_ as diesel::Connection>::transaction::(conn, query)) - .in_current_span() - .await - .map_err(|err| E::from(DatabaseError::interact(&msg.to_string(), &err)))? + let span = tracing::Span::current(); + conn.interact(move |conn| { + let _guard = span.enter(); + <_ as diesel::Connection>::transaction::(conn, query) + }) + .await + .map_err(|err| E::from(DatabaseError::interact(&msg.to_string(), &err)))? } /// Run the query _without_ a transaction diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 93dec452e6..1f26619374 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -548,12 +548,17 @@ impl Db { // XXX FIXME TODO free floating mutex MUST NOT exist // it doesn't bind it properly to the data locked! 
- if allow_acquire.send(()).is_err() { - tracing::warn!(target: COMPONENT, "failed to send notification for successful block application, potential deadlock"); + { + let _span = tracing::info_span!(target: COMPONENT, "acquire_write_lock").entered(); + if allow_acquire.send(()).is_err() { + tracing::warn!(target: COMPONENT, "failed to send notification for successful block application, potential deadlock"); + } } models::queries::prune_history(conn, signed_block.header().block_num())?; + let _span = + tracing::info_span!(target: COMPONENT, "acquire_done_lock").entered(); acquire_done.blocking_recv()?; Ok(()) diff --git a/crates/store/src/state/apply_block.rs b/crates/store/src/state/apply_block.rs index d8aab21e70..00cfb96c8c 100644 --- a/crates/store/src/state/apply_block.rs +++ b/crates/store/src/state/apply_block.rs @@ -2,9 +2,12 @@ use std::sync::Arc; use miden_node_proto::BlockProofRequest; use miden_node_utils::ErrorReport; +use miden_protocol::Word; use miden_protocol::account::delta::AccountUpdateDetails; -use miden_protocol::block::SignedBlock; -use miden_protocol::note::NoteDetails; +use miden_protocol::block::account_tree::AccountMutationSet; +use miden_protocol::block::nullifier_tree::NullifierMutationSet; +use miden_protocol::block::{BlockBody, BlockHeader, SignedBlock}; +use miden_protocol::note::{NoteDetails, Nullifier}; use miden_protocol::transaction::OutputNote; use miden_protocol::utils::serde::Serializable; use tokio::sync::oneshot; @@ -44,7 +47,6 @@ impl State { /// /// Returns an error if `proving_inputs` is `None` and the block is not the genesis block. // TODO: This span is logged in a root span, we should connect it to the parent span. - #[expect(clippy::too_many_lines)] #[instrument(target = COMPONENT, skip_all, err)] pub async fn apply_block( &self, @@ -56,36 +58,10 @@ impl State { let header = signed_block.header(); let body = signed_block.body(); - // Validate that header and body match. 
- let tx_commitment = body.transactions().commitment(); - if header.tx_commitment() != tx_commitment { - return Err(InvalidBlockError::InvalidBlockTxCommitment { - expected: tx_commitment, - actual: header.tx_commitment(), - } - .into()); - } - let block_num = header.block_num(); let block_commitment = header.commitment(); - // Validate that the applied block is the next block in sequence. - let prev_block = self - .db - .select_block_header_by_block_num(None) - .await? - .ok_or(ApplyBlockError::DbBlockHeaderEmpty)?; - let expected_block_num = prev_block.block_num().child(); - if block_num != expected_block_num { - return Err(InvalidBlockError::NewBlockInvalidBlockNum { - expected: expected_block_num, - submitted: block_num, - } - .into()); - } - if header.prev_block_commitment() != prev_block.commitment() { - return Err(InvalidBlockError::NewBlockInvalidPrevCommitment.into()); - } + self.validate_block_header(header, body).await?; // Save the block to the block store. In a case of a rolled-back DB transaction, the // in-memory state will be unchanged, but the block might still be written into the @@ -98,117 +74,14 @@ impl State { async move { store.save_block(block_num, &signed_block_bytes).await }.in_current_span(), ); - // Scope to read in-memory data, compute mutations required for updating account - // and nullifier trees, and validate the request. 
let ( nullifier_tree_old_root, nullifier_tree_update, account_tree_old_root, account_tree_update, - ) = { - let inner = self.inner.read().await; - - let _span = info_span!(target: COMPONENT, "update_in_memory_structs").entered(); - - // nullifiers can be produced only once - let duplicate_nullifiers: Vec<_> = body - .created_nullifiers() - .iter() - .filter(|&nullifier| inner.nullifier_tree.get_block_num(nullifier).is_some()) - .copied() - .collect(); - if !duplicate_nullifiers.is_empty() { - return Err(InvalidBlockError::DuplicatedNullifiers(duplicate_nullifiers).into()); - } + ) = self.compute_tree_mutations(header, body).await?; - // compute updates for the in-memory data structures - - // new_block.chain_root must be equal to the chain MMR root prior to the update - let peaks = inner.blockchain.peaks(); - if peaks.hash_peaks() != header.chain_commitment() { - return Err(InvalidBlockError::NewBlockInvalidChainCommitment.into()); - } - - // compute update for nullifier tree - let nullifier_tree_update = inner - .nullifier_tree - .compute_mutations( - body.created_nullifiers().iter().map(|nullifier| (*nullifier, block_num)), - ) - .map_err(InvalidBlockError::NewBlockNullifierAlreadySpent)?; - - if nullifier_tree_update.as_mutation_set().root() != header.nullifier_root() { - // We do our best here to notify the serve routine, if it doesn't care (dropped the - // receiver) we can't do much. 
- let _ = self.termination_ask.try_send(ApplyBlockError::InvalidBlockError( - InvalidBlockError::NewBlockInvalidNullifierRoot, - )); - return Err(InvalidBlockError::NewBlockInvalidNullifierRoot.into()); - } - - // compute update for account tree - let account_tree_update = inner - .account_tree - .compute_mutations( - body.updated_accounts() - .iter() - .map(|update| (update.account_id(), update.final_state_commitment())), - ) - .map_err(|e| match e { - HistoricalError::AccountTreeError(err) => { - InvalidBlockError::NewBlockDuplicateAccountIdPrefix(err) - }, - HistoricalError::MerkleError(_) => { - panic!("Unexpected MerkleError during account tree mutation computation") - }, - })?; - - if account_tree_update.as_mutation_set().root() != header.account_root() { - let _ = self.termination_ask.try_send(ApplyBlockError::InvalidBlockError( - InvalidBlockError::NewBlockInvalidAccountRoot, - )); - return Err(InvalidBlockError::NewBlockInvalidAccountRoot.into()); - } - - ( - inner.nullifier_tree.root(), - nullifier_tree_update, - inner.account_tree.root_latest(), - account_tree_update, - ) - }; - - // Build note tree. 
- let note_tree = body.compute_block_note_tree(); - if note_tree.root() != header.note_root() { - return Err(InvalidBlockError::NewBlockInvalidNoteRoot.into()); - } - - let notes = body - .output_notes() - .map(|(note_index, note)| { - let (details, nullifier) = match note { - OutputNote::Public(note) => { - (Some(NoteDetails::from(note.as_note())), Some(note.as_note().nullifier())) - }, - OutputNote::Private(_) => (None, None), - }; - - let inclusion_path = note_tree.open(note_index); - - let note_record = NoteRecord { - block_num, - note_index, - note_id: note.id().as_word(), - note_commitment: note.to_commitment(), - metadata: note.metadata().clone(), - details, - inclusion_path, - }; - - Ok((note_record, nullifier)) - }) - .collect::, InvalidBlockError>>()?; + let notes = Self::build_note_records(header, body)?; // Signals the transaction is ready to be committed, and the write lock can be acquired. let (allow_acquire, acquired_allowed) = oneshot::channel::<()>(); @@ -238,8 +111,12 @@ impl State { .in_current_span(), ); - // Wait for the message from the DB update task, that we ready to commit the DB transaction. - acquired_allowed.await.map_err(ApplyBlockError::ClosedChannel)?; + // Wait for the message from the DB update task, that we ready to commit the DB + // transaction. + acquired_allowed + .instrument(info_span!(target: COMPONENT, "await_db_readiness")) + .await + .map_err(ApplyBlockError::ClosedChannel)?; // Awaiting the block saving task to complete without errors. block_save_task.await??; @@ -249,7 +126,11 @@ impl State { // We need to hold the write lock here to prevent inconsistency between the in-memory // state and the DB state. Thus, we need to wait for the DB update task to complete // successfully. 
- let mut inner = self.inner.write().await; + let mut inner = self + .inner + .write() + .instrument(info_span!(target: COMPONENT, "acquire_inner_write_lock")) + .await; // We need to check that neither the nullifier tree nor the account tree have changed // while we were waiting for the DB preparation task to complete. If either of them @@ -292,10 +173,177 @@ impl State { .in_current_span() .await?; - self.forest.write().await.apply_block_updates(block_num, account_deltas)?; + self.forest + .write() + .instrument(info_span!(target: COMPONENT, "acquire_forest_write_lock")) + .await + .apply_block_updates(block_num, account_deltas)?; info!(%block_commitment, block_num = block_num.as_u32(), COMPONENT, "apply_block successful"); Ok(()) } + + /// Validates that the block header is consistent with the block body and the current state. + #[instrument(target = COMPONENT, skip_all, err)] + async fn validate_block_header( + &self, + header: &BlockHeader, + body: &BlockBody, + ) -> Result<(), ApplyBlockError> { + // Validate that header and body match. + let tx_commitment = body.transactions().commitment(); + if header.tx_commitment() != tx_commitment { + return Err(InvalidBlockError::InvalidBlockTxCommitment { + expected: tx_commitment, + actual: header.tx_commitment(), + } + .into()); + } + + let block_num = header.block_num(); + + // Validate that the applied block is the next block in sequence. + let prev_block = self + .db + .select_block_header_by_block_num(None) + .await? 
+ .ok_or(ApplyBlockError::DbBlockHeaderEmpty)?; + let expected_block_num = prev_block.block_num().child(); + if block_num != expected_block_num { + return Err(InvalidBlockError::NewBlockInvalidBlockNum { + expected: expected_block_num, + submitted: block_num, + } + .into()); + } + if header.prev_block_commitment() != prev_block.commitment() { + return Err(InvalidBlockError::NewBlockInvalidPrevCommitment.into()); + } + + Ok(()) + } + + /// Computes nullifier and account tree mutations, validating roots against the block header. + #[instrument(target = COMPONENT, skip_all, err)] + async fn compute_tree_mutations( + &self, + header: &BlockHeader, + body: &BlockBody, + ) -> Result<(Word, NullifierMutationSet, Word, AccountMutationSet), ApplyBlockError> { + let inner = self + .inner + .read() + .instrument(info_span!(target: COMPONENT, "acquire_inner_read_lock")) + .await; + + let block_num = header.block_num(); + + // nullifiers can be produced only once + let duplicate_nullifiers: Vec<_> = body + .created_nullifiers() + .iter() + .filter(|&nullifier| inner.nullifier_tree.get_block_num(nullifier).is_some()) + .copied() + .collect(); + if !duplicate_nullifiers.is_empty() { + return Err(InvalidBlockError::DuplicatedNullifiers(duplicate_nullifiers).into()); + } + + // new_block.chain_root must be equal to the chain MMR root prior to the update + let peaks = inner.blockchain.peaks(); + if peaks.hash_peaks() != header.chain_commitment() { + return Err(InvalidBlockError::NewBlockInvalidChainCommitment.into()); + } + + // compute update for nullifier tree + let nullifier_tree_update = inner + .nullifier_tree + .compute_mutations( + body.created_nullifiers().iter().map(|nullifier| (*nullifier, block_num)), + ) + .map_err(InvalidBlockError::NewBlockNullifierAlreadySpent)?; + + if nullifier_tree_update.as_mutation_set().root() != header.nullifier_root() { + // We do our best here to notify the serve routine, if it doesn't care (dropped the + // receiver) we can't do much. 
+ let _ = self.termination_ask.try_send(ApplyBlockError::InvalidBlockError( + InvalidBlockError::NewBlockInvalidNullifierRoot, + )); + return Err(InvalidBlockError::NewBlockInvalidNullifierRoot.into()); + } + + // compute update for account tree + let account_tree_update = inner + .account_tree + .compute_mutations( + body.updated_accounts() + .iter() + .map(|update| (update.account_id(), update.final_state_commitment())), + ) + .map_err(|e| match e { + HistoricalError::AccountTreeError(err) => { + InvalidBlockError::NewBlockDuplicateAccountIdPrefix(err) + }, + HistoricalError::MerkleError(_) => { + panic!("Unexpected MerkleError during account tree mutation computation") + }, + })?; + + if account_tree_update.as_mutation_set().root() != header.account_root() { + let _ = self.termination_ask.try_send(ApplyBlockError::InvalidBlockError( + InvalidBlockError::NewBlockInvalidAccountRoot, + )); + return Err(InvalidBlockError::NewBlockInvalidAccountRoot.into()); + } + + Ok(( + inner.nullifier_tree.root(), + nullifier_tree_update, + inner.account_tree.root_latest(), + account_tree_update, + )) + } + + /// Builds note records with inclusion proofs from the block body. 
+ #[instrument(target = COMPONENT, skip_all, err)] + fn build_note_records( + header: &BlockHeader, + body: &BlockBody, + ) -> Result)>, ApplyBlockError> { + let block_num = header.block_num(); + + let note_tree = body.compute_block_note_tree(); + if note_tree.root() != header.note_root() { + return Err(InvalidBlockError::NewBlockInvalidNoteRoot.into()); + } + + let notes = body + .output_notes() + .map(|(note_index, note)| { + let (details, nullifier) = match note { + OutputNote::Public(note) => { + (Some(NoteDetails::from(note.as_note())), Some(note.as_note().nullifier())) + }, + OutputNote::Private(_) => (None, None), + }; + + let inclusion_path = note_tree.open(note_index); + + let note_record = NoteRecord { + block_num, + note_index, + note_id: note.id().as_word(), + note_commitment: note.to_commitment(), + metadata: note.metadata().clone(), + details, + inclusion_path, + }; + + Ok((note_record, nullifier)) + }) + .collect::, InvalidBlockError>>()?; + + Ok(notes) + } } From bb74d9d9b0168bfc99e45e409ec65f7de108674a Mon Sep 17 00:00:00 2001 From: KOVACS Krisztian Date: Wed, 29 Apr 2026 11:15:17 +0200 Subject: [PATCH 14/28] refactor(lru_cache): simplify locking in LRU cache wrapper (#2015) --- crates/ntx-builder/src/actor/execute.rs | 6 +++--- crates/rpc/src/server/api.rs | 4 ++-- crates/utils/src/lru_cache.rs | 18 ++++++++++-------- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/crates/ntx-builder/src/actor/execute.rs b/crates/ntx-builder/src/actor/execute.rs index 9718056eea..84a357a786 100644 --- a/crates/ntx-builder/src/actor/execute.rs +++ b/crates/ntx-builder/src/actor/execute.rs @@ -566,7 +566,7 @@ impl DataStore for NtxDataStore { ) -> impl FutureMaybeSend, DataStoreError>> { async move { // 1. In-memory LRU cache. 
- if let Some(cached_script) = self.script_cache.get(&script_root).await { + if let Some(cached_script) = self.script_cache.get(&script_root) { return Ok(Some(cached_script)); } @@ -574,7 +574,7 @@ impl DataStore for NtxDataStore { if let Some(script) = self.db.lookup_note_script(script_root).await.map_err(|err| { DataStoreError::other_with_source("failed to look up note script in local DB", err) })? { - self.script_cache.put(script_root, script.clone()).await; + self.script_cache.put(script_root, script.clone()); return Ok(Some(script)); } @@ -590,7 +590,7 @@ impl DataStore for NtxDataStore { if let Some(script) = maybe_script { // Collect for later persistence by the coordinator. self.fetched_scripts.lock().await.push((script_root, script.clone())); - self.script_cache.put(script_root, script.clone()).await; + self.script_cache.put(script_root, script.clone()); Ok(Some(script)) } else { Ok(None) diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index 7be85f0b77..e1b7eef685 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -197,7 +197,7 @@ impl RpcService { /// This is retrieved from the local LRU cache, or otherwise from the store on cache miss. 
#[tracing::instrument(target = COMPONENT, name = "get_block_commitment", skip_all, fields(block.number = %block))] async fn get_block_commitment(&self, block: BlockNumber) -> Result { - if let Some(commitment) = self.block_commitment_cache.get(&block).await { + if let Some(commitment) = self.block_commitment_cache.get(&block) { return Ok(commitment); } @@ -216,7 +216,7 @@ impl RpcService { .ok_or_else(|| Status::invalid_argument(format!("unknown block {block}")))?; let commitment = header.commitment(); - self.block_commitment_cache.put(block, commitment).await; + self.block_commitment_cache.put(block, commitment); Ok(commitment) } diff --git a/crates/utils/src/lru_cache.rs b/crates/utils/src/lru_cache.rs index fd4feadb11..a2013da5bc 100644 --- a/crates/utils/src/lru_cache.rs +++ b/crates/utils/src/lru_cache.rs @@ -1,9 +1,8 @@ use std::hash::Hash; use std::num::NonZeroUsize; -use std::sync::Arc; +use std::sync::{Arc, Mutex, MutexGuard}; use lru::LruCache as InnerCache; -use tokio::sync::{Mutex, MutexGuard}; use tracing::instrument; /// A newtype wrapper around an LRU cache. Ensures that the cache lock is not held across @@ -22,17 +21,20 @@ where } /// Retrieves a value from the cache. - pub async fn get(&self, key: &K) -> Option { - self.lock().await.get(key).cloned() + pub fn get(&self, key: &K) -> Option { + self.lock().get(key).cloned() } /// Puts a value into the cache. - pub async fn put(&self, key: K, value: V) { - self.lock().await.put(key, value); + pub fn put(&self, key: K, value: V) { + self.lock().put(key, value); } #[instrument(name = "lru.lock", skip_all)] - async fn lock(&self) -> MutexGuard<'_, InnerCache> { - self.0.lock().await + fn lock(&self) -> MutexGuard<'_, InnerCache> { + // SAFETY: The mutex is only held for the duration of the get/put operation + // where panics are possible only if we're running out of memory, in which + // case the entire process is likely to be unstable anyway. 
+ self.0.lock().expect("LRU cache mutex poisoned") } } From 0364fc375db486c32169e9737efb869ab176f8a3 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Wed, 29 Apr 2026 11:50:32 +0200 Subject: [PATCH 15/28] ci: simplify caching (#2011) --- .github/workflows/ci.yml | 112 ++++++++++++++++++++++----------------- 1 file changed, 62 insertions(+), 50 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 52fb35fd19..674d4ddd49 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -26,11 +26,28 @@ env: # Shared prefix key for the rust cache. # # This provides a convenient way to evict old or corrupted cache. - RUST_CACHE_KEY: rust-cache-2026.02.02 + RUST_CACHE_PREFIX: rust-cache # Shared branch-aware cache namespace for rust-cache. - RUST_CACHE_SHARED_KEY: ${{ github.workflow }}-build-${{ github.base_ref || github.ref_name }} - # Per-run cache used to share build outputs across jobs within a single workflow run. - RUN_CACHE_KEY: ${{ github.workflow }}-run-${{ github.run_id }} + # + # Pushes save to the persistent trunk cache, while pull requests append the + # PR number so they save into an ephemeral PR-specific cache. + # + # The format is
with an optional `-pr-` for pull requests. + # We default to next if no suitable is found. + RUST_CACHE_SUFFIX: >- + ${{ + format( + '{0}{1}', + github.base_ref == 'main' && 'main' + || github.base_ref == 'next' && 'next' + || github.ref == 'refs/heads/main' && 'main' + || github.ref == 'refs/heads/next' && 'next' + || 'next', + github.event_name == 'pull_request' + && format('-pr-{0}', github.event.pull_request.number) + || '' + ) + }} # Match rust-cache's compilation mode so restored outputs stay reusable downstream. CARGO_INCREMENTAL: 0 # Reduce cache usage by removing debug information. @@ -46,7 +63,7 @@ jobs: # Conventional builds, lints and tests that re-use a single cache for efficiency # =============================================================================================== - # Normal cargo build that maintains a persistent trunk cache and publishes a + # Normal cargo build that saves either the persistent trunk cache or a # single per-run cache for downstream jobs to restore immediately. 
build: runs-on: ubuntu-24.04 @@ -60,10 +77,10 @@ jobs: run: rustup update --no-self-update - uses: Swatinem/rust-cache@v2 with: - shared-key: ${{ env.RUST_CACHE_SHARED_KEY }} - prefix-key: ${{ env.RUST_CACHE_KEY }} + shared-key: ${{ env.RUST_CACHE_SUFFIX }} + prefix-key: ${{ env.RUST_CACHE_PREFIX }} cache-workspace-crates: true - save-if: ${{ github.ref == 'refs/heads/next' || github.ref == 'refs/heads/main' }} + save-if: true - name: cargo build run: cargo build --workspace --all-targets --locked - name: Check static linkage @@ -103,14 +120,6 @@ jobs: fi done echo "Static linkage check passed for all of ${bin_targets[@]}" - - name: Save run cache - uses: actions/cache/save@v4 - with: - key: ${{ env.RUN_CACHE_KEY }} - path: | - target - ~/.cargo/registry - ~/.cargo/git clippy: name: lint - clippy @@ -120,14 +129,12 @@ jobs: - uses: actions/checkout@v6 - name: Rustup run: rustup update --no-self-update - - name: Restore run cache - uses: actions/cache/restore@v4 + - uses: Swatinem/rust-cache@v2 with: - key: ${{ env.RUN_CACHE_KEY }} - path: | - target - ~/.cargo/registry - ~/.cargo/git + shared-key: ${{ env.RUST_CACHE_SUFFIX }} + prefix-key: ${{ env.RUST_CACHE_PREFIX }} + cache-workspace-crates: true + save-if: false - name: clippy run: cargo clippy --locked --all-targets --all-features --workspace -- -D warnings @@ -142,14 +149,12 @@ jobs: - uses: taiki-e/install-action@v2 with: tool: nextest@0.9.122 - - name: Restore run cache - uses: actions/cache/restore@v4 + - uses: Swatinem/rust-cache@v2 with: - key: ${{ env.RUN_CACHE_KEY }} - path: | - target - ~/.cargo/registry - ~/.cargo/git + shared-key: ${{ env.RUST_CACHE_SUFFIX }} + prefix-key: ${{ env.RUST_CACHE_PREFIX }} + cache-workspace-crates: true + save-if: false - name: Build tests run: cargo nextest run --all-features --workspace --no-run - name: Run tests @@ -166,14 +171,12 @@ jobs: uses: ./.github/actions/cleanup-runner - name: Rustup run: rustup update --no-self-update - - name: Restore run cache - uses: 
actions/cache/restore@v4 + - uses: Swatinem/rust-cache@v2 with: - key: ${{ env.RUN_CACHE_KEY }} - path: | - target - ~/.cargo/registry - ~/.cargo/git + shared-key: ${{ env.RUST_CACHE_SUFFIX }} + prefix-key: ${{ env.RUST_CACHE_PREFIX }} + cache-workspace-crates: true + save-if: false - name: Build docs run: cargo doc --no-deps --workspace --all-features --locked @@ -189,14 +192,12 @@ jobs: - uses: actions/checkout@v6 - name: Rustup run: rustup update --no-self-update - - name: Restore run cache - uses: actions/cache/restore@v4 + - uses: Swatinem/rust-cache@v2 with: - key: ${{ env.RUN_CACHE_KEY }} - path: | - target - ~/.cargo/registry - ~/.cargo/git + shared-key: ${{ env.RUST_CACHE_SUFFIX }} + prefix-key: ${{ env.RUST_CACHE_PREFIX }} + cache-workspace-crates: true + save-if: false - uses: taiki-e/install-action@v2 with: tool: nextest@0.9.122 @@ -229,7 +230,7 @@ jobs: cleanup-run-cache: name: cleanup run cache runs-on: ubuntu-24.04 - if: ${{ always() }} + if: ${{ always() && github.event_name == 'pull_request' }} needs: - build - clippy @@ -240,14 +241,25 @@ jobs: actions: write contents: read steps: - - name: Delete run cache + - name: Delete PR rust cache env: GH_TOKEN: ${{ github.token }} run: | - gh api \ - --method DELETE \ - -H "Accept: application/vnd.github+json" \ - "/repos/${{ github.repository }}/actions/caches?key=${{ env.RUN_CACHE_KEY }}" + mapfile -t cache_entries < <( + gh api \ + -H "Accept: application/vnd.github+json" \ + "/repos/${{ github.repository }}/actions/caches?key=${{ env.RUST_CACHE_PREFIX }}&ref=${{ github.ref }}" \ + --jq '.actions_caches[] | @json' + ) + for cache_entry in "${cache_entries[@]}"; do + cache_id="$(jq -r '.id' <<< "${cache_entry}")" + cache_key="$(jq -r '.key' <<< "${cache_entry}")" + echo "Deleting rust cache key=${cache_key}" + gh api \ + --method DELETE \ + -H "Accept: application/vnd.github+json" \ + "/repos/${{ github.repository }}/actions/caches/${cache_id}" + done # 
=============================================================================================== # WASM related jobs From b92751587b057023ae64a9af79874814774d050a Mon Sep 17 00:00:00 2001 From: Bobbin Threadbare Date: Wed, 29 Apr 2026 19:09:56 -0700 Subject: [PATCH 16/28] chore: increment crate versions to v0.14.10 --- CHANGELOG.md | 4 ++-- Cargo.lock | 36 ++++++++++++++++++------------------ Cargo.toml | 2 +- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f5b934563d..24ccb2991b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,10 +1,10 @@ # Changelog -## v0.14.10 (TBD) +## v0.14.10 (2026-05-29) - Optimize `GetAccount` implementation to serve vault assets from `AccountStateForest` ([#1981](https://github.com/0xMiden/node/pull/1981)). +- Added `accept`, `origin`, `user-agent`, `forwarded`, `x-forwarded-for` and `x-real-ip` headers to telemetry for gRPC requests ([#1982](https://github.com/0xMiden/node/pull/1982)). - Trace additional RPC request properties e.g. `account.id` in `GetAccount` ([#1983](https://github.com/0xMiden/node/pull/1983)). -- Added `accept`, `origin`, `user-agent`, `forwarded`, `x-forwarded-for` and `x-real-ip` headers to telemetry for gRPC requests ([#1982](https://github.com/0xMiden/node/pull/1982). - Fixed occasional mempool panic during transaction submission, causing the lock to be held for longer than expected ([#1984](https://github.com/0xMiden/node/pull/1984)). 
## v0.14.9 (2026-04-21) diff --git a/Cargo.lock b/Cargo.lock index 006e46f007..f16e6e7040 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3058,7 +3058,7 @@ dependencies = [ [[package]] name = "miden-genesis" -version = "0.14.9" +version = "0.14.10" dependencies = [ "anyhow", "clap", @@ -3077,7 +3077,7 @@ dependencies = [ [[package]] name = "miden-large-smt-backend-rocksdb" -version = "0.14.9" +version = "0.14.10" dependencies = [ "miden-crypto", "miden-node-rocksdb-cxx-linkage-fix", @@ -3138,7 +3138,7 @@ dependencies = [ [[package]] name = "miden-network-monitor" -version = "0.14.9" +version = "0.14.10" dependencies = [ "anyhow", "axum", @@ -3166,7 +3166,7 @@ dependencies = [ [[package]] name = "miden-node" -version = "0.14.9" +version = "0.14.10" dependencies = [ "anyhow", "clap", @@ -3186,7 +3186,7 @@ dependencies = [ [[package]] name = "miden-node-block-producer" -version = "0.14.9" +version = "0.14.10" dependencies = [ "anyhow", "assert_matches", @@ -3221,7 +3221,7 @@ dependencies = [ [[package]] name = "miden-node-db" -version = "0.14.9" +version = "0.14.10" dependencies = [ "deadpool", "deadpool-diesel", @@ -3234,7 +3234,7 @@ dependencies = [ [[package]] name = "miden-node-grpc-error-macro" -version = "0.14.9" +version = "0.14.10" dependencies = [ "quote", "syn 2.0.117", @@ -3242,7 +3242,7 @@ dependencies = [ [[package]] name = "miden-node-ntx-builder" -version = "0.14.9" +version = "0.14.10" dependencies = [ "anyhow", "build-rs", @@ -3275,7 +3275,7 @@ dependencies = [ [[package]] name = "miden-node-proto" -version = "0.14.9" +version = "0.14.10" dependencies = [ "anyhow", "assert_matches", @@ -3300,7 +3300,7 @@ dependencies = [ [[package]] name = "miden-node-proto-build" -version = "0.14.9" +version = "0.14.10" dependencies = [ "build-rs", "fs-err", @@ -3311,11 +3311,11 @@ dependencies = [ [[package]] name = "miden-node-rocksdb-cxx-linkage-fix" -version = "0.14.9" +version = "0.14.10" [[package]] name = "miden-node-rpc" -version = "0.14.9" +version = "0.14.10" 
dependencies = [ "anyhow", "futures", @@ -3347,7 +3347,7 @@ dependencies = [ [[package]] name = "miden-node-store" -version = "0.14.9" +version = "0.14.10" dependencies = [ "anyhow", "assert_matches", @@ -3393,7 +3393,7 @@ dependencies = [ [[package]] name = "miden-node-stress-test" -version = "0.14.9" +version = "0.14.10" dependencies = [ "clap", "fs-err", @@ -3421,7 +3421,7 @@ dependencies = [ [[package]] name = "miden-node-utils" -version = "0.14.9" +version = "0.14.10" dependencies = [ "anyhow", "bytes", @@ -3454,7 +3454,7 @@ dependencies = [ [[package]] name = "miden-node-validator" -version = "0.14.9" +version = "0.14.10" dependencies = [ "anyhow", "aws-config", @@ -3590,7 +3590,7 @@ dependencies = [ [[package]] name = "miden-remote-prover" -version = "0.14.9" +version = "0.14.10" dependencies = [ "anyhow", "assert_matches", @@ -3627,7 +3627,7 @@ dependencies = [ [[package]] name = "miden-remote-prover-client" -version = "0.14.9" +version = "0.14.10" dependencies = [ "build-rs", "fs-err", diff --git a/Cargo.toml b/Cargo.toml index b8024e0eb8..8720181051 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,7 +32,7 @@ license = "MIT" readme = "README.md" repository = "https://github.com/0xMiden/node" rust-version = "1.93" -version = "0.14.9" +version = "0.14.10" [workspace.dependencies] # Workspace crates. 
From 1468f8e41f4e2c20277de851b4bb549ee79dc29f Mon Sep 17 00:00:00 2001 From: KOVACS Krisztian Date: Thu, 30 Apr 2026 09:59:08 +0200 Subject: [PATCH 17/28] refactor(store): pull storage map entries from account state forest (#2012) --- CHANGELOG.md | 1 + bin/stress-test/README.md | 131 ++++--- bin/stress-test/src/main.rs | 37 +- bin/stress-test/src/seeding/mod.rs | 345 ++++++++++++++++-- bin/stress-test/src/seeding/tests.rs | 136 +++++++ bin/stress-test/src/store/mod.rs | 177 +++++++++ crates/store/src/account_state_forest/mod.rs | 124 ++++++- .../store/src/account_state_forest/tests.rs | 76 ++++ crates/store/src/state/mod.rs | 94 ++++- crates/utils/src/lru_cache.rs | 22 ++ 10 files changed, 1047 insertions(+), 96 deletions(-) create mode 100644 bin/stress-test/src/seeding/tests.rs diff --git a/CHANGELOG.md b/CHANGELOG.md index 24ccb2991b..569d8257ed 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ - Added `accept`, `origin`, `user-agent`, `forwarded`, `x-forwarded-for` and `x-real-ip` headers to telemetry for gRPC requests ([#1982](https://github.com/0xMiden/node/pull/1982)). - Trace additional RPC request properties e.g. `account.id` in `GetAccount` ([#1983](https://github.com/0xMiden/node/pull/1983)). - Fixed occasional mempool panic during transaction submission, causing the lock to be held for longer than expected ([#1984](https://github.com/0xMiden/node/pull/1984)). +- Optimize `GetAccount` implementation: `all_entries` requests now mostly use state from `AccountStateForest` ([#2012](https://github.com/0xMiden/node/pull/2012)). ## v0.14.9 (2026-04-21) diff --git a/bin/stress-test/README.md b/bin/stress-test/README.md index d60a611907..557a3a96b7 100644 --- a/bin/stress-test/README.md +++ b/bin/stress-test/README.md @@ -14,20 +14,42 @@ After building the binary, you can run the following command to generate one mil The store file will then be located at `./data/miden-store.sqlite3`. 
+The seed data can be tuned for account-detail benchmarks: + +- `--public-accounts-percentage` controls how many generated accounts are public. The default is `0`. +- `--storage-map-entries` adds a deterministic storage map with the given number of entries to every public account. The default is `0`. +- `--vault-entries` adds the given number of distinct fungible assets to every public account's vault. The default is `1`, and the value must fit within the protocol note asset limit. +- `--account-update-blocks` appends the given number of blocks after account initialization. These blocks randomly update existing accounts and rotate updates through the seeded storage-map entries. The default is `0`. + +For example, this creates public accounts with storage maps, multiple vault assets, and additional account-update history: + +```bash +miden-node-stress-test seed-store \ + --data-directory ./data \ + --num-accounts 100000 \ + --public-accounts-percentage 50 \ + --storage-map-entries 128 \ + --vault-entries 5 \ + --account-update-blocks 100 +``` + ## Benchmark Store This command allows to run stress tests against the Store component. These tests use the dump file with accounts ids created when seeding the store, so be sure to run the `seed-store` command beforehand. The endpoints that you can test are: -- `load_state` -- `sync_notes` -- `sync_nullifiers` -- `sync_transactions` +- `load-state` +- `get-account` +- `sync-notes` +- `sync-nullifiers` +- `sync-transactions` - `sync-chain-mmr` -Most benchmarks accept options to control the number of iterations and concurrency level. The `load_state` endpoint is different - it simply measures the one-time startup cost of loading the state from disk. +Most benchmarks accept options to control the number of iterations and concurrency level. The `load-state` endpoint is different - it simply measures the one-time startup cost of loading the state from disk. 
+ +The `get-account` benchmark uses the account id dump created by `seed-store`, selects public accounts, and requests account details from the store. Each request asks for vault details and all entries from a storage map slot. By default, it uses the slot created by `--storage-map-entries`: `miden::mock::stress_test::map`. You can request a different slot with `--storage-map-slot`. -**Note on Concurrency**: For the endpoints that support it (`sync_notes`, `sync_nullifiers`), the concurrency parameter controls how many requests are sent in parallel to the store. Since these benchmarks run against a local store (no network overhead), higher concurrency values can help identify bottlenecks in the store's internal processing. The latency measurements exclude network time and represent pure store processing time. +**Note on Concurrency**: For request benchmarks, the concurrency parameter controls how many requests are sent in parallel to the store. Since these benchmarks run against a local store (no network overhead), higher concurrency values can help identify bottlenecks in the store's internal processing. The latency measurements exclude network time and represent pure store processing time. Example usage: @@ -39,6 +61,16 @@ miden-node-stress-test benchmark-store \ sync-notes ``` +To benchmark public account detail loading, seed public accounts first and then run: + +```bash +miden-node-stress-test benchmark-store \ + --data-directory ./data \ + --iterations 10000 \ + --concurrency 16 \ + get-account +``` + ### Results The following results were obtained using a store with 100k accounts, half of which are public. @@ -58,49 +90,49 @@ Average DB growth rate: 325.3 KB per block > Note: Each block contains 256 transactions (16 batches * 16 transactions). 
-| Block | Insert Time (ms) | Get Block Inputs Time (ms) | Get Batch Inputs Time (ms) | Block Size (KB) | DB Size (MB) | -| ------ | ------------------ | ----------------------------- | ------------------------------ | ------------------ | ------------- | -| 0 | 22 | 1 | 0 | 375.6 | 0.3 | -| 50 | 186 | 9 | 1 | 473.6 | 22.2 | -| 100 | 199 | 10 | 1 | 473.6 | 40.7 | -| 150 | 219 | 10 | 1 | 473.6 | 58.1 | -| 200 | 218 | 11 | 1 | 473.6 | 74.8 | -| 250 | 222 | 11 | 1 | 473.6 | 91.6 | -| 300 | 228 | 12 | 1 | 473.6 | 108.1 | -| 350 | 232 | 13 | 1 | 473.6 | 124.4 | +| Block | Insert Time (ms) | Get Block Inputs Time (ms) | Get Batch Inputs Time (ms) | Block Size (KB) | DB Size (MB) | +| ----- | ---------------- | -------------------------- | -------------------------- | --------------- | ------------ | +| 0 | 22 | 1 | 0 | 375.6 | 0.3 | +| 50 | 186 | 9 | 1 | 473.6 | 22.2 | +| 100 | 199 | 10 | 1 | 473.6 | 40.7 | +| 150 | 219 | 10 | 1 | 473.6 | 58.1 | +| 200 | 218 | 11 | 1 | 473.6 | 74.8 | +| 250 | 222 | 11 | 1 | 473.6 | 91.6 | +| 300 | 228 | 12 | 1 | 473.6 | 108.1 | +| 350 | 232 | 13 | 1 | 473.6 | 124.4 | #### Database stats > Note: Database contains 100215 accounts and 100215 notes across all blocks. 
-| Table | Size (MB) | KB/Entry | -| ---------------------------------- | --------------- | ---------- | -| accounts | 26.1 | 0.3 | -| account_deltas | 1.2 | 0.0 | -| account_fungible_asset_deltas | 2.2 | 0.0 | -| account_non_fungible_asset_updates | 0.0 | - | -| account_storage_map_updates | 0.0 | - | -| account_storage_slot_updates | 3.1 | 0.1 | -| block_headers | 0.1 | 0.3 | -| notes | 49.1 | 0.5 | -| note_scripts | 0.0 | 8.0 | -| nullifiers | 4.6 | 0.0 | -| transactions | 6.0 | 0.1 | +| Table | Size (MB) | KB/Entry | +| ---------------------------------- | --------- | -------- | +| accounts | 26.1 | 0.3 | +| account_deltas | 1.2 | 0.0 | +| account_fungible_asset_deltas | 2.2 | 0.0 | +| account_non_fungible_asset_updates | 0.0 | - | +| account_storage_map_updates | 0.0 | - | +| account_storage_slot_updates | 3.1 | 0.1 | +| block_headers | 0.1 | 0.3 | +| notes | 49.1 | 0.5 | +| note_scripts | 0.0 | 8.0 | +| nullifiers | 4.6 | 0.0 | +| transactions | 6.0 | 0.1 | #### Index stats -| Index | Size (MB) | -| ---------------------------------- | --------------- | -| idx_accounts_network_prefix | 0.0 | -| idx_notes_note_id | 4.4 | -| idx_notes_sender | 2.9 | -| idx_notes_tag | 1.6 | -| idx_notes_nullifier | 4.4 | -| idx_unconsumed_network_notes | 1.1 | -| idx_nullifiers_prefix | 4.3 | -| idx_nullifiers_block_num | 4.2 | -| idx_transactions_account_id | 5.6 | -| idx_transactions_block_num | 4.2 | +| Index | Size (MB) | +| ---------------------------- | --------- | +| idx_accounts_network_prefix | 0.0 | +| idx_notes_note_id | 4.4 | +| idx_notes_sender | 2.9 | +| idx_notes_tag | 1.6 | +| idx_notes_nullifier | 4.4 | +| idx_unconsumed_network_notes | 1.1 | +| idx_nullifiers_prefix | 4.3 | +| idx_nullifiers_block_num | 4.2 | +| idx_transactions_account_id | 5.6 | +| idx_transactions_block_num | 4.2 | Current results of the store stress-tests: @@ -175,5 +207,22 @@ Pagination statistics: Average pages per run: 1.00 ``` +- get-account +``` bash +$ miden-node-stress-test 
benchmark-store --data-directory ./data --iterations 10000 --concurrency 16 get-account + +Average request latency: 937.969µs +P50 request latency: 688.332µs +P95 request latency: 932.549µs +P99 request latency: 1.119977ms +P99.9 request latency: 42.992839ms +GetAccount statistics: + Total runs: 10000 + Storage map limit exceeded responses: 0 + Average returned storage map entries: 64.00 + Vault limit exceeded responses: 0 + Average returned vault assets: 2.00 +``` + ## License This project is [MIT licensed](../../LICENSE). diff --git a/bin/stress-test/src/main.rs b/bin/stress-test/src/main.rs index a5cc82f9f4..9e02f4dd55 100644 --- a/bin/stress-test/src/main.rs +++ b/bin/stress-test/src/main.rs @@ -4,6 +4,7 @@ use clap::{Parser, Subcommand}; use miden_node_utils::logging::OpenTelemetry; use seeding::seed_store; use store::{ + bench_get_account, bench_sync_chain_mmr, bench_sync_notes, bench_sync_nullifiers, @@ -39,6 +40,18 @@ pub enum Command { /// private accounts. #[arg(short, long, value_name = "PUBLIC_ACCOUNTS_PERCENTAGE", default_value = "0")] public_accounts_percentage: u8, + + /// Number of entries to add to a deterministic storage map on every public account. + #[arg(long, value_name = "STORAGE_MAP_ENTRIES", default_value = "0")] + storage_map_entries: usize, + + /// Number of distinct vault assets to add to every public account. + #[arg(long, value_name = "VAULT_ENTRIES", default_value = "1")] + vault_entries: usize, + + /// Number of post-initialization blocks to generate with random account updates. + #[arg(long, value_name = "ACCOUNT_UPDATE_BLOCKS", default_value = "0")] + account_update_blocks: usize, }, /// Benchmark the performance of the store endpoints. 
@@ -62,7 +75,7 @@ pub enum Command { }, } -#[derive(Subcommand, Clone, Copy)] +#[derive(Subcommand, Clone)] pub enum Endpoint { #[command(name = "sync-nullifiers")] SyncNullifiers { @@ -89,6 +102,12 @@ pub enum Endpoint { }, #[command(name = "load-state")] LoadState, + #[command(name = "get-account")] + GetAccount { + /// Storage slot name to request with all entries. + #[arg(long, value_name = "SLOT_NAME", default_value = seeding::BENCHMARK_STORAGE_MAP_SLOT_NAME)] + storage_map_slot: String, + }, } #[tokio::main] @@ -103,8 +122,19 @@ async fn main() { data_directory, num_accounts, public_accounts_percentage, + storage_map_entries, + vault_entries, + account_update_blocks, } => { - seed_store(data_directory, num_accounts, public_accounts_percentage).await; + seed_store( + data_directory, + num_accounts, + public_accounts_percentage, + storage_map_entries, + vault_entries, + account_update_blocks, + ) + .await; }, Command::BenchmarkStore { endpoint, @@ -134,6 +164,9 @@ async fn main() { Endpoint::LoadState => { load_state(&data_directory).await; }, + Endpoint::GetAccount { storage_map_slot } => { + bench_get_account(data_directory, iterations, concurrency, storage_map_slot).await; + }, }, } } diff --git a/bin/stress-test/src/seeding/mod.rs b/bin/stress-test/src/seeding/mod.rs index 0b860838ca..f1da81554f 100644 --- a/bin/stress-test/src/seeding/mod.rs +++ b/bin/stress-test/src/seeding/mod.rs @@ -15,10 +15,18 @@ use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{ Account, AccountBuilder, + AccountComponent, + AccountComponentMetadata, AccountDelta, AccountId, + AccountStorageDelta, AccountStorageMode, AccountType, + AccountVaultDelta, + StorageMap, + StorageMapKey, + StorageSlot, + StorageSlotName, }; use miden_protocol::asset::{Asset, FungibleAsset, TokenSymbol}; use miden_protocol::batch::{BatchAccountUpdate, BatchId, ProvenBatch}; @@ -35,7 +43,7 @@ use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey as EcdsaSecretKey; 
use miden_protocol::crypto::dsa::falcon512_poseidon2::{PublicKey, SecretKey}; use miden_protocol::crypto::rand::RandomCoin; use miden_protocol::errors::AssetError; -use miden_protocol::note::{Note, NoteHeader, NoteId, NoteInclusionProof}; +use miden_protocol::note::{Note, NoteAssets, NoteHeader, NoteId, NoteInclusionProof}; use miden_protocol::transaction::{ InputNote, InputNoteCommitment, @@ -53,8 +61,10 @@ use miden_protocol::{Felt, ONE, Word}; use miden_standards::account::auth::AuthSingleSig; use miden_standards::account::faucets::BasicFungibleFaucet; use miden_standards::account::wallets::BasicWallet; +use miden_standards::code_builder::CodeBuilder; use miden_standards::note::P2idNote; use rand::Rng; +use rand::seq::SliceRandom; use rayon::iter::{IntoParallelIterator, ParallelIterator}; use rayon::prelude::ParallelSlice; use tokio::io::AsyncWriteExt; @@ -65,6 +75,8 @@ use tonic::transport::Channel; use url::Url; mod metrics; +#[cfg(test)] +mod tests; // CONSTANTS // ================================================================================================ @@ -74,6 +86,8 @@ const TRANSACTIONS_PER_BATCH: usize = 16; pub const ACCOUNTS_FILENAME: &str = "accounts.txt"; +pub const BENCHMARK_STORAGE_MAP_SLOT_NAME: &str = "miden::mock::stress_test::map"; + // SEED STORE // ================================================================================================ @@ -82,8 +96,16 @@ pub async fn seed_store( data_directory: PathBuf, num_accounts: usize, public_accounts_percentage: u8, + storage_map_entries: usize, + vault_entries: usize, + account_update_blocks: usize, ) { let start = Instant::now(); + assert!( + vault_entries <= NoteAssets::MAX_NUM_ASSETS, + "--vault-entries must be at most {}", + NoteAssets::MAX_NUM_ASSETS + ); // Recreate the data directory (it should be empty for store bootstrapping). 
// @@ -92,10 +114,12 @@ pub async fn seed_store( fs_err::create_dir_all(&data_directory).expect("created data directory"); // generate the faucet account and the genesis state - let faucet = create_faucet(); + let benchmark_faucets = create_benchmark_faucets(vault_entries); + let faucet = benchmark_faucets[0].clone(); + let asset_faucet_ids = benchmark_faucets.iter().map(Account::id).collect::>(); let fee_params = FeeParameters::new(faucet.id(), 0).unwrap(); let signer = EcdsaSecretKey::new(); - let genesis_state = GenesisState::new(vec![faucet.clone()], fee_params, 1, 1, signer.clone()); + let genesis_state = GenesisState::new(benchmark_faucets, fee_params, 1, 1, signer.clone()); let genesis_block = genesis_state .clone() .into_block() @@ -121,6 +145,10 @@ pub async fn seed_store( data_directory, accounts_filepath, &signer, + storage_map_entries, + vault_entries, + account_update_blocks, + asset_faucet_ids, ) .await; @@ -133,6 +161,7 @@ pub async fn seed_store( /// The first transaction in each batch sends assets from the faucet to 255 accounts. /// The rest of the transactions consume the notes created by the faucet in the previous block. #[expect(clippy::too_many_arguments)] +#[expect(clippy::too_many_lines)] async fn generate_blocks( num_accounts: usize, public_accounts_percentage: u8, @@ -142,6 +171,10 @@ async fn generate_blocks( data_directory: DataDirectory, accounts_filepath: PathBuf, signer: &EcdsaSecretKey, + storage_map_entries: usize, + vault_entries: usize, + account_update_blocks: usize, + asset_faucet_ids: Vec, ) -> SeedingMetrics { // Each block is composed of [`BATCHES_PER_BLOCK`] batches, and each batch is composed of // [`TRANSACTIONS_PER_BATCH`] txs. 
The first note of the block is always a send assets tx @@ -152,8 +185,10 @@ async fn generate_blocks( let mut account_ids = vec![]; let mut note_nullifiers = vec![]; + let mut account_states: BTreeMap = BTreeMap::new(); - let mut consume_notes_txs = vec![]; + let mut consume_notes_txs: Vec = vec![]; + let mut pending_consumed_accounts: Vec = vec![]; let consumes_per_block = TRANSACTIONS_PER_BATCH * BATCHES_PER_BLOCK - 1; #[expect(clippy::cast_sign_loss, clippy::cast_precision_loss)] @@ -184,8 +219,10 @@ async fn generate_blocks( AccountStorageMode::Public, &key_pair, &rng, - faucet.id(), + &asset_faucet_ids, i, + storage_map_entries, + vault_entries, ); // create private accounts and notes that mint assets for these accounts @@ -194,8 +231,10 @@ async fn generate_blocks( AccountStorageMode::Private, &key_pair, &rng, - faucet.id(), + &asset_faucet_ids, i, + storage_map_entries, + vault_entries, ); let notes = [pub_notes, priv_notes].concat(); @@ -222,6 +261,8 @@ async fn generate_blocks( // update blocks prev_block_header = apply_block(batches, block_inputs, store_client, &mut metrics, signer).await; + account_states + .extend(pending_consumed_accounts.into_iter().map(|account| (account.id(), account))); if current_anchor_header.block_epoch() != prev_block_header.block_epoch() { current_anchor_header = prev_block_header.clone(); } @@ -229,8 +270,13 @@ async fn generate_blocks( // create the consume notes txs to be used in the next block let batch_inputs = get_batch_inputs(store_client, &prev_block_header, ¬es, &mut metrics).await; - consume_notes_txs = - create_consume_note_txs(&prev_block_header, accounts, notes, &batch_inputs.note_proofs); + (pending_consumed_accounts, consume_notes_txs) = create_consume_note_txs( + &prev_block_header, + accounts, + notes, + &batch_inputs.note_proofs, + None, + ); // track store size every 50 blocks if i % 50 == 0 { @@ -238,6 +284,67 @@ async fn generate_blocks( } } + let update_note_faucet_ids = + 
asset_faucet_ids.iter().take(vault_entries).copied().collect::>(); + let mut random = rand::rng(); + for update_block_index in 0..account_update_blocks { + let mut block_txs = Vec::with_capacity(BATCHES_PER_BLOCK * TRANSACTIONS_PER_BATCH); + + let selected_account_ids = select_random_account_ids_for_update_notes( + &account_states, + &pending_consumed_accounts, + consumes_per_block, + &mut random, + ); + let notes = { + let mut note_rng = rng.lock().unwrap(); + selected_account_ids + .iter() + .map(|account_id| create_note(&update_note_faucet_ids, *account_id, &mut note_rng)) + .collect::>() + }; + + let emit_note_tx = create_emit_note_tx(&prev_block_header, &mut faucet, notes.clone()); + block_txs.push(emit_note_tx); + block_txs.extend(consume_notes_txs); + + let batches: Vec = block_txs + .par_chunks(TRANSACTIONS_PER_BATCH) + .map(|txs| create_batch(txs, &prev_block_header)) + .collect(); + + let block_inputs = get_block_inputs(store_client, &batches, &mut metrics).await; + + prev_block_header = + apply_block(batches, block_inputs, store_client, &mut metrics, signer).await; + account_states + .extend(pending_consumed_accounts.into_iter().map(|account| (account.id(), account))); + if current_anchor_header.block_epoch() != prev_block_header.block_epoch() { + current_anchor_header = prev_block_header.clone(); + } + + let batch_inputs = + get_batch_inputs(store_client, &prev_block_header, ¬es, &mut metrics).await; + let accounts = selected_account_ids + .iter() + .filter_map(|account_id| account_states.get(account_id).cloned()) + .collect::>(); + (pending_consumed_accounts, consume_notes_txs) = create_consume_note_txs( + &prev_block_header, + accounts, + notes, + &batch_inputs.note_proofs, + Some(BenchmarkStorageUpdate { + block_index: update_block_index, + storage_map_entries, + }), + ); + + if update_block_index % 50 == 0 { + metrics.record_store_size(); + } + } + // dump account ids to a file let mut file = fs::File::create(accounts_filepath).await.unwrap(); for 
id in account_ids { @@ -290,14 +397,28 @@ fn fee_from_block(block_ref: &BlockHeader) -> Result /// Returns a tuple with: /// - The list of new accounts /// - The list of new notes +#[expect(clippy::too_many_arguments)] fn create_accounts_and_notes( num_accounts: usize, storage_mode: AccountStorageMode, key_pair: &SecretKey, rng: &Arc>, - faucet_id: AccountId, + asset_faucet_ids: &[AccountId], block_num: usize, + storage_map_entries: usize, + vault_entries: usize, ) -> (Vec, Vec) { + assert!( + !asset_faucet_ids.is_empty(), + "at least one faucet id is required to create benchmark notes" + ); + let note_faucet_ids = match storage_mode { + AccountStorageMode::Public => { + asset_faucet_ids.iter().take(vault_entries).copied().collect() + }, + AccountStorageMode::Private | AccountStorageMode::Network => vec![asset_faucet_ids[0]], + }; + (0..num_accounts) .into_par_iter() .map(|account_index| { @@ -305,24 +426,29 @@ fn create_accounts_and_notes( key_pair.public_key(), ((block_num * num_accounts) + account_index) as u64, storage_mode, + storage_map_entries, ); let note = { let mut rng = rng.lock().unwrap(); - create_note(faucet_id, account.id(), &mut rng) + create_note(¬e_faucet_ids, account.id(), &mut rng) }; (account, note) }) .collect() } -/// Creates a public P2ID note containing 10 tokens of the fungible asset associated with the -/// specified `faucet_id` and sent to the specified target account. -fn create_note(faucet_id: AccountId, target_id: AccountId, rng: &mut RandomCoin) -> Note { - let asset = Asset::Fungible(FungibleAsset::new(faucet_id, 10).unwrap()); +/// Creates a public P2ID note containing 10 tokens for each requested fungible asset and sends it +/// to the specified target account. 
+fn create_note(faucet_ids: &[AccountId], target_id: AccountId, rng: &mut RandomCoin) -> Note { + let assets = faucet_ids + .iter() + .map(|faucet_id| Asset::Fungible(FungibleAsset::new(*faucet_id, 10).unwrap())) + .collect(); + let sender = faucet_ids.first().copied().unwrap_or(target_id); P2idNote::create( - faucet_id, + sender, target_id, - vec![asset], + assets, miden_protocol::note::NoteType::Public, miden_protocol::note::NoteAttachment::default(), rng, @@ -330,28 +456,125 @@ fn create_note(faucet_id: AccountId, target_id: AccountId, rng: &mut RandomCoin) .expect("note creation failed") } -/// Creates a new private account with a given public key and anchor block. Generates the seed from -/// the given index. -fn create_account(public_key: PublicKey, index: u64, storage_mode: AccountStorageMode) -> Account { +fn select_random_account_ids_for_update_notes( + account_states: &BTreeMap, + pending_accounts: &[Account], + max_accounts: usize, + rng: &mut R, +) -> Vec { + let mut account_ids = account_states.keys().copied().collect::>(); + for account in pending_accounts { + let account_id = account.id(); + if !account_states.contains_key(&account_id) { + account_ids.push(account_id); + } + } + + account_ids.shuffle(rng); + account_ids.truncate(max_accounts); + account_ids +} + +#[derive(Clone, Copy)] +struct BenchmarkStorageUpdate { + block_index: usize, + storage_map_entries: usize, +} + +fn benchmark_storage_map_update_value(block_index: usize, tx_index: usize, key_index: u32) -> Word { + Word::from([ + Felt::ZERO, + Felt::from(u32::try_from(block_index).expect("update block index fits into u32")), + Felt::from(u32::try_from(tx_index).expect("transaction index fits into u32")), + Felt::from(key_index), + ]) +} + +fn update_benchmark_storage_map_entry( + account: &mut Account, + block_index: usize, + tx_index: usize, + storage_map_entries: usize, +) -> bool { + if !account.is_public() || storage_map_entries == 0 { + return false; + } + + let key_index = + 
u32::try_from((tx_index % storage_map_entries) + 1).expect("storage map key fits into u32"); + let key = StorageMapKey::from_index(key_index); + let value = benchmark_storage_map_update_value(block_index, tx_index, key_index); + + account + .storage_mut() + .set_map_item(&benchmark_storage_map_slot(), key, value) + .is_ok() +} + +pub fn benchmark_storage_map_slot() -> StorageSlotName { + StorageSlotName::new(BENCHMARK_STORAGE_MAP_SLOT_NAME).unwrap() +} + +/// Creates a new account with a given public key and storage mode. Generates the seed from the +/// given index. +fn create_account( + public_key: PublicKey, + index: u64, + storage_mode: AccountStorageMode, + storage_map_entries: usize, +) -> Account { let init_seed: Vec<_> = index.to_be_bytes().into_iter().chain([0u8; 24]).collect(); - AccountBuilder::new(init_seed.try_into().unwrap()) + let mut builder = AccountBuilder::new(init_seed.try_into().unwrap()) .account_type(AccountType::RegularAccountImmutableCode) .storage_mode(storage_mode) .with_auth_component(AuthSingleSig::new(public_key.into(), AuthScheme::Falcon512Poseidon2)) - .with_component(BasicWallet) - .build() - .unwrap() + .with_component(BasicWallet); + + if storage_mode == AccountStorageMode::Public && storage_map_entries > 0 { + let entries = (1..=storage_map_entries) + .map(|i| { + let i = u32::try_from(i).expect("storage map entry index fits into u32"); + ( + StorageMapKey::from_index(i), + Word::from([Felt::ZERO, Felt::ZERO, Felt::ZERO, Felt::from(i)]), + ) + }) + .collect::>(); + let storage_map = StorageMap::with_entries(entries).unwrap(); + let component_storage = + vec![StorageSlot::with_map(benchmark_storage_map_slot(), storage_map)]; + let component_code = CodeBuilder::default() + .compile_component_code("benchmark::storage_map", "pub proc noop push.0 drop end") + .unwrap(); + let component = AccountComponent::new( + component_code, + component_storage, + AccountComponentMetadata::new( + "benchmark_storage_map", 
[AccountType::RegularAccountImmutableCode], + ), + ) + .unwrap(); + builder = builder.with_component(component); + } + + builder.build().unwrap() } -/// Creates a new faucet account. -fn create_faucet() -> Account { +fn create_benchmark_faucets(vault_entries: usize) -> Vec { + (0..vault_entries.max(1)) + .map(|index| create_faucet_with_seed(index as u64)) + .collect() +} + +fn create_faucet_with_seed(index: u64) -> Account { let coin_seed: [u64; 4] = rand::rng().random(); let mut rng = RandomCoin::new(coin_seed.map(Felt::new).into()); let key_pair = SecretKey::with_rng(&mut rng); - let init_seed = [0_u8; 32]; + let init_seed: Vec<_> = index.to_be_bytes().into_iter().chain([0u8; 24]).collect(); let token_symbol = TokenSymbol::new("TEST").unwrap(); - AccountBuilder::new(init_seed) + AccountBuilder::new(init_seed.try_into().unwrap()) .account_type(AccountType::FungibleFaucet) .storage_mode(AccountStorageMode::Private) .with_component(BasicFungibleFaucet::new(token_symbol, 2, Felt::new(u64::MAX)).unwrap()) @@ -390,19 +613,22 @@ fn create_consume_note_txs( accounts: Vec, notes: Vec, note_proofs: &BTreeMap, -) -> Vec { + storage_update: Option, +) -> (Vec, Vec) { accounts .into_iter() .zip(notes) - .map(|(account, note)| { + .enumerate() + .map(|(tx_index, (account, note))| { let inclusion_proof = note_proofs.get(¬e.id()).unwrap(); create_consume_note_tx( block_ref, account, InputNote::authenticated(note, inclusion_proof.clone()), + storage_update.map(|update| (update, tx_index)), ) }) - .collect() + .unzip() } /// Creates a transaction that creates an account and consumes the given input note. 
@@ -412,17 +638,32 @@ fn create_consume_note_tx( block_ref: &BlockHeader, mut account: Account, input_note: InputNote, -) -> ProvenTransaction { + storage_update: Option<(BenchmarkStorageUpdate, usize)>, +) -> (Account, ProvenTransaction) { let init_hash = account.initial_commitment(); + let is_new_account = account.is_new(); input_note.note().assets().iter().for_each(|asset| { account.vault_mut().add_asset(*asset).unwrap(); }); + if let Some((storage_update, tx_index)) = storage_update { + update_benchmark_storage_map_entry( + &mut account, + storage_update.block_index, + tx_index, + storage_update.storage_map_entries, + ); + } + account.increment_nonce(ONE).unwrap(); let (details, account_delta_commitment) = if account.is_public() { - let account_delta = AccountDelta::try_from(account.clone()).unwrap(); + let account_delta = if is_new_account { + AccountDelta::try_from(account.clone()).unwrap() + } else { + create_existing_account_delta(&account, input_note.note().assets(), storage_update) + }; let commitment = account_delta.clone().to_commitment(); (AccountUpdateDetails::Delta(account_delta), commitment) } else { @@ -437,7 +678,7 @@ fn create_consume_note_tx( details, ) .unwrap(); - ProvenTransaction::new( + let transaction = ProvenTransaction::new( account_update, vec![InputNoteCommitment::from(input_note)], Vec::::new(), @@ -447,7 +688,43 @@ fn create_consume_note_tx( u32::MAX.into(), ExecutionProof::new_dummy(), ) - .unwrap() + .unwrap(); + + (account, transaction) +} + +fn create_existing_account_delta( + account: &Account, + note_assets: &NoteAssets, + storage_update: Option<(BenchmarkStorageUpdate, usize)>, +) -> AccountDelta { + let mut vault_delta = AccountVaultDelta::default(); + for asset in note_assets.iter() { + vault_delta.add_asset(*asset).unwrap(); + } + + let mut storage_delta = AccountStorageDelta::new(); + if let Some((storage_update, tx_index)) = storage_update { + if storage_update.storage_map_entries > 0 + && 
account.storage().get(&benchmark_storage_map_slot()).is_some() + { + let key_index = u32::try_from((tx_index % storage_update.storage_map_entries) + 1) + .expect("storage map key fits into u32"); + storage_delta + .set_map_item( + benchmark_storage_map_slot(), + StorageMapKey::from_index(key_index), + benchmark_storage_map_update_value( + storage_update.block_index, + tx_index, + key_index, + ), + ) + .unwrap(); + } + } + + AccountDelta::new(account.id(), storage_delta, vault_delta, ONE).unwrap() } /// Creates a transaction from the faucet that creates the given output notes. diff --git a/bin/stress-test/src/seeding/tests.rs b/bin/stress-test/src/seeding/tests.rs new file mode 100644 index 0000000000..3084d2a8be --- /dev/null +++ b/bin/stress-test/src/seeding/tests.rs @@ -0,0 +1,136 @@ +use miden_protocol::account::StorageSlotContent; + +use super::*; + +fn benchmark_fungible_faucet_ids(vault_entries: usize) -> Vec { + create_benchmark_faucets(vault_entries) + .into_iter() + .map(|account| account.id()) + .collect() +} + +#[test] +fn public_account_can_be_created_with_large_storage_map() { + let coin_seed = [1, 2, 3, 4].map(Felt::new); + let mut rng = RandomCoin::new(coin_seed.into()); + let key_pair = SecretKey::with_rng(&mut rng); + + let account = create_account(key_pair.public_key(), 42, AccountStorageMode::Public, 128); + + let map_slot = account + .storage() + .slots() + .iter() + .find(|slot| slot.name() == &benchmark_storage_map_slot()) + .expect("benchmark storage map slot should exist"); + + let StorageSlotContent::Map(storage_map) = map_slot.content() else { + panic!("benchmark slot should be a storage map"); + }; + + assert_eq!(storage_map.num_entries(), 128); +} + +#[test] +fn private_account_ignores_large_storage_map_entries() { + let coin_seed = [1, 2, 3, 4].map(Felt::new); + let mut rng = RandomCoin::new(coin_seed.into()); + let key_pair = SecretKey::with_rng(&mut rng); + + let account = create_account(key_pair.public_key(), 42, 
AccountStorageMode::Private, 128); + + assert!( + account + .storage() + .slots() + .iter() + .all(|slot| slot.name() != &benchmark_storage_map_slot()) + ); +} + +#[test] +fn public_account_note_contains_requested_distinct_vault_assets() { + let coin_seed = [1, 2, 3, 4].map(Felt::new); + let rng = Arc::new(Mutex::new(RandomCoin::new(coin_seed.into()))); + let mut key_rng = rng.lock().unwrap(); + let key_pair = SecretKey::with_rng(&mut *key_rng); + drop(key_rng); + + let faucet_ids = benchmark_fungible_faucet_ids(5); + let (_, notes) = create_accounts_and_notes( + 1, + AccountStorageMode::Public, + &key_pair, + &rng, + &faucet_ids, + 0, + 0, + 5, + ); + + let assets = notes[0].assets(); + assert_eq!(assets.num_assets(), 5); + + let distinct_vault_keys = + assets.iter().map(Asset::vault_key).collect::>(); + assert_eq!(distinct_vault_keys.len(), 5); +} + +#[test] +fn private_account_note_keeps_single_vault_asset() { + let coin_seed = [1, 2, 3, 4].map(Felt::new); + let rng = Arc::new(Mutex::new(RandomCoin::new(coin_seed.into()))); + let mut key_rng = rng.lock().unwrap(); + let key_pair = SecretKey::with_rng(&mut *key_rng); + drop(key_rng); + + let faucet_ids = benchmark_fungible_faucet_ids(5); + let (_, notes) = create_accounts_and_notes( + 1, + AccountStorageMode::Private, + &key_pair, + &rng, + &faucet_ids, + 0, + 0, + 5, + ); + + assert_eq!(notes[0].assets().num_assets(), 1); +} + +#[test] +fn public_account_storage_map_entry_can_be_updated_for_benchmark_blocks() { + let coin_seed = [1, 2, 3, 4].map(Felt::new); + let mut rng = RandomCoin::new(coin_seed.into()); + let key_pair = SecretKey::with_rng(&mut rng); + let mut account = create_account(key_pair.public_key(), 42, AccountStorageMode::Public, 4); + + let key = StorageMapKey::from_index(2); + let old_value = account + .storage() + .get_map_item(&benchmark_storage_map_slot(), key.into()) + .unwrap(); + + let updated = update_benchmark_storage_map_entry(&mut account, 3, 9, 4); + + let new_value = account + 
.storage() + .get_map_item(&benchmark_storage_map_slot(), key.into()) + .unwrap(); + assert!(updated); + assert_ne!(new_value, old_value); + assert_eq!(new_value, benchmark_storage_map_update_value(3, 9, 2)); +} + +#[test] +fn private_account_storage_map_update_is_skipped() { + let coin_seed = [1, 2, 3, 4].map(Felt::new); + let mut rng = RandomCoin::new(coin_seed.into()); + let key_pair = SecretKey::with_rng(&mut rng); + let mut account = create_account(key_pair.public_key(), 42, AccountStorageMode::Private, 4); + + let updated = update_benchmark_storage_map_entry(&mut account, 3, 9, 4); + + assert!(!updated); +} diff --git a/bin/stress-test/src/store/mod.rs b/bin/stress-test/src/store/mod.rs index 7c68b025e8..f31a18eb64 100644 --- a/bin/stress-test/src/store/mod.rs +++ b/bin/stress-test/src/store/mod.rs @@ -7,6 +7,7 @@ use miden_node_proto::generated::{self as proto}; use miden_node_store::state::State; use miden_node_utils::clap::StorageOptions; use miden_node_utils::tracing::grpc::OtelInterceptor; +use miden_protocol::Word; use miden_protocol::account::AccountId; use miden_protocol::note::{NoteDetails, NoteTag}; use miden_protocol::utils::serde::{Deserializable, Serializable}; @@ -34,6 +35,182 @@ const NOTE_IDS_PER_NULLIFIERS_CHECK: usize = 20; /// Number of attempts the benchmark will make to reach the store before proceeding. const STORE_STATUS_RETRIES: usize = 10; +// GET ACCOUNT +// ================================================================================================ + +/// Sends multiple `get_account` requests to the store and prints the performance. +/// +/// Each request asks for all entries in `storage_map_slot`, which is intended to exercise the +/// storage-map reconstruction path for public accounts seeded by this stress-test tool. 
+pub async fn bench_get_account( + data_directory: PathBuf, + iterations: usize, + concurrency: usize, + storage_map_slot: String, +) { + let accounts_file = data_directory.join(ACCOUNTS_FILENAME); + let accounts = fs::read_to_string(&accounts_file) + .await + .unwrap_or_else(|e| panic!("missing file {}: {e:?}", accounts_file.display())); + let mut account_ids: Vec = accounts + .lines() + .map(|a| AccountId::from_hex(a).expect("invalid account id")) + .filter(AccountId::has_public_state) + .collect(); + + assert!( + !account_ids.is_empty(), + "no public accounts found in {}; seed with --public-accounts-percentage > 0", + accounts_file.display() + ); + + let mut rng = rand::rng(); + account_ids.shuffle(&mut rng); + let mut account_ids = account_ids.into_iter().cycle(); + + let (store_client, _) = start_store(data_directory).await; + + wait_for_store(&store_client).await.unwrap(); + + let request = |_| { + let mut client = store_client.clone(); + let account_id = account_ids.next().expect("cycled public account ids never end"); + let storage_map_slot = storage_map_slot.clone(); + tokio::spawn(async move { get_account(&mut client, account_id, storage_map_slot).await }) + }; + + let results = stream::iter(0..iterations) + .map(request) + .buffer_unordered(concurrency) + .map(|res| res.unwrap()) + .collect::>() + .await; + + let timers_accumulator: Vec = results.iter().map(|r| r.duration).collect(); + print_summary(&timers_accumulator); + + let total_runs = results.len(); + let storage_map_limit_exceeded = + results.iter().filter(|r| r.storage_map_limit_exceeded).count(); + let vault_limit_exceeded = results.iter().filter(|r| r.vault_limit_exceeded).count(); + #[expect(clippy::cast_precision_loss)] + let average_storage_map_entries = if total_runs > 0 { + results.iter().map(|r| r.storage_map_entries as f64).sum::() / total_runs as f64 + } else { + 0.0 + }; + #[expect(clippy::cast_precision_loss)] + let average_vault_assets = if total_runs > 0 { + results.iter().map(|r| 
r.vault_assets as f64).sum::() / total_runs as f64 + } else { + 0.0 + }; + + println!("GetAccount statistics:"); + println!(" Total runs: {total_runs}"); + println!(" Storage map limit exceeded responses: {storage_map_limit_exceeded}"); + println!(" Average returned storage map entries: {average_storage_map_entries:.2}"); + println!(" Vault limit exceeded responses: {vault_limit_exceeded}"); + println!(" Average returned vault assets: {average_vault_assets:.2}"); +} + +#[derive(Clone)] +struct GetAccountRun { + duration: Duration, + storage_map_entries: usize, + storage_map_limit_exceeded: bool, + vault_assets: usize, + vault_limit_exceeded: bool, +} + +async fn get_account( + api_client: &mut RpcClient>, + account_id: AccountId, + storage_map_slot: String, +) -> GetAccountRun { + use proto::rpc::account_storage_details::account_storage_map_details::Entries; + + let request = get_account_request(account_id, storage_map_slot); + + let start = Instant::now(); + let response = api_client.get_account(request).await.unwrap().into_inner(); + let duration = start.elapsed(); + + let details = response.details; + let map_details = details + .as_ref() + .and_then(|details| details.storage_details.as_ref()) + .and_then(|storage_details| storage_details.map_details.first()); + let (storage_map_entries, storage_map_limit_exceeded) = match map_details { + Some(details) if details.too_many_entries => (0, true), + Some(details) => match &details.entries { + Some(Entries::AllEntries(entries)) => (entries.entries.len(), false), + _ => (0, false), + }, + None => (0, false), + }; + + let vault_details = details.and_then(|details| details.vault_details); + let (vault_assets, vault_limit_exceeded) = match vault_details { + Some(details) if details.too_many_assets => (0, true), + Some(details) => (details.assets.len(), false), + None => (0, false), + }; + + GetAccountRun { + duration, + storage_map_entries, + storage_map_limit_exceeded, + vault_assets, + vault_limit_exceeded, + } +} + 
+fn get_account_request( + account_id: AccountId, + storage_map_slot: String, +) -> proto::rpc::AccountRequest { + use proto::rpc::account_request::AccountDetailRequest; + use proto::rpc::account_request::account_detail_request::StorageMapDetailRequest; + use proto::rpc::account_request::account_detail_request::storage_map_detail_request::SlotData; + + proto::rpc::AccountRequest { + account_id: Some(proto::account::AccountId { id: account_id.to_bytes() }), + block_num: None, + details: Some(AccountDetailRequest { + code_commitment: None, + asset_vault_commitment: Some(proto::primitives::Digest::from(Word::empty())), + storage_maps: vec![StorageMapDetailRequest { + slot_name: storage_map_slot, + slot_data: Some(SlotData::AllEntries(true)), + }], + }), + } +} + +#[cfg(test)] +mod tests { + use miden_protocol::testing::account_id::ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE; + + use super::*; + + #[test] + fn get_account_request_includes_vault_details() { + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE) + .expect("test account id should be valid"); + let request = get_account_request( + account_id, + crate::seeding::BENCHMARK_STORAGE_MAP_SLOT_NAME.to_string(), + ); + + let details = request.details.expect("details should be requested"); + assert!( + details.asset_vault_commitment.is_some(), + "benchmark get-account should request vault asset details" + ); + } +} + // SYNC NOTES // ================================================================================================ diff --git a/crates/store/src/account_state_forest/mod.rs b/crates/store/src/account_state_forest/mod.rs index 0c9adceb3a..ca58c13871 100644 --- a/crates/store/src/account_state_forest/mod.rs +++ b/crates/store/src/account_state_forest/mod.rs @@ -1,9 +1,11 @@ use std::collections::BTreeSet; +use std::num::NonZeroUsize; use miden_crypto::hash::rpo::Rpo256; use miden_crypto::merkle::smt::ForestInMemoryBackend; use 
miden_node_proto::domain::account::{AccountStorageMapDetails, AccountVaultDetails}; use miden_node_utils::ErrorReport; +use miden_node_utils::lru_cache::LruCache; use miden_protocol::account::delta::{AccountDelta, AccountStorageDelta, AccountVaultDelta}; use miden_protocol::account::{ AccountId, @@ -37,6 +39,8 @@ pub use crate::db::models::queries::HISTORICAL_BLOCK_RETENTION; #[cfg(test)] mod tests; +const HASHED_STORAGE_MAP_KEY_CACHE_CAPACITY: usize = 65_536; + // ERRORS // ================================================================================================ @@ -63,16 +67,36 @@ pub enum WitnessError { // ACCOUNT STATE FOREST // ================================================================================================ +/// Result of retrieving storage map details for all entries in a storage map. +#[derive(Debug, PartialEq)] +pub enum AccountStorageMapResult { + NotFound, + CannotReconstructKeysFromCache, + Details(AccountStorageMapDetails), +} + /// Container for forest-related state that needs to be updated atomically. pub(crate) struct AccountStateForest { /// `LargeSmtForest` for efficient account storage reconstruction. /// Populated during block import with storage and vault SMTs. forest: LargeSmtForest, + + /// Reverse lookup from hashed SMT storage keys to raw storage map keys. + /// + /// Ideally this would be a mapping from `StorageMapKeyHash` to `StorageMapKey` but + /// unfortunately `StorageMapKeyHash` does not implement `Hash`. 
+ storage_map_key_cache: LruCache, } impl AccountStateForest { pub(crate) fn new() -> Self { - Self { forest: Self::create_forest() } + Self { + forest: Self::create_forest(), + storage_map_key_cache: LruCache::new( + NonZeroUsize::new(HASHED_STORAGE_MAP_KEY_CACHE_CAPACITY) + .expect("storage map key cache capacity must be non-zero"), + ), + } } fn create_forest() -> LargeSmtForest { @@ -136,6 +160,24 @@ impl AccountStateForest { .collect() } + fn cache_storage_map_keys_from_delta(&mut self, delta: &AccountDelta) { + let raw_keys = delta + .storage() + .maps() + .flat_map(|(_slot_name, map_delta)| map_delta.entries().keys().copied()); + self.cache_storage_map_keys(raw_keys); + } + + pub(crate) fn cache_storage_map_keys(&self, raw_keys: impl IntoIterator) { + self.storage_map_key_cache + .put_many(raw_keys.into_iter().map(|raw_key| (raw_key.hash().into(), raw_key))); + } + + #[cfg(test)] + fn clear_storage_map_key_cache(&self) { + self.storage_map_key_cache.clear(); + } + fn apply_forest_updates( &mut self, lineage: LineageId, @@ -314,6 +356,84 @@ impl AccountStateForest { Some(proofs.map(|proofs| AccountStorageMapDetails::from_proofs(slot_name, proofs))) } + /// Enumerates a storage map as it is stored in the SMT. + /// + /// Storage map keys are hashed before insertion, so returned keys are hashed SMT keys rather + /// than the raw [`StorageMapKey`] values supplied by users. + /// + /// Returns `None` when no storage root is tracked for this account/slot/block combination. + /// Returns at most `limit` entries. 
+ fn get_storage_map_entries( + &self, + account_id: AccountId, + slot_name: &StorageSlotName, + block_num: BlockNumber, + limit: usize, + ) -> Option, MerkleError>> { + let lineage = Self::storage_lineage_id(account_id, slot_name); + let tree = self.get_tree_id(lineage, block_num)?; + + Some( + self.forest + .entries(tree) + .map_err(Self::map_forest_error) + .map(|entries| entries.take(limit).map(|entry| (entry.key, entry.value)).collect()), + ) + } + + /// Returns all storage map entries when the forest and reverse-key cache contain enough data. + /// + /// Returns `AccountStorageMapResult::NotFound` when no storage root is tracked for this + /// account/slot/block combination. + /// Returns `AccountStorageMapResult::CannotReconstructKeysFromCache` when the forest has hashed + /// entries but at least one raw key is missing from the reverse-key cache, so the caller + /// should fall back to database reconstruction. + #[instrument(target = COMPONENT, skip_all)] + pub(crate) fn get_storage_map_details_for_all_entries( + &self, + account_id: AccountId, + slot_name: StorageSlotName, + block_num: BlockNumber, + ) -> Result { + let Some(hashed_entries) = self + .get_storage_map_entries( + account_id, + &slot_name, + block_num, + AccountStorageMapDetails::MAX_RETURN_ENTRIES + 1, + ) + .transpose()? 
+ else { + return Ok(AccountStorageMapResult::NotFound); + }; + + if hashed_entries.len() > AccountStorageMapDetails::MAX_RETURN_ENTRIES { + return Ok(AccountStorageMapResult::Details(AccountStorageMapDetails { + slot_name, + entries: miden_node_proto::domain::account::StorageMapEntries::LimitExceeded, + })); + } + + let raw_keys = self + .storage_map_key_cache + .get_many(hashed_entries.iter().map(|(hashed_key, _)| hashed_key)); + if raw_keys.iter().any(Option::is_none) { + return Ok(AccountStorageMapResult::CannotReconstructKeysFromCache); + } + + let mut entries = raw_keys + .into_iter() + .flatten() + .zip(hashed_entries) + .map(|(raw_key, (_hashed_key, value))| (raw_key, value)) + .collect::>(); + entries.sort_by(|(key_a, _), (key_b, _)| key_a.cmp(key_b)); + + Ok(AccountStorageMapResult::Details(AccountStorageMapDetails::from_forest_entries( + slot_name, entries, + ))) + } + // PUBLIC INTERFACE // -------------------------------------------------------------------------------------------- @@ -387,6 +507,8 @@ impl AccountStateForest { self.update_account_storage(block_num, account_id, delta.storage()); } + self.cache_storage_map_keys_from_delta(delta); + Ok(()) } diff --git a/crates/store/src/account_state_forest/tests.rs b/crates/store/src/account_state_forest/tests.rs index 6d5dd7011c..fb252f09d6 100644 --- a/crates/store/src/account_state_forest/tests.rs +++ b/crates/store/src/account_state_forest/tests.rs @@ -661,6 +661,82 @@ fn storage_map_open_returns_proofs() { }); } +#[test] +fn storage_map_all_entries_returns_raw_keys_after_update() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + let mut forest = AccountStateForest::new(); + let account_id = dummy_account(); + let slot_name = StorageSlotName::mock(6); + let block_num = BlockNumber::GENESIS.child(); + let raw_key = StorageMapKey::from_index(42); + let value = Word::from([42u32, 0, 0, 0]); + + let mut map_delta = 
StorageMapDelta::default(); + map_delta.insert(raw_key, value); + let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); + forest.update_account(block_num, &delta).unwrap(); + + let result = forest + .get_storage_map_details_for_all_entries(account_id, slot_name.clone(), block_num) + .expect("forest lookup should not fail"); + + assert_eq!( + result, + AccountStorageMapResult::Details(AccountStorageMapDetails::from_forest_entries( + slot_name, + vec![(raw_key, value)] + )) + ); +} + +#[test] +fn storage_map_all_entries_returns_cache_miss_when_raw_key_is_not_cached() { + use std::collections::BTreeMap; + + use miden_protocol::account::delta::{StorageMapDelta, StorageSlotDelta}; + + let mut forest = AccountStateForest::new(); + let account_id = dummy_account(); + let slot_name = StorageSlotName::mock(7); + let block_num = BlockNumber::GENESIS.child(); + let raw_key = StorageMapKey::from_index(43); + let value = Word::from([43u32, 0, 0, 0]); + + let mut map_delta = StorageMapDelta::default(); + map_delta.insert(raw_key, value); + let raw = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map_delta))]); + let storage_delta = AccountStorageDelta::from_raw(raw); + let delta = dummy_partial_delta(account_id, AccountVaultDelta::default(), storage_delta); + forest.update_account(block_num, &delta).unwrap(); + + forest.clear_storage_map_key_cache(); + + let result = forest + .get_storage_map_details_for_all_entries(account_id, slot_name.clone(), block_num) + .expect("forest lookup should not fail"); + + assert_eq!(result, AccountStorageMapResult::CannotReconstructKeysFromCache); + + forest.cache_storage_map_keys([raw_key]); + + let result = forest + .get_storage_map_details_for_all_entries(account_id, slot_name.clone(), block_num) + .expect("forest lookup should not fail"); + + 
assert_eq!( + result, + AccountStorageMapResult::Details(AccountStorageMapDetails::from_forest_entries( + slot_name, + vec![(raw_key, value)] + )) + ); +} + #[test] fn storage_map_key_hashing_and_raw_entries_are_consistent() { use std::collections::BTreeMap; diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index c53d7ad13e..11d9531ac3 100644 --- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -18,12 +18,12 @@ use miden_node_proto::domain::account::{ AccountStorageMapDetails, AccountVaultDetails, SlotData, + StorageMapEntries, StorageMapRequest, }; use miden_node_proto::domain::batch::BatchInputs; use miden_node_utils::clap::StorageOptions; use miden_node_utils::formatting::format_array; -use miden_node_utils::limiter::{QueryParamLimiter, QueryParamStorageMapKeyTotalLimit}; use miden_protocol::Word; use miden_protocol::account::{AccountId, StorageMapKey, StorageMapWitness, StorageSlotName}; use miden_protocol::asset::{AssetVaultKey, AssetWitness}; @@ -37,7 +37,7 @@ use miden_protocol::transaction::PartialBlockchain; use tokio::sync::{Mutex, RwLock}; use tracing::{Instrument, info, instrument}; -use crate::account_state_forest::{AccountStateForest, WitnessError}; +use crate::account_state_forest::{AccountStateForest, AccountStorageMapResult, WitnessError}; use crate::accounts::AccountTreeWithHistory; use crate::blocks::BlockStore; use crate::db::models::Page; @@ -686,6 +686,68 @@ impl State { Ok((block_num, witness)) } + /// Returns storage map details from the forest for a specific account and storage slot. + /// + /// The forest can only be used if all hashed keys in the storage map are known in the + /// reverse-key LRU cache. If any hashed key is unknown, the method returns `Ok(None)` to signal + /// that the caller should fall back to reconstructing the storage map details from the + /// database. 
+ #[instrument(target = COMPONENT, skip_all)] + async fn get_storage_map_details_from_forest( + &self, + account_id: AccountId, + slot_name: StorageSlotName, + block_num: BlockNumber, + ) -> Result, DatabaseError> { + let forest_guard = self + .forest + .read() + .instrument(tracing::info_span!("acquire_forest_for_storage_map_entries")) + .await; + match forest_guard + .get_storage_map_details_for_all_entries(account_id, slot_name.clone(), block_num) + .map_err(DatabaseError::MerkleError)? + { + AccountStorageMapResult::NotFound => Err(DatabaseError::StorageRootNotFound { + account_id, + slot_name: slot_name.to_string(), + block_num, + }), + AccountStorageMapResult::Details(details) => Ok(Some(details)), + AccountStorageMapResult::CannotReconstructKeysFromCache => Ok(None), + } + } + + /// Returns storage map details by reconstructing the storage map from the database. + /// + /// This is used as a fallback when the forest cannot be used, which happens when there are + /// hashed keys in the storage map that are not known in the reverse-key LRU cache. + async fn reconstruct_storage_map_details_from_db( + &self, + account_id: AccountId, + slot_name: StorageSlotName, + block_num: BlockNumber, + ) -> Result { + let details = self + .db + .reconstruct_storage_map_from_db( + account_id, + slot_name, + block_num, + Some(AccountStorageMapDetails::MAX_RETURN_ENTRIES), + ) + .await?; + + if let StorageMapEntries::AllEntries(entries) = &details.entries { + self.forest + .write() + .await + .cache_storage_map_keys(entries.iter().map(|(raw_key, _)| *raw_key)); + } + + Ok(details) + } + /// Fetches the account details (code, vault, storage) for a public account at the specified /// block. /// @@ -694,7 +756,8 @@ impl State { /// /// For specific key queries (`SlotData::MapKeys`), the forest is used to provide SMT proofs. /// Returns an error if the forest doesn't have data for the requested slot. 
- /// All-entries queries (`SlotData::All`) use the forest to request all entries database. + /// All-entries queries (`SlotData::All`) use the forest when all hashed keys are known in the + /// reverse-key LRU cache, otherwise they fall back to database reconstruction. #[expect(clippy::too_many_lines)] #[instrument(target = COMPONENT, skip_all)] async fn fetch_public_account_details( @@ -804,22 +867,17 @@ impl State { } } - // TODO parallelize the read requests for (index, slot_name) in all_entries_requests { - let details = self - .db - .reconstruct_storage_map_from_db( - account_id, - slot_name.clone(), - block_num, - Some( - // TODO unify this with - // `AccountStorageMapDetails::MAX_RETURN_ENTRIES` - // and accumulated the limits - ::LIMIT, - ), - ) - .await?; + let details = match self + .get_storage_map_details_from_forest(account_id, slot_name.clone(), block_num) + .await? + { + Some(details) => details, + None => { + self.reconstruct_storage_map_details_from_db(account_id, slot_name, block_num) + .await? + }, + }; storage_map_details_by_index[index] = Some(details); } diff --git a/crates/utils/src/lru_cache.rs b/crates/utils/src/lru_cache.rs index a2013da5bc..6141ec663f 100644 --- a/crates/utils/src/lru_cache.rs +++ b/crates/utils/src/lru_cache.rs @@ -30,6 +30,28 @@ where self.lock().put(key, value); } + /// Retrieves multiple values from the cache while holding the cache lock once. + pub fn get_many<'a>(&self, keys: impl IntoIterator) -> Vec> + where + K: 'a, + { + let mut cache = self.lock(); + keys.into_iter().map(|key| cache.get(key).cloned()).collect() + } + + /// Puts multiple values into the cache while holding the cache lock once. + pub fn put_many(&self, entries: impl IntoIterator) { + let mut cache = self.lock(); + for (key, value) in entries { + cache.put(key, value); + } + } + + /// Clears all entries from the cache. 
+ pub fn clear(&self) { + self.lock().clear(); + } + #[instrument(name = "lru.lock", skip_all)] fn lock(&self) -> MutexGuard<'_, InnerCache> { // SAFETY: The mutex is only held for the duration of the get/put operation From 255d8638445ac8bccbc201d4ca64ed95a34f9b06 Mon Sep 17 00:00:00 2001 From: KOVACS Krisztian Date: Mon, 4 May 2026 08:22:45 +0200 Subject: [PATCH 18/28] refactor(ntx-builder): use sync mutexes where possible (#2021) --- crates/ntx-builder/src/actor/execute.rs | 132 ++++++++++++++++-------- crates/ntx-builder/src/actor/mod.rs | 17 ++- crates/ntx-builder/src/builder.rs | 47 ++------- crates/ntx-builder/src/chain_state.rs | 58 ++++++++++- crates/ntx-builder/src/lib.rs | 8 +- 5 files changed, 164 insertions(+), 98 deletions(-) diff --git a/crates/ntx-builder/src/actor/execute.rs b/crates/ntx-builder/src/actor/execute.rs index 84a357a786..2888e1b774 100644 --- a/crates/ntx-builder/src/actor/execute.rs +++ b/crates/ntx-builder/src/actor/execute.rs @@ -1,5 +1,5 @@ use std::collections::{BTreeMap, BTreeSet}; -use std::sync::Arc; +use std::sync::{Arc, Mutex}; use miden_node_utils::ErrorReport; use miden_node_utils::lru_cache::LruCache; @@ -48,7 +48,6 @@ use miden_tx::{ TransactionMastStore, TransactionProverError, }; -use tokio::sync::Mutex; use tracing::{Instrument, instrument}; use crate::COMPONENT; @@ -213,7 +212,7 @@ impl NtxContext { let executed_tx = Box::pin(self.execute(&data_store, successful_notes)).await?; // Collect scripts fetched from the remote store during execution. - let scripts_to_cache = data_store.take_fetched_scripts().await; + let scripts_to_cache = data_store.take_fetched_scripts(); // Prove transaction. let tx_inputs: TransactionInputs = executed_tx.into(); @@ -374,29 +373,9 @@ struct NtxDataStore { db: Db, /// Scripts fetched from the remote store during execution, to be persisted by the /// coordinator. 
- fetched_scripts: Arc>>, + fetched_scripts: Arc, /// Mapping of storage map roots to storage slot names observed during various calls. - /// - /// The registered slot names are subsequently used to retrieve storage map witnesses from the - /// store. We need this because the store interface (and the underling SMT forest) use storage - /// slot names, but the `DataStore` interface works with tree roots. To get around this problem - /// we populate this map when: - /// - The the native account is loaded (in `get_transaction_inputs()`). - /// - When a foreign account is loaded (in `get_foreign_account_inputs`). - /// - /// The assumption here are: - /// - Once an account is loaded, the mapping between `(account_id, map_root)` and slot names do - /// not change. This is always the case. - /// - New storage slots created during transaction execution will not be accesses in the same - /// transaction. The mechanism for adding new storage slots is not implemented yet, but the - /// plan for it is consistent with this assumption. - /// - /// One nuance worth mentioning: it is possible that there could be a root collision where an - /// account has two storage maps with the same root. In this case, the map will contain only a - /// single entry with the storage slot name that was added last. Thus, technically, requests - /// to the store could be "wrong", but given that two identical maps have identical witnesses - /// this does not cause issues in practice. - storage_slots: Arc>>, + storage_slots: Arc, } impl NtxDataStore { @@ -420,30 +399,25 @@ impl NtxDataStore { store, script_cache, db, - fetched_scripts: Arc::new(Mutex::new(Vec::new())), - storage_slots: Arc::new(Mutex::new(BTreeMap::default())), + fetched_scripts: Arc::new(FetchedNoteScripts::new()), + storage_slots: Arc::new(StorageSlotRegistry::new()), } } /// Returns the list of note scripts fetched from the remote store during execution. 
- async fn take_fetched_scripts(&self) -> Vec<(Word, NoteScript)> { - self.fetched_scripts.lock().await.drain(..).collect() + fn take_fetched_scripts(&self) -> Vec<(Word, NoteScript)> { + self.fetched_scripts.take_all() } /// Registers storage map slot names for the given account ID and storage header. /// /// These slot names are subsequently used to query for storage map witnesses against the store. - async fn register_storage_map_slots( + fn register_storage_map_slots( &self, account_id: AccountId, storage_header: &AccountStorageHeader, ) { - let mut storage_slots = self.storage_slots.lock().await; - for slot_header in storage_header.slots() { - if let StorageSlotType::Map = slot_header.slot_type() { - storage_slots.insert((account_id, slot_header.value()), slot_header.name().clone()); - } - } + self.storage_slots.register_slots(account_id, storage_header); } } @@ -467,8 +441,7 @@ impl DataStore for NtxDataStore { } // Register slot names from the native account for later use. - self.register_storage_map_slots(account_id, &self.account.storage().to_header()) - .await; + self.register_storage_map_slots(account_id, &self.account.storage().to_header()); let partial_account = PartialAccount::from(&self.account); Ok((partial_account, self.reference_block.clone(), (*self.chain_mmr).clone())) @@ -494,8 +467,7 @@ impl DataStore for NtxDataStore { self.mast_store.load_account_code(account_inputs.code()); // Register slot names from the foreign account for later use. - self.register_storage_map_slots(foreign_account_id, account_inputs.storage().header()) - .await; + self.register_storage_map_slots(foreign_account_id, account_inputs.storage().header()); Ok(account_inputs) } @@ -532,8 +504,7 @@ impl DataStore for NtxDataStore { async move { // The slot name that corresponds to the given account ID and map root must have been // registered during previous calls of this data store. 
-            let storage_slots = self.storage_slots.lock().await;
-            let Some(slot_name) = storage_slots.get(&(account_id, map_root)) else {
+            let Some(slot_name) = self.storage_slots.get_slot_name(account_id, map_root) else {
                 return Err(DataStoreError::other(
                     "requested storage slot has not been registered",
                 ));
@@ -589,7 +560,7 @@ impl DataStore for NtxDataStore {
 
         if let Some(script) = maybe_script {
             // Collect for later persistence by the coordinator.
-            self.fetched_scripts.lock().await.push((script_root, script.clone()));
+            self.fetched_scripts.add(script_root, script.clone());
             self.script_cache.put(script_root, script.clone());
             Ok(Some(script))
         } else {
@@ -607,3 +578,78 @@ impl MastForestStore for NtxDataStore {
         self.mast_store.get(procedure_hash)
     }
 }
+
+// HELPERS
+// ================================================================================================
+
+/// Scripts fetched from the remote store during execution, to be persisted by the
+/// coordinator.
+///
+/// The API guarantees that the mutex is never held across an await point.
+struct FetchedNoteScripts {
+    scripts: Mutex<Vec<(Word, NoteScript)>>,
+}
+
+impl FetchedNoteScripts {
+    fn new() -> Self {
+        Self { scripts: Mutex::new(Vec::new()) }
+    }
+
+    fn add(&self, script_root: Word, script: NoteScript) {
+        self.scripts
+            .lock()
+            .expect("Note scripts mutex is poisoned")
+            .push((script_root, script));
+    }
+
+    fn take_all(&self) -> Vec<(Word, NoteScript)> {
+        self.scripts.lock().expect("Note scripts mutex is poisoned").drain(..).collect()
+    }
+}
+
+/// Mapping of storage map roots to storage slot names observed during various calls.
+///
+/// The registered slot names are subsequently used to retrieve storage map witnesses from the
+/// store. We need this because the store interface (and the underlying SMT forest) use storage
+/// slot names, but the `DataStore` interface works with tree roots. To get around this problem
+/// we populate this map when:
+/// - The native account is loaded (in `get_transaction_inputs()`).
+/// - When a foreign account is loaded (in `get_foreign_account_inputs`).
+///
+/// The assumptions here are:
+/// - Once an account is loaded, the mapping between `(account_id, map_root)` and slot names do not
+///   change. This is always the case.
+/// - New storage slots created during transaction execution will not be accessed in the same
+///   transaction. The mechanism for adding new storage slots is not implemented yet, but the plan
+///   for it is consistent with this assumption.
+///
+/// One nuance worth mentioning: it is possible that there could be a root collision where an
+/// account has two storage maps with the same root. In this case, the map will contain only a
+/// single entry with the storage slot name that was added last. Thus, technically, requests
+/// to the store could be "wrong", but given that two identical maps have identical witnesses
+/// this does not cause issues in practice.
+///
+/// The API guarantees that the mutex is never held across an await point.
+struct StorageSlotRegistry {
+    slots: Mutex<BTreeMap<(AccountId, Word), StorageSlotName>>,
+}
+
+impl StorageSlotRegistry {
+    fn new() -> Self {
+        Self { slots: Mutex::new(BTreeMap::default()) }
+    }
+
+    fn register_slots(&self, account_id: AccountId, storage_header: &AccountStorageHeader) {
+        let mut slots = self.slots.lock().expect("Storage slot registry mutex is poisoned");
+        for slot_header in storage_header.slots() {
+            if let StorageSlotType::Map = slot_header.slot_type() {
+                slots.insert((account_id, slot_header.value()), slot_header.name().clone());
+            }
+        }
+    }
+
+    fn get_slot_name(&self, account_id: AccountId, map_root: Word) -> Option<StorageSlotName> {
+        let slots = self.slots.lock().expect("Storage slot registry mutex is poisoned");
+        slots.get(&(account_id, map_root)).cloned()
+    }
+}
diff --git a/crates/ntx-builder/src/actor/mod.rs b/crates/ntx-builder/src/actor/mod.rs
index 983d14e02e..fa49ccccfe 100644
--- a/crates/ntx-builder/src/actor/mod.rs
+++ b/crates/ntx-builder/src/actor/mod.rs
@@ -18,11 +18,11 @@ use 
miden_protocol::note::{NoteScript, Nullifier}; use miden_protocol::transaction::TransactionId; use miden_remote_prover_client::RemoteTransactionProver; use miden_tx::FailedNote; -use tokio::sync::{Notify, RwLock, Semaphore, mpsc}; +use tokio::sync::{Notify, Semaphore, mpsc}; use tokio_util::sync::CancellationToken; use crate::NoteError; -use crate::chain_state::ChainState; +use crate::chain_state::{ChainState, SharedChainState}; use crate::clients::{BlockProducerClient, StoreClient, ValidatorClient}; use crate::db::Db; @@ -61,7 +61,7 @@ pub struct AccountActorContext { pub prover: Option, /// The latest chain state that account all actors can rely on. A single chain state is shared /// among all actors. - pub chain_state: Arc>, + pub chain_state: Arc, /// Shared LRU cache for storing retrieved note scripts to avoid repeated store calls. /// This cache is shared across all account actors to maximize cache efficiency. pub script_cache: LruCache, @@ -87,17 +87,16 @@ impl AccountActorContext { /// but this is sufficient for testing coordinator logic (registry, deactivation, etc.). 
pub fn test(db: &crate::db::Db) -> Self { use miden_protocol::crypto::merkle::mmr::{Forest, MmrPeaks, PartialMmr}; - use tokio::sync::RwLock; use url::Url; - use crate::chain_state::ChainState; + use crate::chain_state::SharedChainState; use crate::clients::StoreClient; use crate::test_utils::mock_block_header; let url = Url::parse("http://127.0.0.1:1").unwrap(); let block_header = mock_block_header(0_u32.into()); let chain_mmr = PartialMmr::from_peaks(MmrPeaks::new(Forest::new(0), vec![]).unwrap()); - let chain_state = Arc::new(RwLock::new(ChainState::new(block_header, chain_mmr))); + let chain_state = Arc::new(SharedChainState::new(block_header, chain_mmr)); let (request_tx, _request_rx) = mpsc::channel(1); Self { @@ -210,7 +209,7 @@ pub struct AccountActor { block_producer: BlockProducerClient, validator: ValidatorClient, prover: Option, - chain_state: Arc>, + chain_state: Arc, script_cache: LruCache, /// Maximum number of notes per transaction. max_notes_per_tx: NonZeroUsize, @@ -262,7 +261,7 @@ impl AccountActor { let account_id = self.origin.id(); // Determine initial mode by checking DB for available notes. - let block_num = self.chain_state.read().await.chain_tip_header.block_num(); + let block_num = self.chain_state.chain_tip_block_number(); let has_notes = self .db .has_available_notes(account_id, block_num, self.max_note_attempts) @@ -319,7 +318,7 @@ impl AccountActor { let _permit = permit.context("semaphore closed")?; // Read the chain state. - let chain_state = self.chain_state.read().await.clone(); + let chain_state = self.chain_state.get_cloned(); // Query DB for latest account and available notes. 
let tx_candidate = self.select_candidate_from_db( diff --git a/crates/ntx-builder/src/builder.rs b/crates/ntx-builder/src/builder.rs index 3e3581cbe8..b9b962d8e1 100644 --- a/crates/ntx-builder/src/builder.rs +++ b/crates/ntx-builder/src/builder.rs @@ -6,16 +6,15 @@ use futures::Stream; use miden_node_proto::domain::account::NetworkAccountId; use miden_node_proto::domain::mempool::MempoolEvent; use miden_protocol::account::delta::AccountUpdateDetails; -use miden_protocol::block::BlockHeader; use tokio::net::TcpListener; -use tokio::sync::{RwLock, mpsc}; +use tokio::sync::mpsc; use tokio::task::JoinSet; use tokio_stream::StreamExt; use tonic::Status; use crate::NtxBuilderConfig; use crate::actor::{AccountActorContext, AccountOrigin, ActorRequest}; -use crate::chain_state::ChainState; +use crate::chain_state::SharedChainState; use crate::clients::StoreClient; use crate::coordinator::Coordinator; use crate::db::Db; @@ -51,7 +50,7 @@ pub struct NetworkTransactionBuilder { /// Database for persistent state. db: Db, /// Shared chain state updated by the event loop and read by actors. - chain_state: Arc>, + chain_state: Arc, /// Context shared with all account actors. actor_context: AccountActorContext, /// Stream of mempool events from the block producer. @@ -70,7 +69,7 @@ impl NetworkTransactionBuilder { coordinator: Coordinator, store: StoreClient, db: Db, - chain_state: Arc>, + chain_state: Arc, actor_context: AccountActorContext, mempool_events: MempoolEventStream, actor_request_rx: mpsc::Receiver, @@ -197,7 +196,7 @@ impl NetworkTransactionBuilder { .context("failed to load account from store")? 
.context("account should exist in store")?; - let block_num = self.chain_state.read().await.chain_tip_header.block_num(); + let block_num = self.chain_state.chain_tip_block_number(); let notes = self .store .get_unconsumed_network_notes(account_id, block_num.as_u32()) @@ -253,7 +252,8 @@ impl NetworkTransactionBuilder { .await .context("failed to write BlockCommitted to DB")?; - self.update_chain_tip(header.as_ref().clone()).await; + self.chain_state + .update_chain_tip(header.as_ref().clone(), self.config.max_block_count); self.coordinator.notify_accounts(&result.accounts_to_notify); Ok(()) }, @@ -292,37 +292,4 @@ impl NetworkTransactionBuilder { } Ok(()) } - - /// Updates the chain tip and prunes old blocks from the MMR. - async fn update_chain_tip(&mut self, tip: BlockHeader) { - let mut chain_state = self.chain_state.write().await; - - // Skip blocks already reflected in the chain state. A `BlockCommitted` event may arrive - // for a block whose state was already loaded from the store during startup: the mempool - // subscription is established first and then the chain tip is fetched, so any block - // committed in that window produces an event for state we have already ingested. - if tip.block_num() <= chain_state.chain_tip_header.block_num() { - tracing::debug!( - event_block = %tip.block_num(), - current_tip = %chain_state.chain_tip_header.block_num(), - "skipping BlockCommitted event for block already in chain state", - ); - return; - } - - // Update MMR which lags by one block. - let mmr_tip = chain_state.chain_tip_header.clone(); - Arc::make_mut(&mut chain_state.chain_mmr).add_block(&mmr_tip, true); - - // Set the new tip. - chain_state.chain_tip_header = tip; - - // Keep MMR pruned. 
- let pruned_block_height = (chain_state - .chain_mmr - .chain_length() - .as_usize() - .saturating_sub(self.config.max_block_count)) as u32; - Arc::make_mut(&mut chain_state.chain_mmr).prune_to(..pruned_block_height.into()); - } } diff --git a/crates/ntx-builder/src/chain_state.rs b/crates/ntx-builder/src/chain_state.rs index 287c0ba291..12d5b79c57 100644 --- a/crates/ntx-builder/src/chain_state.rs +++ b/crates/ntx-builder/src/chain_state.rs @@ -1,6 +1,6 @@ -use std::sync::Arc; +use std::sync::{Arc, RwLock}; -use miden_protocol::block::BlockHeader; +use miden_protocol::block::{BlockHeader, BlockNumber}; use miden_protocol::crypto::merkle::mmr::PartialMmr; use miden_protocol::transaction::PartialBlockchain; @@ -46,4 +46,58 @@ impl ChainState { pub fn into_parts(self) -> (BlockHeader, Arc) { (self.chain_tip_header, self.chain_mmr) } + + /// Updates the chain tip and prunes old blocks from the MMR. + fn update_chain_tip(&mut self, tip: BlockHeader, max_block_count: usize) { + // Skip blocks already reflected in the chain state. A `BlockCommitted` event may arrive + // for a block whose state was already loaded from the store during startup: the mempool + // subscription is established first and then the chain tip is fetched, so any block + // committed in that window produces an event for state we have already ingested. + if tip.block_num() <= self.chain_tip_header.block_num() { + tracing::debug!( + event_block = %tip.block_num(), + current_tip = %self.chain_tip_header.block_num(), + "skipping BlockCommitted event for block already in chain state", + ); + return; + } + + // Update MMR which lags by one block. + let mmr_tip = self.chain_tip_header.clone(); + Arc::make_mut(&mut self.chain_mmr).add_block(&mmr_tip, true); + + // Set the new tip. + self.chain_tip_header = tip; + + // Keep MMR pruned. 
+ let pruned_block_height = + (self.chain_mmr.chain_length().as_usize().saturating_sub(max_block_count)) as u32; + Arc::make_mut(&mut self.chain_mmr).prune_to(..pruned_block_height.into()); + } +} + +/// A thread-safe wrapper around [`ChainState`] that can be shared across multiple actors. +/// +/// The API guarantees that the lock cannot be held across await points. +pub struct SharedChainState(RwLock); + +impl SharedChainState { + pub fn new(chain_tip_header: BlockHeader, chain_mmr: PartialMmr) -> Self { + Self(RwLock::new(ChainState::new(chain_tip_header, chain_mmr))) + } + + pub(crate) fn chain_tip_block_number(&self) -> BlockNumber { + self.0.read().expect("chain state lock poisoned").chain_tip_header.block_num() + } + + pub(crate) fn update_chain_tip(&self, tip: BlockHeader, max_block_count: usize) { + self.0 + .write() + .expect("chain state lock poisoned") + .update_chain_tip(tip, max_block_count); + } + + pub(crate) fn get_cloned(&self) -> ChainState { + self.0.read().expect("chain state lock poisoned").clone() + } } diff --git a/crates/ntx-builder/src/lib.rs b/crates/ntx-builder/src/lib.rs index fed307ed6c..4eb8c6386d 100644 --- a/crates/ntx-builder/src/lib.rs +++ b/crates/ntx-builder/src/lib.rs @@ -6,7 +6,7 @@ use std::time::Duration; use actor::AccountActorContext; use anyhow::Context; use builder::MempoolEventStream; -use chain_state::ChainState; +use chain_state::SharedChainState; use clients::{BlockProducerClient, StoreClient, ValidatorClient}; use coordinator::Coordinator; use db::Db; @@ -14,7 +14,7 @@ use futures::TryStreamExt; use miden_node_utils::ErrorReport; use miden_node_utils::lru_cache::LruCache; use miden_remote_prover_client::RemoteTransactionProver; -use tokio::sync::{RwLock, mpsc}; +use tokio::sync::mpsc; use url::Url; pub(crate) type NoteError = Arc; @@ -290,7 +290,7 @@ impl NtxBuilderConfig { .await .context("failed to upsert chain state")?; - let chain_state = Arc::new(RwLock::new(ChainState::new(chain_tip_header, chain_mmr))); + let 
chain_state = Arc::new(SharedChainState::new(chain_tip_header, chain_mmr)); let (request_tx, actor_request_rx) = mpsc::channel(1); @@ -298,7 +298,7 @@ impl NtxBuilderConfig { block_producer: block_producer.clone(), validator, prover, - chain_state: chain_state.clone(), + chain_state: Arc::clone(&chain_state), store: store.clone(), script_cache, max_notes_per_tx: self.max_notes_per_tx, From 513660bc87ec3c872e5a43e8692afe82258246d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Garillot?= <4142+huitseeker@users.noreply.github.com> Date: Thu, 30 Apr 2026 19:56:59 -0400 Subject: [PATCH 19/28] ci: add shear, workspace inheritance, and zizmor checks, and move heavy GitHub Actions jobs to WarpBuild (#2013) --- .github/actionlint.yaml | 4 + .github/actions/debian/action.yml | 133 +++++++++++--------- .github/workflows/book.yml | 26 ++-- .github/workflows/build-docker.yml | 4 +- .github/workflows/build-docs.yml | 6 +- .github/workflows/changelog.yml | 5 +- .github/workflows/ci.yml | 107 +++++++++++----- .github/workflows/cleanup-workflows.yml | 39 ++++-- .github/workflows/nightly.yml | 41 ++++-- .github/workflows/publish-crates.yml | 12 +- .github/workflows/publish-debian-all.yml | 18 ++- .github/workflows/publish-debian.yml | 8 +- .github/workflows/publish-docker.yml | 11 +- .github/workflows/trigger-deploy-docs.yml | 1 + .github/workflows/zizmor.yml | 39 ++++++ Cargo.lock | 17 --- Cargo.toml | 14 ++- Makefile | 12 +- bin/genesis/Cargo.toml | 1 - bin/network-monitor/Cargo.toml | 4 +- bin/node/Dockerfile | 4 +- bin/remote-prover/Cargo.toml | 29 ++--- bin/remote-prover/src/server/mod.rs | 5 +- bin/stress-test/Cargo.toml | 4 +- crates/block-producer/Cargo.toml | 28 ++--- crates/grpc-error-macro/Cargo.toml | 6 +- crates/large-smt-backend-rocksdb/Cargo.toml | 2 +- crates/ntx-builder/Cargo.toml | 19 ++- crates/proto/Cargo.toml | 7 +- crates/remote-prover-client/Cargo.toml | 12 +- crates/rocksdb-cxx-linkage-fix/Cargo.toml | 4 +- crates/rpc/Cargo.toml | 7 +- 
crates/rpc/src/tests.rs | 23 +++- crates/store/Cargo.toml | 17 +-- crates/store/src/db/query_plan/mod.rs | 73 ----------- crates/store/src/db/query_plan/renderer.rs | 49 -------- crates/test-macro/Cargo.toml | 6 +- crates/utils/Cargo.toml | 5 +- crates/validator/Cargo.toml | 3 + proto/Cargo.toml | 6 +- zizmor.yml | 4 + 41 files changed, 435 insertions(+), 380 deletions(-) create mode 100644 .github/actionlint.yaml create mode 100644 .github/workflows/zizmor.yml delete mode 100644 crates/store/src/db/query_plan/mod.rs delete mode 100644 crates/store/src/db/query_plan/renderer.rs create mode 100644 zizmor.yml diff --git a/.github/actionlint.yaml b/.github/actionlint.yaml new file mode 100644 index 0000000000..e69e33a7ac --- /dev/null +++ b/.github/actionlint.yaml @@ -0,0 +1,4 @@ +self-hosted-runner: + labels: + - Linux-ARM64-Runner + - warp-ubuntu-latest-x64-8x diff --git a/.github/actions/debian/action.yml b/.github/actions/debian/action.yml index 99d65d069c..2b52257f9e 100644 --- a/.github/actions/debian/action.yml +++ b/.github/actions/debian/action.yml @@ -10,46 +10,24 @@ inputs: arch: required: true description: Machine architecture to build packages for. - type: choice - options: - - amd64 - - arm64 crate: required: true description: Name of binary crate being packaged. - type: choice - options: - - miden-node - - miden-remote-prover crate_dir: required: true description: Name of crate being packaged. - type: choice - options: - - miden-node - - miden-remote-prover package: required: true description: The Debian package name. - type: choice - options: - - miden-node - - miden-prover - - miden-prover-proxy packaging_dir: required: true description: Name of packaging directory. - type: choice - options: - - node - - prover - - prover-proxy runs: using: "composite" steps: - name: Rust cache - uses: Swatinem/rust-cache@v2 + uses: Swatinem/rust-cache@e18b497796c12c097a38f9edb9d0641fb99eee32 # v2 with: # Only update the cache on push onto the next branch. 
This strikes a nice balance between # cache hits and cache evictions (github has a 10GB cache limit). @@ -63,47 +41,61 @@ runs: - name: Identify target git SHA id: git-sha shell: bash + env: + INPUT_GITREF: ${{ inputs.gitref }} run: | - if git show-ref -q --verify "refs/remotes/origin/${{ inputs.gitref }}" 2>/dev/null; then - echo "sha=$(git show-ref --hash --verify 'refs/remotes/origin/${{ inputs.gitref }}')" >> $GITHUB_OUTPUT - elif git show-ref -q --verify "refs/tags/${{ inputs.gitref }}" 2>/dev/null; then - echo "sha=$(git show-ref --hash --verify 'refs/tags/${{ inputs.gitref }}')" >> $GITHUB_OUTPUT - elif git rev-parse --verify "${{ inputs.gitref }}^{commit}" >/dev/null 2>&1; then - echo "sha=$(git rev-parse --verify '${{ inputs.gitref }}^{commit}')" >> $GITHUB_OUTPUT + if git show-ref -q --verify "refs/remotes/origin/${INPUT_GITREF}" 2>/dev/null; then + ref="refs/remotes/origin/${INPUT_GITREF}" + elif git show-ref -q --verify "refs/tags/${INPUT_GITREF}" 2>/dev/null; then + ref="refs/tags/${INPUT_GITREF}" + elif git rev-parse --verify "${INPUT_GITREF}^{commit}" >/dev/null 2>&1; then + ref="${INPUT_GITREF}" else echo "::error::Unknown git reference type" exit 1 fi + sha=$(git rev-parse --verify "${ref}^{commit}") + echo "sha=${sha}" >> "$GITHUB_OUTPUT" + - name: Create package directories shell: bash + env: + INPUT_PACKAGE: ${{ inputs.package }} run: | - pkg=${{ inputs.package }} + pkg="${INPUT_PACKAGE}" mkdir -p \ - packaging/deb/$pkg/DEBIAN \ - packaging/deb/$pkg/usr/bin \ - packaging/deb/$pkg/lib/systemd/system \ - packaging/deb/$pkg/opt/$pkg \ - done + "packaging/deb/${pkg}/DEBIAN" \ + "packaging/deb/${pkg}/usr/bin" \ + "packaging/deb/${pkg}/lib/systemd/system" \ + "packaging/deb/${pkg}/opt/${pkg}" - name: Copy package install scripts shell: bash + env: + INPUT_CRATE_DIR: ${{ inputs.crate_dir }} + INPUT_PACKAGE: ${{ inputs.package }} + INPUT_PACKAGING_DIR: ${{ inputs.packaging_dir }} + TARGET_SHA: ${{ steps.git-sha.outputs.sha }} run: | - pkg=${{ 
inputs.package }} - pkg_dir=${{ inputs.packaging_dir }} - crate=${{ inputs.crate_dir }} - git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg_dir/postinst > packaging/deb/$pkg/DEBIAN/postinst - git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg_dir/postrm > packaging/deb/$pkg/DEBIAN/postrm - for service_file in $(ls packaging/$pkg_dir/*.service | sed "s/.*miden/miden/g"); do - svc=$(echo $service_file | sed "s/.service//g") - git show ${{ steps.git-sha.outputs.sha }}:packaging/$pkg_dir/$service_file > packaging/deb/$pkg/lib/systemd/system/$service_file - git show ${{ steps.git-sha.outputs.sha }}:bin/$crate/.env > packaging/deb/$pkg/lib/systemd/system/$svc.env - done - chmod 0775 packaging/deb/$pkg/DEBIAN/postinst - chmod 0775 packaging/deb/$pkg/DEBIAN/postrm + pkg="${INPUT_PACKAGE}" + pkg_dir="${INPUT_PACKAGING_DIR}" + crate="${INPUT_CRATE_DIR}" + git show "${TARGET_SHA}:packaging/${pkg_dir}/postinst" > "packaging/deb/${pkg}/DEBIAN/postinst" + git show "${TARGET_SHA}:packaging/${pkg_dir}/postrm" > "packaging/deb/${pkg}/DEBIAN/postrm" + while IFS= read -r service_file; do + service_file="${service_file##*/}" + svc="${service_file%.service}" + git show "${TARGET_SHA}:packaging/${pkg_dir}/${service_file}" > "packaging/deb/${pkg}/lib/systemd/system/${service_file}" + git show "${TARGET_SHA}:bin/${crate}/.env" > "packaging/deb/${pkg}/lib/systemd/system/${svc}.env" + done < <(find "packaging/${pkg_dir}" -maxdepth 1 -name '*.service' -print) + chmod 0775 "packaging/deb/${pkg}/DEBIAN/postinst" + chmod 0775 "packaging/deb/${pkg}/DEBIAN/postrm" - name: Create control files shell: bash + env: + INPUT_PACKAGE: ${{ inputs.package }} run: | # Map the architecture to the format required by Debian. # i.e. arm64 and amd64 instead of aarch64 and x86_64. @@ -111,8 +103,8 @@ runs: # Control file's version field must be x.y.z format so strip the rest. 
version=$(git describe --tags --abbrev=0 | sed 's/[^0-9.]//g' ) - pkg=${{ inputs.package }} - cat > packaging/deb/$pkg/DEBIAN/control << EOF + pkg="${INPUT_PACKAGE}" + cat > "packaging/deb/${pkg}/DEBIAN/control" << EOF Package: $pkg Version: $version Section: base @@ -128,47 +120,66 @@ runs: - name: Build binaries shell: bash env: - repo-url: ${{ github.server_url }}/${{ github.repository }} + INPUT_CRATE: ${{ inputs.crate }} + REPO_URL: ${{ github.server_url }}/${{ github.repository }} + TARGET_SHA: ${{ steps.git-sha.outputs.sha }} run: | - cargo install ${{ inputs.crate }} --root . --locked --git ${{ env.repo-url }} --rev ${{ steps.git-sha.outputs.sha }} + cargo install "${INPUT_CRATE}" --root . --locked --git "${REPO_URL}" --rev "${TARGET_SHA}" - name: Copy binary files shell: bash + env: + INPUT_CRATE: ${{ inputs.crate }} + INPUT_PACKAGE: ${{ inputs.package }} run: | - pkg=${{ inputs.package }} - bin=${{ inputs.crate }} - cp -p ./bin/$bin packaging/deb/$pkg/usr/bin/ + pkg="${INPUT_PACKAGE}" + bin="${INPUT_CRATE}" + cp -p "./bin/${bin}" "packaging/deb/${pkg}/usr/bin/" - name: Build packages shell: bash + env: + INPUT_PACKAGE: ${{ inputs.package }} run: | - dpkg-deb --build --root-owner-group packaging/deb/${{ inputs.package }} + dpkg-deb --build --root-owner-group "packaging/deb/${INPUT_PACKAGE}" # Save the .deb files, delete the rest. mv packaging/deb/*.deb . 
rm -rf packaging - name: Package names + id: package-names shell: bash + env: + INPUT_ARCH: ${{ inputs.arch }} + INPUT_GITREF: ${{ inputs.gitref }} + INPUT_PACKAGE: ${{ inputs.package }} run: | - echo "package=${{ inputs.package }}-${{ inputs.gitref }}-${{ inputs.arch }}.deb" >> $GITHUB_ENV + echo "package=${INPUT_PACKAGE}-${INPUT_GITREF}-${INPUT_ARCH}.deb" >> "$GITHUB_OUTPUT" - name: Rename package files shell: bash + env: + INPUT_PACKAGE: ${{ inputs.package }} + PACKAGE_FILE: ${{ steps.package-names.outputs.package }} run: | - mv ${{ inputs.package}}.deb ${{ env.package }} + mv "${INPUT_PACKAGE}.deb" "${PACKAGE_FILE}" - name: shasum packages shell: bash + env: + PACKAGE_FILE: ${{ steps.package-names.outputs.package }} run: | - sha256sum ${{ env.package }} > ${{ env.package }}.checksum + sha256sum "${PACKAGE_FILE}" > "${PACKAGE_FILE}.checksum" - name: Publish packages shell: bash env: GH_TOKEN: ${{ inputs.github_token }} + INPUT_GITREF: ${{ inputs.gitref }} + PACKAGE_FILE: ${{ steps.package-names.outputs.package }} run: | - gh release upload ${{ inputs.gitref }} \ - ${{ env.package }} \ - ${{ env.package }}.checksum \ + gh release upload "${INPUT_GITREF}" \ + "${PACKAGE_FILE}" \ + "${PACKAGE_FILE}.checksum" \ --clobber diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml index 10e48c5f1d..de48a3fd49 100644 --- a/.github/workflows/book.yml +++ b/.github/workflows/book.yml @@ -11,16 +11,13 @@ name: book on: workflow_dispatch: pull_request: - path: ["docs/internal/**"] + paths: ["docs/internal/**"] push: branches: [next] - path: ["docs/internal/**"] + paths: ["docs/internal/**"] -# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages permissions: contents: read - pages: write - id-token: write # Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. # However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. 
@@ -34,13 +31,18 @@ jobs: # The documentation is uploaded as a github artifact IFF it is required for deployment i.e. on push into next. build: name: Build documentation + permissions: + contents: read + pages: write runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@main + - uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 + with: + persist-credentials: false # Installation from source takes a fair while, so we install the binaries directly instead. - name: Install mdbook and plugins - uses: taiki-e/install-action@v2 + uses: taiki-e/install-action@055f5df8c3f65ea01cd41e9dc855becd88953486 # v2.75.18 with: tool: mdbook@0.4, mdbook-linkcheck@0.7, mdbook-alerts@0.8, mdbook-katex@0.9 @@ -51,11 +53,11 @@ jobs: - name: Setup Pages if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }} id: pages - uses: actions/configure-pages@v5 + uses: actions/configure-pages@983d7736d9b0ae728b81ab479565c72886d7745b # v5 - name: Upload book artifact if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }} - uses: actions/upload-pages-artifact@v3 + uses: actions/upload-pages-artifact@56afc609e74202658d3ffba0e8f6dda462b719fa # v3 with: # We specify multiple [output] sections in our book.toml which causes mdbook to create separate folders for each. This moves the generated `html` into its own `html` subdirectory. path: ./docs/internal/book/html @@ -63,6 +65,10 @@ jobs: # Deployment job only runs on push to next. 
deploy: name: Deploy documentation + permissions: + contents: read + id-token: write + pages: write environment: name: github-pages url: ${{ steps.deployment.outputs.page_url }} @@ -72,4 +78,4 @@ jobs: steps: - name: Deploy to GitHub Pages id: deployment - uses: actions/deploy-pages@v4 + uses: actions/deploy-pages@d6db90164ac5ed86f2b6aed7e0febac5b3c0c03e # v4 diff --git a/.github/workflows/build-docker.yml b/.github/workflows/build-docker.yml index b259c23fd9..e2e2e15e84 100644 --- a/.github/workflows/build-docker.yml +++ b/.github/workflows/build-docker.yml @@ -15,10 +15,10 @@ jobs: runs-on: Linux-ARM64-Runner steps: - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3 - name: Build and push - uses: docker/build-push-action@v6 + uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6 with: push: false file: ./bin/node/Dockerfile diff --git a/.github/workflows/build-docs.yml b/.github/workflows/build-docs.yml index 56cc7795f4..105f1bc0e9 100644 --- a/.github/workflows/build-docs.yml +++ b/.github/workflows/build-docs.yml @@ -28,10 +28,12 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 + with: + persist-credentials: false - name: Setup Node.js - uses: actions/setup-node@v4 + uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4.4.0 with: node-version: "20" cache: "npm" diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml index be2667efd0..9a254f6a78 100644 --- a/.github/workflows/changelog.yml +++ b/.github/workflows/changelog.yml @@ -15,12 +15,13 @@ jobs: runs-on: ubuntu-24.04 steps: - name: Checkout code - uses: actions/checkout@main + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 with: fetch-depth: 0 + persist-credentials: false - name: Check for changes in changelog env: BASE_REF: 
${{ github.event.pull_request.base.ref }} NO_CHANGELOG_LABEL: ${{ contains(github.event.pull_request.labels.*.name, 'no changelog') }} - run: ./scripts/check-changelog.sh "${{ inputs.changelog }}" + run: ./scripts/check-changelog.sh shell: bash diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 674d4ddd49..9cc7d2364b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -66,16 +66,16 @@ jobs: # Normal cargo build that saves either the persistent trunk cache or a # single per-run cache for downstream jobs to restore immediately. build: - runs-on: ubuntu-24.04 + runs-on: warp-ubuntu-latest-x64-8x steps: - - uses: actions/checkout@v6 - - name: Cleanup large tools for build space - uses: ./.github/actions/cleanup-runner + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + with: + persist-credentials: false - uses: ./.github/actions/install-rocksdb - uses: ./.github/actions/install-protobuf-compiler - name: Rustup run: rustup update --no-self-update - - uses: Swatinem/rust-cache@v2 + - uses: WarpBuilds/rust-cache@9d0cc3090d9c87de74ea67617b246e978735b1a1 # v2.9.1 with: shared-key: ${{ env.RUST_CACHE_SUFFIX }} prefix-key: ${{ env.RUST_CACHE_PREFIX }} @@ -119,17 +119,19 @@ jobs: exit 3 fi done - echo "Static linkage check passed for all of ${bin_targets[@]}" + printf "Static linkage check passed for all of %s\n" "${bin_targets[*]}" clippy: name: lint - clippy - runs-on: ubuntu-24.04 + runs-on: warp-ubuntu-latest-x64-8x needs: [build] steps: - - uses: actions/checkout@v6 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + with: + persist-credentials: false - name: Rustup run: rustup update --no-self-update - - uses: Swatinem/rust-cache@v2 + - uses: WarpBuilds/rust-cache@9d0cc3090d9c87de74ea67617b246e978735b1a1 # v2.9.1 with: shared-key: ${{ env.RUST_CACHE_SUFFIX }} prefix-key: ${{ env.RUST_CACHE_PREFIX }} @@ -139,17 +141,19 @@ jobs: run: cargo clippy --locked --all-targets --all-features 
--workspace -- -D warnings tests: - runs-on: ubuntu-24.04 + runs-on: warp-ubuntu-latest-x64-8x needs: [build] timeout-minutes: 30 steps: - - uses: actions/checkout@v6 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + with: + persist-credentials: false - name: Rustup run: rustup update --no-self-update - - uses: taiki-e/install-action@v2 + - uses: taiki-e/install-action@055f5df8c3f65ea01cd41e9dc855becd88953486 # v2.75.18 with: tool: nextest@0.9.122 - - uses: Swatinem/rust-cache@v2 + - uses: WarpBuilds/rust-cache@9d0cc3090d9c87de74ea67617b246e978735b1a1 # v2.9.1 with: shared-key: ${{ env.RUST_CACHE_SUFFIX }} prefix-key: ${{ env.RUST_CACHE_PREFIX }} @@ -164,14 +168,14 @@ jobs: doc: needs: [build] - runs-on: ubuntu-24.04 + runs-on: warp-ubuntu-latest-x64-8x steps: - - uses: actions/checkout@v6 - - name: Cleanup large tools for build space - uses: ./.github/actions/cleanup-runner + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + with: + persist-credentials: false - name: Rustup run: rustup update --no-self-update - - uses: Swatinem/rust-cache@v2 + - uses: WarpBuilds/rust-cache@9d0cc3090d9c87de74ea67617b246e978735b1a1 # v2.9.1 with: shared-key: ${{ env.RUST_CACHE_SUFFIX }} prefix-key: ${{ env.RUST_CACHE_PREFIX }} @@ -184,21 +188,23 @@ jobs: stress-test: name: stress test needs: [build] - runs-on: ubuntu-24.04 + runs-on: warp-ubuntu-latest-x64-8x timeout-minutes: 20 env: DATA_DIR: /tmp/store steps: - - uses: actions/checkout@v6 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + with: + persist-credentials: false - name: Rustup run: rustup update --no-self-update - - uses: Swatinem/rust-cache@v2 + - uses: WarpBuilds/rust-cache@9d0cc3090d9c87de74ea67617b246e978735b1a1 # v2.9.1 with: shared-key: ${{ env.RUST_CACHE_SUFFIX }} prefix-key: ${{ env.RUST_CACHE_PREFIX }} cache-workspace-crates: true save-if: false - - uses: taiki-e/install-action@v2 + - uses: 
taiki-e/install-action@055f5df8c3f65ea01cd41e9dc855becd88953486 # v2.75.18 with: tool: nextest@0.9.122 - name: Build @@ -271,9 +277,11 @@ jobs: # quick so we don't need a separate cache here. client-wasm: name: wasm targets - runs-on: ubuntu-24.04 + runs-on: warp-ubuntu-latest-x64-8x steps: - - uses: actions/checkout@v6 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + with: + persist-credentials: false - name: Rustup run: rustup update --no-self-update - name: cargo build @@ -296,8 +304,10 @@ jobs: runs-on: ubuntu-24.04 timeout-minutes: 5 steps: - - uses: actions/checkout@v6 - - uses: taiki-e/install-action@v2 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + with: + persist-credentials: false + - uses: taiki-e/install-action@055f5df8c3f65ea01cd41e9dc855becd88953486 # v2.75.18 with: tool: typos@1.42.0 - run: make typos-check @@ -306,7 +316,9 @@ jobs: name: lint - rustfmt runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@v6 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + with: + persist-credentials: false - name: Rustup +nightly run: | rustup update --no-self-update nightly @@ -319,8 +331,10 @@ jobs: runs-on: ubuntu-24.04 timeout-minutes: 5 steps: - - uses: actions/checkout@v6 - - uses: taiki-e/install-action@v2 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + with: + persist-credentials: false + - uses: taiki-e/install-action@055f5df8c3f65ea01cd41e9dc855becd88953486 # v2.75.18 with: tool: taplo-cli@0.10.0 - run: make toml-check @@ -330,16 +344,41 @@ jobs: runs-on: ubuntu-24.04 timeout-minutes: 5 steps: - - uses: actions/checkout@v6 - - uses: taiki-e/install-action@v2 + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + with: + persist-credentials: false + - uses: taiki-e/install-action@055f5df8c3f65ea01cd41e9dc855becd88953486 # v2.75.18 with: tool: cargo-workspace-lints@0.1.4 - run: make workspace-check + workspace-inheritance: + name: 
lint - workspace inheritance + runs-on: ubuntu-24.04 + timeout-minutes: 5 + steps: + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + with: + persist-credentials: false + - name: Install cargo-binstall + uses: taiki-e/install-action@055f5df8c3f65ea01cd41e9dc855becd88953486 # v2.75.18 + with: + tool: cargo-binstall + - name: Install workspace inheritance checker + run: cargo binstall --no-confirm cargo-workspace-inheritance-check@1.2.0 + - name: Check workspace inheritance + run: cargo workspace-inheritance-check --promotion-threshold 2 --promotion-failure + unused_deps: name: lint - unused deps runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@v6 - - name: machete - uses: bnjbvr/cargo-machete@main + - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 + with: + persist-credentials: false + - name: Install cargo-shear + uses: taiki-e/install-action@055f5df8c3f65ea01cd41e9dc855becd88953486 # v2.75.18 + with: + tool: cargo-shear@1.11.2 + - name: Check for unused dependencies + run: make shear diff --git a/.github/workflows/cleanup-workflows.yml b/.github/workflows/cleanup-workflows.yml index a7a6d2b428..eff2aa99e2 100644 --- a/.github/workflows/cleanup-workflows.yml +++ b/.github/workflows/cleanup-workflows.yml @@ -27,16 +27,17 @@ jobs: steps: - name: Checkout repo - uses: actions/checkout@v6 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 with: fetch-depth: 0 + persist-credentials: false - name: Workflows on main id: main run: | git fetch origin main WORKFLOWS=$(git ls-tree -r origin/main --name-only | grep '^.github/workflows/') - printf "%s\n" $WORKFLOWS + printf "%s\n" "$WORKFLOWS" { echo "workflows<> "$GITHUB_OUTPUT" - name: Show package info + env: + MANIFEST_PATH: ${{ steps.pkg.outputs.manifest_path }} + PACKAGE: ${{ matrix.package }} run: | - echo "Package: ${{ matrix.package }}" - echo "Manifest path: ${{ steps.pkg.outputs.manifest_path }}" - cargo msrv show --manifest-path "${{ 
steps.pkg.outputs.manifest_path }}" + echo "Package: ${PACKAGE}" + echo "Manifest path: ${MANIFEST_PATH}" + cargo msrv show --manifest-path "${MANIFEST_PATH}" - name: Check MSRV + env: + MANIFEST_PATH: ${{ steps.pkg.outputs.manifest_path }} run: | - cargo msrv verify --manifest-path "${{ steps.pkg.outputs.manifest_path }}" + cargo msrv verify --manifest-path "${MANIFEST_PATH}" diff --git a/.github/workflows/publish-crates.yml b/.github/workflows/publish-crates.yml index be4f62ded6..813564e8b8 100644 --- a/.github/workflows/publish-crates.yml +++ b/.github/workflows/publish-crates.yml @@ -12,17 +12,21 @@ jobs: name: Cargo publish release runs-on: Linux-ARM64-Runner if: ${{ github.repository_owner == '0xMiden' }} + environment: publish-crates steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 with: fetch-depth: 0 + persist-credentials: false ref: ${{ github.event.release.tag_name }} - uses: ./.github/actions/install-rocksdb - uses: ./.github/actions/install-protobuf-compiler - name: Log release info + env: + RELEASE_TAG: ${{ github.event.release.tag_name }} run: | - echo "Publishing release ${{ github.event.release.tag_name }}" + echo "Publishing release ${RELEASE_TAG}" echo "Commit: $(git rev-parse HEAD)" - name: Cleanup large tools for build space uses: ./.github/actions/cleanup-runner @@ -30,8 +34,8 @@ jobs: run: sudo apt-get update && sudo apt-get install -y jq - name: Update Rust toolchain run: rustup update --no-self-update - - uses: Swatinem/rust-cache@v2 - - uses: taiki-e/install-action@v2 + - uses: Swatinem/rust-cache@e18b497796c12c097a38f9edb9d0641fb99eee32 # v2 + - uses: taiki-e/install-action@055f5df8c3f65ea01cd41e9dc855becd88953486 # v2.75.18 with: tool: cargo-binstall@1.16.6 # - name: Install cargo-msrv diff --git a/.github/workflows/publish-debian-all.yml b/.github/workflows/publish-debian-all.yml index 1b5ccdb742..201412e39b 100644 --- 
a/.github/workflows/publish-debian-all.yml +++ b/.github/workflows/publish-debian-all.yml @@ -15,12 +15,13 @@ env: version: ${{ inputs.version || github.ref_name }} permissions: - id-token: write - contents: write + contents: read jobs: publish-node: name: Publish Node ${{ matrix.arch }} Debian + permissions: + contents: write strategy: matrix: arch: [amd64, arm64] @@ -28,9 +29,10 @@ jobs: labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-24.04' }} steps: - name: Checkout repo - uses: actions/checkout@main + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 with: fetch-depth: 0 + persist-credentials: false - uses: ./.github/actions/install-rocksdb - uses: ./.github/actions/install-protobuf-compiler - name: Build and Publish Node @@ -46,6 +48,8 @@ jobs: publish-prover: name: Publish Prover ${{ matrix.arch }} Debian + permissions: + contents: write strategy: matrix: arch: [amd64, arm64] @@ -53,9 +57,10 @@ jobs: labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-24.04' }} steps: - name: Checkout repo - uses: actions/checkout@main + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 with: fetch-depth: 0 + persist-credentials: false - name: Build and Publish Prover uses: ./.github/actions/debian with: @@ -69,6 +74,8 @@ jobs: publish-network-monitor: name: Publish Network Monitor ${{ matrix.arch }} Debian + permissions: + contents: write strategy: matrix: arch: [amd64, arm64] @@ -76,9 +83,10 @@ jobs: labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-24.04' }} steps: - name: Checkout repo - uses: actions/checkout@main + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 with: fetch-depth: 0 + persist-credentials: false - name: Build and Publish Network Monitor uses: ./.github/actions/debian with: diff --git a/.github/workflows/publish-debian.yml b/.github/workflows/publish-debian.yml index be01b9d1e7..a9550ff33b 100644 --- 
a/.github/workflows/publish-debian.yml +++ b/.github/workflows/publish-debian.yml @@ -43,12 +43,13 @@ on: type: string permissions: - id-token: write - contents: write + contents: read jobs: publish: name: Publish ${{ inputs.package }} ${{ matrix.arch }} Debian + permissions: + contents: write strategy: matrix: arch: [amd64, arm64] @@ -56,9 +57,10 @@ jobs: labels: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-24.04' }} steps: - name: Checkout repo - uses: actions/checkout@main + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 with: fetch-depth: 0 + persist-credentials: false - uses: ./.github/actions/install-rocksdb - uses: ./.github/actions/install-protobuf-compiler diff --git a/.github/workflows/publish-docker.yml b/.github/workflows/publish-docker.yml index 990ef1d94c..c23d5a5a1d 100644 --- a/.github/workflows/publish-docker.yml +++ b/.github/workflows/publish-docker.yml @@ -25,16 +25,18 @@ jobs: publish: runs-on: labels: "ubuntu-24.04" + environment: publish-docker strategy: matrix: component: [node] name: Publish ${{ matrix.component }} ${{ inputs.version }} steps: - name: Checkout repo - uses: actions/checkout@main + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 with: ref: ${{ env.version }} fetch-depth: 0 + persist-credentials: false - name: Log in to the Container registry uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1 @@ -44,23 +46,22 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Configure AWS credentials - uses: aws-actions/configure-aws-credentials@v4 + uses: aws-actions/configure-aws-credentials@7474bc4690e29a8392af63c5b98e7449536d5c3a # v4 with: aws-region: ${{ secrets.AWS_REGION }} role-to-assume: ${{ secrets.AWS_ROLE }} role-session-name: GithubActionsSession - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3 with: cache-binary: true - name: Build and push 
Docker image id: push - uses: docker/build-push-action@v5 + uses: docker/build-push-action@ca052bb54ab0790a636c9b5f226502c73d547a25 # v5 with: push: true - labels: ${{ steps.meta.outputs.labels }} file: ./bin/${{ matrix.component }}/Dockerfile tags: ${{ env.registry }}/0xmiden/miden-${{ matrix.component }}:${{ env.version }} cache-from: type=s3,region=${{ secrets.AWS_REGION }},bucket=${{ secrets.AWS_CACHE_BUCKET }},name=miden-${{ matrix.component }} diff --git a/.github/workflows/trigger-deploy-docs.yml b/.github/workflows/trigger-deploy-docs.yml index ca54a442d2..568e6541fb 100644 --- a/.github/workflows/trigger-deploy-docs.yml +++ b/.github/workflows/trigger-deploy-docs.yml @@ -9,6 +9,7 @@ on: jobs: notify: runs-on: ubuntu-24.04 + environment: deploy-docs permissions: contents: read diff --git a/.github/workflows/zizmor.yml b/.github/workflows/zizmor.yml new file mode 100644 index 0000000000..9d6ecbc162 --- /dev/null +++ b/.github/workflows/zizmor.yml @@ -0,0 +1,39 @@ +name: GitHub Actions Security Analysis with zizmor + +on: + push: + branches: [main, next] + paths: + - ".github/workflows/**" + - ".github/actions/**" + - "zizmor.yml" + pull_request: + types: [opened, reopened, synchronize] + paths: + - ".github/workflows/**" + - ".github/actions/**" + - "zizmor.yml" + merge_group: + workflow_dispatch: + +permissions: {} + +jobs: + zizmor: + name: Run zizmor + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + steps: + - name: Checkout repository + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 + with: + persist-credentials: false + + - name: Run zizmor + uses: zizmorcore/zizmor-action@71321a20a9ded102f6e9ce5718a2fcec2c4f70d8 # v0.5.2 + with: + advanced-security: false + config: zizmor.yml + version: 1.23.1 diff --git a/Cargo.lock b/Cargo.lock index f16e6e7040..e794916ead 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3066,7 +3066,6 @@ dependencies = [ "hex", "miden-agglayer", "miden-node-store", - "miden-node-utils", 
"miden-protocol", "miden-standards", "rand 0.9.2", @@ -3195,18 +3194,15 @@ dependencies = [ "miden-node-proto", "miden-node-proto-build", "miden-node-store", - "miden-node-test-macro", "miden-node-utils", "miden-node-validator", "miden-protocol", "miden-remote-prover-client", "miden-standards", - "miden-tx", "miden-tx-batch-prover", "pretty_assertions", "rand 0.9.2", "rand_chacha", - "rstest", "serial_test", "tempfile", "thiserror 2.0.18", @@ -3253,7 +3249,6 @@ dependencies = [ "miden-node-db", "miden-node-proto", "miden-node-proto-build", - "miden-node-test-macro", "miden-node-utils", "miden-protocol", "miden-remote-prover-client", @@ -3278,7 +3273,6 @@ name = "miden-node-proto" version = "0.14.10" dependencies = [ "anyhow", - "assert_matches", "build-rs", "fs-err", "hex", @@ -3376,10 +3370,8 @@ dependencies = [ "pretty_assertions", "rand 0.9.2", "rand_chacha", - "regex", "serde", "tempfile", - "termtree", "thiserror 2.0.18", "tokio", "tokio-stream", @@ -3598,14 +3590,12 @@ dependencies = [ "build-rs", "clap", "fs-err", - "http 1.4.0", "humantime", "miden-block-prover", "miden-node-proto", "miden-node-proto-build", "miden-node-utils", "miden-protocol", - "miden-standards", "miden-testing", "miden-tx", "miden-tx-batch-prover", @@ -3642,7 +3632,6 @@ dependencies = [ "tonic", "tonic-prost", "tonic-prost-build", - "tonic-web", "tonic-web-wasm-client", ] @@ -6068,12 +6057,6 @@ dependencies = [ "windows-sys 0.61.2", ] -[[package]] -name = "termtree" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4d1330fe7f7f872cd05165130b10602d667b205fd85be09be2814b115d4ced9" - [[package]] name = "textwrap" version = "0.16.2" diff --git a/Cargo.toml b/Cargo.toml index 8720181051..e2416f9e82 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -85,20 +85,26 @@ indexmap = { version = "2.12" } itertools = { version = "0.14" } libsqlite3-sys = { features = ["bundled"], version = "0.35" } lru = { default-features = false, version = "0.16" } +miette 
= { version = "7.6" } +opentelemetry = { version = "0.31" } pretty_assertions = { version = "1.4" } # prost and protox are from different authors and are _not_ released in # lockstep, nor are they adhering to semver semantics. We keep this # to avoid future breakage. prost = { default-features = false, version = "=0.14.3" } protox = { version = "=0.9.1" } +quote = { version = "1.0" } rand = { version = "0.9" } rand_chacha = { default-features = false, version = "0.9" } +rayon = { version = "1.10" } reqwest = { version = "0.13" } rstest = { version = "0.26" } serde = { features = ["derive"], version = "1" } +serial_test = { version = "3.2" } +syn = { version = "2.0" } tempfile = { version = "3.12" } thiserror = { default-features = false, version = "2.0" } -tokio = { features = ["rt-multi-thread"], version = "1.46" } +tokio = { default-features = false, version = "1.46" } tokio-stream = { version = "0.1" } tokio-util = { version = "0.7" } toml = { version = "1.0" } @@ -107,12 +113,18 @@ tonic-health = { version = "0.14" } tonic-prost = { version = "0.14" } tonic-prost-build = { version = "0.14" } tonic-reflection = { version = "0.14" } +tonic-web = { version = "0.14" } tower = { version = "0.5" } tower-http = { features = ["cors", "trace"], version = "0.6" } tracing = { version = "0.1" } tracing-subscriber = { features = ["env-filter", "fmt", "json"], version = "0.3" } url = { features = ["serde"], version = "2.5" } +[workspace.metadata.cargo-shear] +# libsqlite3-sys is kept to control the bundled SQLite linkage. +# tonic-prost is used by generated gRPC code rather than handwritten Rust. +ignored = ["libsqlite3-sys", "tonic-prost"] + # Lints are set to warn for development, which are promoted to errors in CI. [workspace.lints.clippy] # Pedantic lints are set to a lower priority which allows lints in the group to be selectively enabled. 
diff --git a/Makefile b/Makefile index 33ab72a885..531833a3d8 100644 --- a/Makefile +++ b/Makefile @@ -35,9 +35,9 @@ format-check: ## Runs Format using nightly toolchain but only in check mode cargo +nightly fmt --all --check -.PHONY: machete -machete: ## Runs machete to find unused dependencies - cargo machete +.PHONY: shear +shear: ## Runs cargo-shear to find unused or misplaced dependencies + cargo shear .PHONY: toml @@ -59,7 +59,7 @@ workspace-check: ## Runs a check that all packages have `lints.workspace = true` .PHONY: lint -lint: typos-check format fix clippy toml machete ## Runs all linting tasks at once (Clippy, fixing, formatting, machete) +lint: typos-check format fix clippy toml shear ## Runs all linting tasks at once (Clippy, fixing, formatting, cargo-shear) # --- docs ---------------------------------------------------------------------------------------- @@ -155,7 +155,7 @@ check-tools: ## Checks if development tools are installed @command -v typos >/dev/null 2>&1 && echo "[OK] typos is installed" || echo "[MISSING] typos (make install-tools)" @command -v cargo nextest >/dev/null 2>&1 && echo "[OK] cargo-nextest is installed" || echo "[MISSING] cargo-nextest(make install-tools)" @command -v taplo >/dev/null 2>&1 && echo "[OK] taplo is installed" || echo "[MISSING] taplo (make install-tools)" - @command -v cargo-machete >/dev/null 2>&1 && echo "[OK] cargo-machete is installed" || echo "[MISSING] cargo-machete (make install-tools)" + @command -v cargo-shear >/dev/null 2>&1 && echo "[OK] cargo-shear is installed" || echo "[MISSING] cargo-shear is not installed (run: make install-tools)" @command -v npm >/dev/null 2>&1 && echo "[OK] npm is installed" || echo "[MISSING] npm is not installed (run: make install-tools)" .PHONY: install-tools @@ -166,7 +166,7 @@ install-tools: ## Installs tools required by the Makefile cargo install typos-cli --locked cargo install cargo-nextest --locked cargo install taplo-cli --locked - cargo install cargo-machete --locked 
+ cargo install cargo-shear --version 1.11.2 --locked @if ! command -v node >/dev/null 2>&1; then \ echo "Node.js not found. Please install Node.js from https://nodejs.org/ or using your package manager"; \ echo "On macOS: brew install node"; \ diff --git a/bin/genesis/Cargo.toml b/bin/genesis/Cargo.toml index c37ab026fd..2be6b35595 100644 --- a/bin/genesis/Cargo.toml +++ b/bin/genesis/Cargo.toml @@ -29,6 +29,5 @@ rand_chacha = { workspace = true } [dev-dependencies] miden-node-store = { workspace = true } -miden-node-utils = { workspace = true } tempfile = { workspace = true } tokio = { features = ["macros", "rt-multi-thread"], workspace = true } diff --git a/bin/network-monitor/Cargo.toml b/bin/network-monitor/Cargo.toml index 2f3a7704b1..fec7946372 100644 --- a/bin/network-monitor/Cargo.toml +++ b/bin/network-monitor/Cargo.toml @@ -18,7 +18,7 @@ workspace = true anyhow = { workspace = true } axum = { version = "0.8" } clap = { features = ["env"], workspace = true } -hex = { version = "0.4" } +hex = { workspace = true } humantime = { workspace = true } miden-node-proto = { workspace = true } miden-node-utils = { workspace = true } @@ -29,7 +29,7 @@ miden-tx = { features = ["concurrent", "std"], workspace = true } rand = { workspace = true } rand_chacha = { workspace = true } reqwest = { features = ["json", "query"], workspace = true } -serde = { features = ["derive"], version = "1.0" } +serde = { workspace = true } serde_json = { version = "1.0" } sha2 = { version = "0.10" } tokio = { features = ["full"], workspace = true } diff --git a/bin/node/Dockerfile b/bin/node/Dockerfile index 5986451a28..0c8ff95602 100644 --- a/bin/node/Dockerfile +++ b/bin/node/Dockerfile @@ -51,5 +51,5 @@ LABEL org.opencontainers.image.created=$CREATED \ # Expose RPC port EXPOSE 57291 -# Miden node does not spawn sub-processes, so it can be used as the PID1 -CMD miden-node +# Miden node does not spawn sub-processes, so it can be used as the PID1. 
+CMD ["miden-node"] diff --git a/bin/remote-prover/Cargo.toml b/bin/remote-prover/Cargo.toml index 7a05151cc3..e14a083736 100644 --- a/bin/remote-prover/Cargo.toml +++ b/bin/remote-prover/Cargo.toml @@ -16,9 +16,8 @@ workspace = true [dependencies] anyhow = { workspace = true } -async-trait = { version = "0.1" } +async-trait = { workspace = true } clap = { features = ["env"], workspace = true } -http = { workspace = true } humantime = { workspace = true } miden-block-prover = { workspace = true } miden-node-proto = { workspace = true } @@ -27,36 +26,34 @@ miden-node-utils = { workspace = true } miden-protocol = { features = ["std"], workspace = true } miden-tx = { features = ["concurrent", "std"], workspace = true } miden-tx-batch-prover = { features = ["std"], workspace = true } -opentelemetry = { version = "0.31" } +opentelemetry = { workspace = true } prost = { default-features = false, features = ["derive"], workspace = true } tokio = { features = ["full"], workspace = true } -tokio-stream = { features = ["net"], version = "0.1" } -tonic = { default-features = false, features = ["codegen", "router", "transport"], version = "0.14" } -tonic-health = { version = "0.14" } +tokio-stream = { features = ["net"], workspace = true } +tonic = { default-features = false, features = ["codegen", "router", "transport"], workspace = true } +tonic-health = { workspace = true } tonic-prost = { workspace = true } tonic-reflection = { workspace = true } -tonic-web = { version = "0.14" } +tonic-web = { workspace = true } tower-http = { features = ["trace"], workspace = true } tracing = { workspace = true } [dev-dependencies] -assert_matches = { workspace = true } -miden-protocol = { features = ["testing"], workspace = true } -miden-standards = { features = ["testing"], workspace = true } -miden-testing = { workspace = true } -miden-tx = { features = ["concurrent", "testing"], workspace = true } -serial_test = { version = "3" } +assert_matches = { workspace = true } +miden-protocol 
= { features = ["testing"], workspace = true } +miden-testing = { workspace = true } +miden-tx = { features = ["concurrent", "testing"], workspace = true } +serial_test = { workspace = true } [build-dependencies] build-rs = { workspace = true } fs-err = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } -miette = { features = ["fancy"], version = "7.5" } +miette = { features = ["fancy"], workspace = true } tonic-prost-build = { workspace = true } -[package.metadata.cargo-machete] +[package.metadata.cargo-shear] ignored = [ - "http", "prost", "tonic-prost", # used in generated OUT_DIR code ] diff --git a/bin/remote-prover/src/server/mod.rs b/bin/remote-prover/src/server/mod.rs index 3b084c42fc..6d3038e6c8 100644 --- a/bin/remote-prover/src/server/mod.rs +++ b/bin/remote-prover/src/server/mod.rs @@ -1,7 +1,6 @@ use std::num::NonZeroUsize; use anyhow::Context; -use miden_node_utils::clap::GrpcOptionsInternal; use miden_node_utils::cors::cors_for_grpc_web_layer; use miden_node_utils::panic::catch_panic_layer_fn; use miden_node_utils::tracing::grpc::grpc_trace_fn; @@ -83,11 +82,9 @@ impl Server { // Mark the service as serving health_reporter.set_serving::>().await; - let grpc_options = GrpcOptionsInternal::default(); - let server = tonic::transport::Server::builder() .accept_http1(true) - .timeout(grpc_options.request_timeout) + .timeout(self.timeout) .layer(CatchPanicLayer::custom(catch_panic_layer_fn)) .layer(TraceLayer::new_for_grpc().make_span_with(grpc_trace_fn)) .layer(cors_for_grpc_web_layer()) diff --git a/bin/stress-test/Cargo.toml b/bin/stress-test/Cargo.toml index 452da58606..0743e2b751 100644 --- a/bin/stress-test/Cargo.toml +++ b/bin/stress-test/Cargo.toml @@ -17,7 +17,7 @@ version.workspace = true workspace = true [dependencies] -clap = { features = ["derive"], version = "4.5" } +clap = { features = ["derive"], workspace = true } fs-err = { workspace = true } futures = { workspace = true } miden-node-block-producer 
= { workspace = true } @@ -27,7 +27,7 @@ miden-node-utils = { workspace = true } miden-protocol = { workspace = true } miden-standards = { workspace = true } rand = { workspace = true } -rayon = { version = "1.10" } +rayon = { workspace = true } tokio = { workspace = true } tonic = { default-features = true, workspace = true } url = { workspace = true } diff --git a/crates/block-producer/Cargo.toml b/crates/block-producer/Cargo.toml index 65f99e01a1..d3349bf7cc 100644 --- a/crates/block-producer/Cargo.toml +++ b/crates/block-producer/Cargo.toml @@ -14,6 +14,9 @@ version.workspace = true [lints] workspace = true +[lib] +doctest = false + [features] testing = [] tracing-forest = ["miden-node-utils/tracing-forest"] @@ -40,17 +43,14 @@ tracing = { workspace = true } url = { workspace = true } [dev-dependencies] -assert_matches = { workspace = true } -miden-node-store = { workspace = true } -miden-node-test-macro = { workspace = true } -miden-node-utils = { features = ["testing"], workspace = true } -miden-node-validator = { workspace = true } -miden-protocol = { default-features = true, features = ["testing"], workspace = true } -miden-standards = { features = ["testing"], workspace = true } -miden-tx = { features = ["concurrent", "testing"], workspace = true } -pretty_assertions = "1.4" -rand_chacha = { default-features = false, workspace = true } -rstest = { workspace = true } -serial_test = "3.2" -tempfile = { workspace = true } -tokio = { features = ["test-util"], workspace = true } +assert_matches = { workspace = true } +miden-node-store = { workspace = true } +miden-node-utils = { features = ["testing"], workspace = true } +miden-node-validator = { workspace = true } +miden-protocol = { default-features = true, features = ["testing"], workspace = true } +miden-standards = { features = ["testing"], workspace = true } +pretty_assertions = { workspace = true } +rand_chacha = { default-features = false, workspace = true } +serial_test = { workspace = true } +tempfile 
= { workspace = true } +tokio = { features = ["test-util"], workspace = true } diff --git a/crates/grpc-error-macro/Cargo.toml b/crates/grpc-error-macro/Cargo.toml index 4fd40b0b2d..dd040a1f10 100644 --- a/crates/grpc-error-macro/Cargo.toml +++ b/crates/grpc-error-macro/Cargo.toml @@ -15,8 +15,10 @@ version.workspace = true workspace = true [lib] +doctest = false proc-macro = true +test = false [dependencies] -quote = "1.0" -syn = { features = ["full"], version = "2.0" } +quote = { workspace = true } +syn = { features = ["full"], workspace = true } diff --git a/crates/large-smt-backend-rocksdb/Cargo.toml b/crates/large-smt-backend-rocksdb/Cargo.toml index 5109ecc9c3..3b790c39cb 100644 --- a/crates/large-smt-backend-rocksdb/Cargo.toml +++ b/crates/large-smt-backend-rocksdb/Cargo.toml @@ -17,7 +17,7 @@ workspace = true [dependencies] miden-crypto = { features = ["concurrent", "std"], workspace = true } miden-protocol = { features = ["std"], workspace = true } -rayon = { version = "1.10" } +rayon = { workspace = true } rocksdb = { default-features = false, features = ["bindgen-runtime", "lz4"], version = "0.24" } [build-dependencies] diff --git a/crates/ntx-builder/Cargo.toml b/crates/ntx-builder/Cargo.toml index c4e2a991cc..629776da1e 100644 --- a/crates/ntx-builder/Cargo.toml +++ b/crates/ntx-builder/Cargo.toml @@ -13,6 +13,9 @@ version.workspace = true [lints] workspace = true +[lib] +doctest = false + [dependencies] anyhow = { workspace = true } diesel = { features = ["numeric", "sqlite"], workspace = true } @@ -41,13 +44,9 @@ url = { workspace = true } build-rs = { workspace = true } [dev-dependencies] -miden-node-test-macro = { path = "../test-macro" } -miden-node-utils = { features = ["testing"], workspace = true } -miden-protocol = { default-features = true, features = ["testing"], workspace = true } -miden-standards = { features = ["testing"], workspace = true } -rand_chacha = { workspace = true } -rstest = { workspace = true } -tempfile = { version = "3.20" 
} - -[package.metadata.cargo-machete] -ignored = ["libsqlite3-sys"] +miden-node-utils = { features = ["testing"], workspace = true } +miden-protocol = { default-features = true, features = ["testing"], workspace = true } +miden-standards = { features = ["testing"], workspace = true } +rand_chacha = { workspace = true } +rstest = { workspace = true } +tempfile = { workspace = true } diff --git a/crates/proto/Cargo.toml b/crates/proto/Cargo.toml index fa48024ce5..8165f1ea29 100644 --- a/crates/proto/Cargo.toml +++ b/crates/proto/Cargo.toml @@ -16,7 +16,7 @@ workspace = true [dependencies] anyhow = { workspace = true } -hex = { version = "0.4" } +hex = { workspace = true } http = { workspace = true } miden-node-grpc-error-macro = { workspace = true } miden-node-utils = { workspace = true } @@ -29,14 +29,13 @@ tonic-prost = { workspace = true } url = { workspace = true } [dev-dependencies] -assert_matches = { workspace = true } -proptest = { version = "1.7" } +proptest = { version = "1.7" } [build-dependencies] build-rs = { workspace = true } fs-err = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } -miette = { version = "7.6" } +miette = { workspace = true } tonic-prost-build = { workspace = true } [package.metadata.cargo-machete] diff --git a/crates/remote-prover-client/Cargo.toml b/crates/remote-prover-client/Cargo.toml index 3edb6ea546..f9157d2390 100644 --- a/crates/remote-prover-client/Cargo.toml +++ b/crates/remote-prover-client/Cargo.toml @@ -12,6 +12,8 @@ version.workspace = true [lib] crate-type = ["lib"] +doctest = false +test = false [features] batch-prover = ["dep:miden-protocol", "dep:tokio"] @@ -26,8 +28,7 @@ tonic = { features = ["codegen"], workspace = true } tonic-web-wasm-client = { default-features = false, version = "0.9" } [target.'cfg(not(all(target_arch = "wasm32", target_os = "unknown")))'.dependencies] -tonic = { features = ["codegen", "tls-native-roots", "tls-ring", "transport"], workspace = true } 
-tonic-web = { optional = true, version = "0.14" } +tonic = { features = ["codegen", "tls-native-roots", "tls-ring", "transport"], workspace = true } [lints] workspace = true @@ -37,18 +38,19 @@ miden-protocol = { optional = true, workspace = true } miden-tx = { optional = true, workspace = true } prost = { default-features = false, features = ["derive"], workspace = true } thiserror = { workspace = true } -tokio = { default-features = false, features = ["sync"], optional = true, version = "1.44" } +tokio = { default-features = false, features = ["sync"], optional = true, workspace = true } tonic-prost = { workspace = true } [build-dependencies] build-rs = { workspace = true } fs-err = { workspace = true } miden-node-proto-build = { workspace = true } -miette = { features = ["fancy"], version = "7.5" } +miette = { features = ["fancy"], workspace = true } tonic-prost-build = { workspace = true } -[package.metadata.cargo-machete] +[package.metadata.cargo-shear] ignored = [ + "getrandom", "prost", "tonic-prost", # used in generated OUT_DIR code ] diff --git a/crates/rocksdb-cxx-linkage-fix/Cargo.toml b/crates/rocksdb-cxx-linkage-fix/Cargo.toml index 9e0eb23f7a..3e80a35bfd 100644 --- a/crates/rocksdb-cxx-linkage-fix/Cargo.toml +++ b/crates/rocksdb-cxx-linkage-fix/Cargo.toml @@ -11,7 +11,9 @@ rust-version.workspace = true version.workspace = true [lib] -path = "src/lib.rs" +doctest = false +path = "src/lib.rs" +test = false [lints] workspace = true diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index 8c483ac319..ed3d8ff903 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -14,9 +14,12 @@ version.workspace = true [lints] workspace = true +[lib] +doctest = false + [dependencies] anyhow = { workspace = true } -futures = { version = "0.3" } +futures = { workspace = true } http = { workspace = true } mediatype = { version = "0.21" } miden-node-proto = { workspace = true } @@ -31,7 +34,7 @@ tokio = { features = ["macros", "net", "rt-multi-thread"], 
work tokio-stream = { features = ["net"], workspace = true } tonic = { default-features = true, features = ["tls-native-roots", "tls-ring"], workspace = true } tonic-reflection = { workspace = true } -tonic-web = { version = "0.14" } +tonic-web = { workspace = true } tower = { workspace = true } tower-http = { features = ["trace"], workspace = true } tracing = { workspace = true } diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index 29613be0d6..a1e85e7737 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -38,6 +38,7 @@ use tempfile::TempDir; use tokio::net::TcpListener; use tokio::runtime::{self, Runtime}; use tokio::task; +use tokio::time::sleep; use url::Url; use crate::Rpc; @@ -228,7 +229,7 @@ async fn rpc_startup_is_robust_to_network_failures() { let (store_runtime, data_directory, _genesis, store_addr) = start_store(store_listener).await; // Test: send request against RPC api and should succeed - let response = send_request(&mut rpc_client).await; + let response = send_request_until_success(&mut rpc_client).await; assert!(response.unwrap().into_inner().block_header.is_some()); // Test: shutdown the store and should fail @@ -238,7 +239,7 @@ async fn rpc_startup_is_robust_to_network_failures() { // Test: restart the store and request should succeed let store_runtime = restart_store(store_addr, data_directory.path()).await; - let response = send_request(&mut rpc_client).await; + let response = send_request_until_success(&mut rpc_client).await; assert_eq!(response.unwrap().into_inner().block_header.unwrap().block_num, 0); // Shutdown the store before data_directory is dropped to allow RocksDB to flush properly @@ -440,6 +441,24 @@ async fn send_request( rpc_client.get_block_header_by_number(request).await } +async fn send_request_until_success( + rpc_client: &mut RpcClient, +) -> std::result::Result, tonic::Status> { + let mut attempts = 0; + loop { + attempts += 1; + + match send_request(rpc_client).await { + Ok(response) => 
return Ok(response), + Err(err) if attempts < 30 => { + sleep(Duration::from_millis(200)).await; + tracing::warn!(%attempts, %err, "RPC request failed, retrying"); + }, + Err(err) => return Err(err), + } + } +} + async fn connect_rpc(url: Url, local_address: Option) -> RpcClient { let mut endpoint = tonic::transport::Endpoint::from_shared(url.to_string()) .expect("Url type always results in valid endpoint") diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index aa8cfc3a27..08ca35f3d5 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -14,14 +14,17 @@ version.workspace = true [lints] workspace = true +[lib] +doctest = false + [dependencies] anyhow = { workspace = true } -deadpool = { default-features = false, features = ["managed", "rt_tokio_1"], version = "0.12" } -deadpool-diesel = { features = ["sqlite"], version = "0.6" } -diesel = { features = ["numeric", "sqlite"], version = "2.3" } -diesel_migrations = { features = ["sqlite"], version = "2.3" } +deadpool = { features = ["managed", "rt_tokio_1"], workspace = true } +deadpool-diesel = { features = ["sqlite"], workspace = true } +diesel = { features = ["numeric", "sqlite"], workspace = true } +diesel_migrations = { features = ["sqlite"], workspace = true } fs-err = { workspace = true } -hex = { version = "0.4" } +hex = { workspace = true } indexmap = { workspace = true } libsqlite3-sys = { workspace = true } miden-block-prover = { workspace = true } @@ -38,7 +41,7 @@ miden-protocol = { features = ["std", "testing"], workspace = true } pretty_assertions = { workspace = true } rand = { workspace = true } rand_chacha = { workspace = true } -serde = { features = ["derive"], version = "1" } +serde = { workspace = true } thiserror = { workspace = true } tokio = { features = ["fs", "rt-multi-thread"], workspace = true } tokio-stream = { features = ["net"], workspace = true } @@ -66,9 +69,7 @@ miden-node-utils = { features = ["testing", "tracing-forest"], workspace = miden-protocol = { 
default-features = true, features = ["testing"], workspace = true } miden-standards = { features = ["testing"], workspace = true } rand = { workspace = true } -regex = { version = "1.11" } tempfile = { workspace = true } -termtree = "1.0" [features] default = ["rocksdb"] diff --git a/crates/store/src/db/query_plan/mod.rs b/crates/store/src/db/query_plan/mod.rs deleted file mode 100644 index ebeb805630..0000000000 --- a/crates/store/src/db/query_plan/mod.rs +++ /dev/null @@ -1,73 +0,0 @@ -use std::{ - fmt::{Display, Formatter}, - ops::Not, -}; - -pub mod renderer; - -impl Transaction<'_> { - /// Panics if the query plan contains a slow table scan. - pub fn check_query_plan(&self, sql: &str) { - let query_plan = self.query_plan(sql).expect("Must be a valid SQL"); - - assert!( - query_plan.contains_unnecessary_scans().not(), - "Query plan contains unnecessary table scan(s):\n{query_plan}" - ); - } - - /// Renders query plan as tree using ASCII graphics. - fn query_plan(&self, sql: &str) -> rusqlite::Result { - let explain_sql = format!("EXPLAIN QUERY PLAN {sql}"); - let explain_stmt = self.prepare(&explain_sql)?; - let query_plan = QueryPlanRenderer::new().render_tree(explain_stmt)?; - - Ok(QueryPlan { explain_sql, query_plan }) - } -} - -/// Represents rendered query plan with `EXPLAIN QUERY PLAN ...` SQL query. -struct QueryPlan { - explain_sql: String, - query_plan: String, -} - -impl Display for QueryPlan { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - writeln!(f, "{}\n\n{}", self.explain_sql, self.query_plan) - } -} - -impl QueryPlan { - /// Checks if the given query plan contains unnecessary table scans not utilizing indices. 
- fn contains_unnecessary_scans(&self) -> bool { - use std::sync::LazyLock; - - static RE_IDX_SCAN: LazyLock = LazyLock::new(|| { - regex::RegexBuilder::new( - r"SCAN ([A-Za-z0-9_]+) (USING( COVERING)?|VIRTUAL TABLE) INDEX ([A-Za-z0-9_]+)", - ) - .case_insensitive(true) - .build() - .expect("Must be a valid regex pattern") - }); - - // Some index scans might cause false positiveness if we just look for `SCAN` keyword: - // ``` - // SCAN rarray VIRTUAL TABLE INDEX 1 - // SCAN table1 USING INDEX sqlite_autoindex_table1_1 - // SCAN table1 USING COVERING INDEX idx_table1_column - // ``` - // Preprocess query plan in order to replace such cases to `SEARCH` expressions instead of - // `SCAN`. Another solution would be to find only scan expressions which don't accompanied - // by known index suffixes, but current `regex` implementation doesn't support look-around. - let query_plan = RE_IDX_SCAN.replace_all(&self.query_plan, r"SEARCH $1 $2 INDEX $4"); - - // Don't flag `SELECT` queries without `WHERE` clause, since they don't usually use indexes - if self.explain_sql.contains("SELECT") && !self.explain_sql.contains("WHERE") { - return false; - } - - query_plan.contains(" SCAN ") - } -} diff --git a/crates/store/src/db/query_plan/renderer.rs b/crates/store/src/db/query_plan/renderer.rs deleted file mode 100644 index 8a08785918..0000000000 --- a/crates/store/src/db/query_plan/renderer.rs +++ /dev/null @@ -1,49 +0,0 @@ -use rusqlite::Statement; -use termtree::Tree; - -/// SQL query plan renderer which represents result as a tree using ASCII graphics. -pub struct QueryPlanRenderer { - path: Vec<(u64, Tree)>, -} - -impl QueryPlanRenderer { - /// Constructs and initializes query plan renderer. - pub fn new() -> Self { - Self { - path: vec![(0_u64, Tree::new("QUERY PLAN".to_string()))], - } - } - - /// Runs `EXPLAIN QUERY PLAN` statement and renders result as tree using ASCII graphics. 
- /// - /// # Note - /// Current algorithm relies on the row ordering (all child rows go after corresponding parent - /// row) of the current implementation of SQLite's `EXPLAIN QUERY PLAN` command. This is not - /// bad, because this makes algorithm simple and effective, and it is intended to be used only - /// for debugging and testing. - pub fn render_tree(mut self, mut explain_stmt: Statement) -> rusqlite::Result { - let mut rows = explain_stmt.raw_query(); - while let Some(row) = rows.next()? { - let id: u64 = row.get(0)?; - let parent_id: u64 = row.get(1)?; - let label: String = row.get(3)?; - - self.fold_up_to(parent_id); - self.path.push((id, label.into())); - } - self.fold_up_to(0); - - let (_, root) = self.path.pop().expect("Always present"); - - Ok(root.to_string()) - } - - /// Folds all elements from the top of the path stack up to the element with the given ID. - /// All elements with higher indexes become children of the elements with lower indexes. - fn fold_up_to(&mut self, id: u64) { - while self.path.last().expect("Always present").0 > id { - let (_, top) = self.path.pop().expect("Always present"); - self.path.last_mut().map(|(_, last)| last.push(top)).expect("Always present"); - } - } -} diff --git a/crates/test-macro/Cargo.toml b/crates/test-macro/Cargo.toml index dc1b9fec17..7064479d0f 100644 --- a/crates/test-macro/Cargo.toml +++ b/crates/test-macro/Cargo.toml @@ -15,8 +15,10 @@ version = "0.1.0" workspace = true [dependencies] -quote = { version = "1.0" } -syn = { features = ["extra-traits", "full"], version = "2.0" } +quote = { workspace = true } +syn = { features = ["extra-traits", "full"], workspace = true } [lib] +doctest = false proc-macro = true +test = false diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml index 01f536ffee..f3f6770185 100644 --- a/crates/utils/Cargo.toml +++ b/crates/utils/Cargo.toml @@ -14,6 +14,9 @@ version.workspace = true [lints] workspace = true +[lib] +doctest = false + [features] # Enables utility 
functions for testing traces created by some other crate's stack. rocksdb = ["dep:miden-large-smt-backend-rocksdb"] @@ -31,7 +34,7 @@ humantime = { workspace = true } itertools = { workspace = true } lru = { workspace = true } miden-protocol = { workspace = true } -opentelemetry = { version = "0.31" } +opentelemetry = { workspace = true } opentelemetry-otlp = { default-features = false, features = ["grpc-tonic", "tls-roots", "trace"], version = "0.31" } opentelemetry_sdk = { features = ["rt-tokio", "testing"], version = "0.31" } rand = { workspace = true } diff --git a/crates/validator/Cargo.toml b/crates/validator/Cargo.toml index 2cf6fb650e..bbe4d4df0a 100644 --- a/crates/validator/Cargo.toml +++ b/crates/validator/Cargo.toml @@ -14,6 +14,9 @@ version.workspace = true [lints] workspace = true +[lib] +doctest = false + [features] [dependencies] diff --git a/proto/Cargo.toml b/proto/Cargo.toml index ee79d7adc1..2011f090f7 100644 --- a/proto/Cargo.toml +++ b/proto/Cargo.toml @@ -26,5 +26,9 @@ tonic-prost-build = { workspace = true } [build-dependencies] build-rs = { workspace = true } fs-err = { workspace = true } -miette = { version = "7.6" } +miette = { workspace = true } protox = { workspace = true } + +[package.metadata.cargo-shear] +# cargo-shear misses this because it is required by files generated by build.rs. 
+ignored = ["tonic-prost-build"] diff --git a/zizmor.yml b/zizmor.yml new file mode 100644 index 0000000000..f8a62efe78 --- /dev/null +++ b/zizmor.yml @@ -0,0 +1,4 @@ +rules: + use-trusted-publishing: + ignore: + - publish-crates.yml From b491987ba93857c024506c80afc673c32aeaa0f2 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Mon, 4 May 2026 14:44:34 +0200 Subject: [PATCH 20/28] ci: install toolchain properly (#2026) --- .github/workflows/ci.yml | 16 +++++++--------- .github/workflows/nightly.yml | 2 +- .github/workflows/publish-crates.yml | 2 +- 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9cc7d2364b..91a4ba468b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -74,7 +74,7 @@ jobs: - uses: ./.github/actions/install-rocksdb - uses: ./.github/actions/install-protobuf-compiler - name: Rustup - run: rustup update --no-self-update + run: rustup toolchain install --no-self-update - uses: WarpBuilds/rust-cache@9d0cc3090d9c87de74ea67617b246e978735b1a1 # v2.9.1 with: shared-key: ${{ env.RUST_CACHE_SUFFIX }} @@ -130,7 +130,7 @@ jobs: with: persist-credentials: false - name: Rustup - run: rustup update --no-self-update + run: rustup toolchain install --no-self-update - uses: WarpBuilds/rust-cache@9d0cc3090d9c87de74ea67617b246e978735b1a1 # v2.9.1 with: shared-key: ${{ env.RUST_CACHE_SUFFIX }} @@ -149,7 +149,7 @@ jobs: with: persist-credentials: false - name: Rustup - run: rustup update --no-self-update + run: rustup toolchain install --no-self-update - uses: taiki-e/install-action@055f5df8c3f65ea01cd41e9dc855becd88953486 # v2.75.18 with: tool: nextest@0.9.122 @@ -174,7 +174,7 @@ jobs: with: persist-credentials: false - name: Rustup - run: rustup update --no-self-update + run: rustup toolchain install --no-self-update - uses: WarpBuilds/rust-cache@9d0cc3090d9c87de74ea67617b246e978735b1a1 # v2.9.1 with: shared-key: ${{ 
env.RUST_CACHE_SUFFIX }} @@ -197,7 +197,7 @@ jobs: with: persist-credentials: false - name: Rustup - run: rustup update --no-self-update + run: rustup toolchain install --no-self-update - uses: WarpBuilds/rust-cache@9d0cc3090d9c87de74ea67617b246e978735b1a1 # v2.9.1 with: shared-key: ${{ env.RUST_CACHE_SUFFIX }} @@ -283,7 +283,7 @@ jobs: with: persist-credentials: false - name: Rustup - run: rustup update --no-self-update + run: rustup toolchain install --no-self-update - name: cargo build run: | cargo build --locked -p miden-remote-prover-client \ @@ -320,9 +320,7 @@ jobs: with: persist-credentials: false - name: Rustup +nightly - run: | - rustup update --no-self-update nightly - rustup +nightly component add rustfmt + run: rustup toolchain install --no-self-update nightly --component rustfmt - name: Fmt run: make format-check diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index 398321d5f2..bbde7c792e 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -50,7 +50,7 @@ jobs: - uses: ./.github/actions/install-rocksdb - uses: ./.github/actions/install-protobuf-compiler - name: Install rust - run: rustup update --no-self-update + run: rustup toolchain install --no-self-update - name: Install cargo-hack uses: taiki-e/install-action@055f5df8c3f65ea01cd41e9dc855becd88953486 # v2.75.18 with: diff --git a/.github/workflows/publish-crates.yml b/.github/workflows/publish-crates.yml index 813564e8b8..a0b20fca46 100644 --- a/.github/workflows/publish-crates.yml +++ b/.github/workflows/publish-crates.yml @@ -33,7 +33,7 @@ jobs: - name: Install dependencies run: sudo apt-get update && sudo apt-get install -y jq - name: Update Rust toolchain - run: rustup update --no-self-update + run: rustup toolchain install --no-self-update - uses: Swatinem/rust-cache@e18b497796c12c097a38f9edb9d0641fb99eee32 # v2 - uses: taiki-e/install-action@055f5df8c3f65ea01cd41e9dc855becd88953486 # v2.75.18 with: From 
a5c3f3e890eaf48b5a11486ce61e046414f90a2f Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Mon, 4 May 2026 14:58:43 +0200 Subject: [PATCH 21/28] ci: cache each job (#2025) --- .github/workflows/ci.yml | 129 +++++++++++---------------------------- 1 file changed, 34 insertions(+), 95 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 91a4ba468b..ff4d4c0d8b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,6 +1,6 @@ # Continuous integration jobs. # -# These get run on every pull-request. +# These get run on every pull-request, with Warp caches updated on push into `main` or `next`. name: CI permissions: @@ -23,33 +23,12 @@ on: - "docs/**" env: - # Shared prefix key for the rust cache. + # Shared prefix key for the Rust caches. # - # This provides a convenient way to evict old or corrupted cache. - RUST_CACHE_PREFIX: rust-cache - # Shared branch-aware cache namespace for rust-cache. - # - # Pushes save to the persistent trunk cache, while pull requests append the - # PR number so they save into an ephemeral PR-specific cache. - # - # The format is
with an optional `-pr-` for pull requests. - # We default to next if no suitable is found. - RUST_CACHE_SUFFIX: >- - ${{ - format( - '{0}{1}', - github.base_ref == 'main' && 'main' - || github.base_ref == 'next' && 'next' - || github.ref == 'refs/heads/main' && 'main' - || github.ref == 'refs/heads/next' && 'next' - || 'next', - github.event_name == 'pull_request' - && format('-pr-{0}', github.event.pull_request.number) - || '' - ) - }} - # Match rust-cache's compilation mode so restored outputs stay reusable downstream. - CARGO_INCREMENTAL: 0 + # Cache is trunk specific (next|main). + CACHE_PREFIX: rust-cache-${{ github.base_ref || github.ref_name }} + # Only trusted branch pushes should update caches; PRs restore from the target branch cache. + SAVE_CACHE: ${{ github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/next') }} # Reduce cache usage by removing debug information. CARGO_PROFILE_DEV_DEBUG: 0 @@ -60,11 +39,10 @@ concurrency: jobs: # =============================================================================================== - # Conventional builds, lints and tests that re-use a single cache for efficiency + # Conventional builds, lints and tests # =============================================================================================== - # Normal cargo build that saves either the persistent trunk cache or a - # single per-run cache for downstream jobs to restore immediately. + # Normal cargo build that verifies the workspace compiles. 
build: runs-on: warp-ubuntu-latest-x64-8x steps: @@ -77,10 +55,9 @@ jobs: run: rustup toolchain install --no-self-update - uses: WarpBuilds/rust-cache@9d0cc3090d9c87de74ea67617b246e978735b1a1 # v2.9.1 with: - shared-key: ${{ env.RUST_CACHE_SUFFIX }} - prefix-key: ${{ env.RUST_CACHE_PREFIX }} - cache-workspace-crates: true - save-if: true + shared-key: ${{ github.job }} + prefix-key: ${{ env.CACHE_PREFIX }} + save-if: ${{ env.SAVE_CACHE }} - name: cargo build run: cargo build --workspace --all-targets --locked - name: Check static linkage @@ -124,7 +101,6 @@ jobs: clippy: name: lint - clippy runs-on: warp-ubuntu-latest-x64-8x - needs: [build] steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 with: @@ -133,16 +109,14 @@ jobs: run: rustup toolchain install --no-self-update - uses: WarpBuilds/rust-cache@9d0cc3090d9c87de74ea67617b246e978735b1a1 # v2.9.1 with: - shared-key: ${{ env.RUST_CACHE_SUFFIX }} - prefix-key: ${{ env.RUST_CACHE_PREFIX }} - cache-workspace-crates: true - save-if: false + shared-key: ${{ github.job }} + prefix-key: ${{ env.CACHE_PREFIX }} + save-if: ${{ env.SAVE_CACHE }} - name: clippy run: cargo clippy --locked --all-targets --all-features --workspace -- -D warnings tests: runs-on: warp-ubuntu-latest-x64-8x - needs: [build] timeout-minutes: 30 steps: - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 @@ -155,10 +129,9 @@ jobs: tool: nextest@0.9.122 - uses: WarpBuilds/rust-cache@9d0cc3090d9c87de74ea67617b246e978735b1a1 # v2.9.1 with: - shared-key: ${{ env.RUST_CACHE_SUFFIX }} - prefix-key: ${{ env.RUST_CACHE_PREFIX }} - cache-workspace-crates: true - save-if: false + shared-key: ${{ github.job }} + prefix-key: ${{ env.CACHE_PREFIX }} + save-if: ${{ env.SAVE_CACHE }} - name: Build tests run: cargo nextest run --all-features --workspace --no-run - name: Run tests @@ -167,7 +140,6 @@ jobs: run: cargo test --doc --workspace --all-features doc: - needs: [build] runs-on: warp-ubuntu-latest-x64-8x steps: - 
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 @@ -177,17 +149,15 @@ jobs: run: rustup toolchain install --no-self-update - uses: WarpBuilds/rust-cache@9d0cc3090d9c87de74ea67617b246e978735b1a1 # v2.9.1 with: - shared-key: ${{ env.RUST_CACHE_SUFFIX }} - prefix-key: ${{ env.RUST_CACHE_PREFIX }} - cache-workspace-crates: true - save-if: false + shared-key: ${{ github.job }} + prefix-key: ${{ env.CACHE_PREFIX }} + save-if: ${{ env.SAVE_CACHE }} - name: Build docs run: cargo doc --no-deps --workspace --all-features --locked # Ensure the stress-test still functions by running some cheap benchmarks. stress-test: name: stress test - needs: [build] runs-on: warp-ubuntu-latest-x64-8x timeout-minutes: 20 env: @@ -200,10 +170,9 @@ jobs: run: rustup toolchain install --no-self-update - uses: WarpBuilds/rust-cache@9d0cc3090d9c87de74ea67617b246e978735b1a1 # v2.9.1 with: - shared-key: ${{ env.RUST_CACHE_SUFFIX }} - prefix-key: ${{ env.RUST_CACHE_PREFIX }} - cache-workspace-crates: true - save-if: false + shared-key: ${{ github.job }} + prefix-key: ${{ env.CACHE_PREFIX }} + save-if: ${{ env.SAVE_CACHE }} - uses: taiki-e/install-action@055f5df8c3f65ea01cd41e9dc855becd88953486 # v2.75.18 with: tool: nextest@0.9.122 @@ -216,12 +185,12 @@ jobs: cargo run --bin miden-node-stress-test seed-store \ --data-directory ${{ env.DATA_DIR }} \ --num-accounts 500 --public-accounts-percentage 50 - # TODO re-introduce + # TODO re-introduce # - name: Benchmark state sync - # run: | - # cargo run --bin miden-node-stress-test benchmark-store \ - # --data-directory ${{ env.DATA_DIR }} \ - # --iterations 10 --concurrency 1 sync-state + # run: | + # cargo run --bin miden-node-stress-test benchmark-store \ + # --data-directory ${{ env.DATA_DIR }} \ + # --iterations 10 --concurrency 1 sync-state - name: Benchmark notes sync run: | cargo run --bin miden-node-stress-test benchmark-store \ @@ -233,48 +202,13 @@ jobs: --data-directory ${{ env.DATA_DIR }} \ --iterations 10 --concurrency 1 
sync-nullifiers --prefixes 10 - cleanup-run-cache: - name: cleanup run cache - runs-on: ubuntu-24.04 - if: ${{ always() && github.event_name == 'pull_request' }} - needs: - - build - - clippy - - tests - - doc - - stress-test - permissions: - actions: write - contents: read - steps: - - name: Delete PR rust cache - env: - GH_TOKEN: ${{ github.token }} - run: | - mapfile -t cache_entries < <( - gh api \ - -H "Accept: application/vnd.github+json" \ - "/repos/${{ github.repository }}/actions/caches?key=${{ env.RUST_CACHE_PREFIX }}&ref=${{ github.ref }}" \ - --jq '.actions_caches[] | @json' - ) - for cache_entry in "${cache_entries[@]}"; do - cache_id="$(jq -r '.id' <<< "${cache_entry}")" - cache_key="$(jq -r '.key' <<< "${cache_entry}")" - echo "Deleting rust cache key=${cache_key}" - gh api \ - --method DELETE \ - -H "Accept: application/vnd.github+json" \ - "/repos/${{ github.repository }}/actions/caches/${cache_id}" - done - # =============================================================================================== # WASM related jobs # =============================================================================================== # Tests the miden-remote-prover-client WASM support. # - # The WASM build is incompatible with the build job's cache, thankfully this compilation is fairly - # quick so we don't need a separate cache here. + # The WASM build is incompatible with native build caches, so it uses a dedicated cache. 
client-wasm: name: wasm targets runs-on: warp-ubuntu-latest-x64-8x @@ -284,6 +218,11 @@ jobs: persist-credentials: false - name: Rustup run: rustup toolchain install --no-self-update + - uses: WarpBuilds/rust-cache@9d0cc3090d9c87de74ea67617b246e978735b1a1 # v2.9.1 + with: + shared-key: ${{ github.job }} + prefix-key: ${{ env.CACHE_PREFIX }} + save-if: ${{ env.SAVE_CACHE }} - name: cargo build run: | cargo build --locked -p miden-remote-prover-client \ From 97c932420fc36ae271d75ef543f7841fa02bcd47 Mon Sep 17 00:00:00 2001 From: KOVACS Krisztian Date: Mon, 4 May 2026 15:49:32 +0200 Subject: [PATCH 22/28] feat(store): use RocksDB-backed persistent account state forest (#2020) Co-authored-by: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> --- CHANGELOG.md | 4 + Cargo.lock | 1 + crates/store/Cargo.toml | 2 +- crates/store/src/account_state_forest/mod.rs | 59 +++- crates/store/src/db/mod.rs | 14 + .../store/src/db/models/queries/accounts.rs | 106 +++++++ crates/store/src/errors.rs | 13 + crates/store/src/state/loader.rs | 286 +++++++++++++++++- crates/store/src/state/mod.rs | 48 ++- crates/utils/src/clap.rs | 13 +- crates/utils/src/clap/rocksdb.rs | 59 ++++ 11 files changed, 564 insertions(+), 41 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 569d8257ed..6d2031391a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,9 @@ # Changelog +## v0.14.11 (TBD) + +- Implement persistent RocksDB backend for `AccountStateForest`, improving startup time ([#2020](https://github.com/0xMiden/node/pull/2020)). + ## v0.14.10 (2026-05-29) - Optimize `GetAccount` implementation to serve vault assets from `AccountStateForest` ([#1981](https://github.com/0xMiden/node/pull/1981)). 
diff --git a/Cargo.lock b/Cargo.lock index f16e6e7040..33a6fe6a6b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2993,6 +2993,7 @@ dependencies = [ "rand_core 0.9.5", "rand_hc", "rayon", + "rocksdb", "serde", "sha2", "sha3", diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index aa8cfc3a27..851e9d5b6e 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -72,7 +72,7 @@ termtree = "1.0" [features] default = ["rocksdb"] -rocksdb = ["dep:miden-large-smt-backend-rocksdb", "miden-node-utils/rocksdb"] +rocksdb = ["dep:miden-large-smt-backend-rocksdb", "miden-crypto/persistent-forest", "miden-node-utils/rocksdb"] [[bench]] harness = false diff --git a/crates/store/src/account_state_forest/mod.rs b/crates/store/src/account_state_forest/mod.rs index ca58c13871..26513dc9cd 100644 --- a/crates/store/src/account_state_forest/mod.rs +++ b/crates/store/src/account_state_forest/mod.rs @@ -2,7 +2,9 @@ use std::collections::BTreeSet; use std::num::NonZeroUsize; use miden_crypto::hash::rpo::Rpo256; -use miden_crypto::merkle::smt::ForestInMemoryBackend; +#[cfg(feature = "rocksdb")] +use miden_crypto::merkle::smt::ForestPersistentBackend; +use miden_crypto::merkle::smt::{Backend, ForestInMemoryBackend}; use miden_node_proto::domain::account::{AccountStorageMapDetails, AccountVaultDetails}; use miden_node_utils::ErrorReport; use miden_node_utils::lru_cache::LruCache; @@ -64,6 +66,15 @@ pub enum WitnessError { AssetError(#[from] AssetError), } +#[cfg(feature = "rocksdb")] +pub(crate) type AccountStateForestBackend = ForestPersistentBackend; +#[cfg(not(feature = "rocksdb"))] +pub(crate) type AccountStateForestBackend = ForestInMemoryBackend; + +const fn empty_smt_root() -> Word { + *EmptySubtreeRoots::entry(SMT_DEPTH, 0) +} + // ACCOUNT STATE FOREST // ================================================================================================ @@ -76,10 +87,10 @@ pub enum AccountStorageMapResult { } /// Container for forest-related state that needs to be 
updated atomically. -pub(crate) struct AccountStateForest { +pub(crate) struct AccountStateForest { /// `LargeSmtForest` for efficient account storage reconstruction. /// Populated during block import with storage and vault SMTs. - forest: LargeSmtForest, + forest: LargeSmtForest, /// Reverse lookup from hashed SMT storage keys to raw storage map keys. /// @@ -88,7 +99,8 @@ pub(crate) struct AccountStateForest { storage_map_key_cache: LruCache, } -impl AccountStateForest { +#[cfg(test)] +impl AccountStateForest { pub(crate) fn new() -> Self { Self { forest: Self::create_forest(), @@ -99,19 +111,36 @@ impl AccountStateForest { } } + /// Returns the root of an empty SMT. + pub(crate) const fn empty_smt_root() -> Word { + empty_smt_root() + } + fn create_forest() -> LargeSmtForest { let backend = ForestInMemoryBackend::new(); LargeSmtForest::new(backend).expect("in-memory backend should initialize") } +} - // HELPERS - // -------------------------------------------------------------------------------------------- +impl AccountStateForest { + pub(crate) fn from_backend(backend: B) -> Result { + Ok(Self { + forest: LargeSmtForest::new(backend)?, + storage_map_key_cache: LruCache::new( + NonZeroUsize::new(HASHED_STORAGE_MAP_KEY_CACHE_CAPACITY) + .expect("storage map key cache capacity must be non-zero"), + ), + }) + } - /// Returns the root of an empty SMT. - const fn empty_smt_root() -> Word { - *EmptySubtreeRoots::entry(SMT_DEPTH, 0) + #[cfg(feature = "rocksdb")] + pub(crate) fn lineage_count(&self) -> usize { + self.forest.lineage_count() } + // HELPERS + // -------------------------------------------------------------------------------------------- + #[cfg(test)] fn tree_id_for_root( &self, @@ -517,9 +546,9 @@ impl AccountStateForest { /// Retrieves the most recent vault SMT root for an account. If no vault root is found for the /// account, returns an empty SMT root. 
- fn get_latest_vault_root(&self, account_id: AccountId) -> Word { + pub(crate) fn get_latest_vault_root(&self, account_id: AccountId) -> Word { let lineage = Self::vault_lineage_id(account_id); - self.forest.latest_root(lineage).unwrap_or_else(Self::empty_smt_root) + self.forest.latest_root(lineage).unwrap_or_else(empty_smt_root) } /// Inserts asset vault data into the forest for the specified account. Assumes that asset @@ -532,7 +561,7 @@ impl AccountStateForest { ) -> Result<(), AccountStateForestError> { let prev_root = self.get_latest_vault_root(account_id); let lineage = Self::vault_lineage_id(account_id); - assert_eq!(prev_root, Self::empty_smt_root(), "account should not be in the forest"); + assert_eq!(prev_root, empty_smt_root(), "account should not be in the forest"); assert!( self.forest.latest_version(lineage).is_none(), "account should not be in the forest" @@ -602,7 +631,7 @@ impl AccountStateForest { for (slot_name, map_delta) in storage_delta.maps() { // get the latest root for this map, and make sure the root is for an empty tree let prev_root = self.get_latest_storage_map_root(account_id, slot_name); - assert_eq!(prev_root, Self::empty_smt_root(), "account should not be in the forest"); + assert_eq!(prev_root, empty_smt_root(), "account should not be in the forest"); let raw_map_entries: Vec<(StorageMapKey, Word)> = Vec::from_iter(map_delta.entries().iter().filter_map(|(&key, &value)| { @@ -734,13 +763,13 @@ impl AccountStateForest { // -------------------------------------------------------------------------------------------- /// Retrieves the most recent storage map SMT root for an account slot. 
- fn get_latest_storage_map_root( + pub(crate) fn get_latest_storage_map_root( &self, account_id: AccountId, slot_name: &StorageSlotName, ) -> Word { let lineage = Self::storage_lineage_id(account_id, slot_name); - self.forest.latest_root(lineage).unwrap_or_else(Self::empty_smt_root) + self.forest.latest_root(lineage).unwrap_or_else(empty_smt_root) } /// Updates the forest with storage map changes from a delta. diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 1f26619374..c988b1e963 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -35,6 +35,7 @@ pub use crate::db::models::queries::{ AccountCommitmentsPage, NullifiersPage, PublicAccountIdsPage, + PublicAccountStateRootsPage, }; use crate::db::models::queries::{BlockHeaderCommitment, StorageMapValuesPage}; use crate::db::models::{Page, queries}; @@ -410,6 +411,19 @@ impl Db { .await } + /// Returns a page of public account state roots for forest consistency verification. + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn select_public_account_state_roots_paged( + &self, + page_size: std::num::NonZeroUsize, + after_account_id: Option, + ) -> Result { + self.transact("read public account state roots paged", move |conn| { + queries::select_public_account_state_roots_paged(conn, page_size, after_account_id) + }) + .await + } + /// Loads public account details from the DB. #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn select_account(&self, id: AccountId) -> Result { diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index 29e068ba55..503d135517 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -340,6 +340,24 @@ pub struct PublicAccountIdsPage { pub next_cursor: Option, } +/// Latest account state forest roots for a public account. 
+#[derive(Debug)] +pub struct PublicAccountStateRoots { + pub account_id: AccountId, + pub vault_root: Word, + pub storage_header: AccountStorageHeader, +} + +/// Page of public account state roots returned by +/// [`select_public_account_state_roots_paged`]. +#[derive(Debug)] +pub struct PublicAccountStateRootsPage { + /// The public account state roots in this page. + pub accounts: Vec, + /// If `Some`, there are more results. Use this as the `after_account_id` for the next page. + pub next_cursor: Option, +} + /// Selects public account IDs with pagination. /// /// Returns up to `page_size` public account IDs, starting after `after_account_id` if provided. @@ -400,6 +418,94 @@ pub(crate) fn select_public_account_ids_paged( Ok(PublicAccountIdsPage { account_ids, next_cursor }) } +/// Selects public account vault roots and storage headers with pagination. +/// +/// Returns up to `page_size` public account states, starting after `after_account_id` if provided. +/// Results are ordered by `account_id` for stable pagination. +/// +/// Public accounts are those with `AccountStorageMode::Public` or `AccountStorageMode::Network`. +/// We identify them by checking `code_commitment IS NOT NULL` - public accounts store their full +/// state (including `code_commitment`), while private accounts only store the `account_commitment`. 
+/// +/// # Raw SQL +/// +/// ```sql +/// SELECT +/// account_id, +/// vault_root, +/// storage_header +/// FROM +/// accounts +/// WHERE +/// is_latest = 1 +/// AND code_commitment IS NOT NULL +/// AND (account_id > :after_account_id OR :after_account_id IS NULL) +/// ORDER BY +/// account_id ASC +/// LIMIT :page_size + 1 +/// ``` +pub(crate) fn select_public_account_state_roots_paged( + conn: &mut SqliteConnection, + page_size: NonZeroUsize, + after_account_id: Option, +) -> Result { + #[expect(clippy::cast_possible_wrap)] + let limit = (page_size.get() + 1) as i64; + + let mut query = SelectDsl::select( + schema::accounts::table, + ( + schema::accounts::account_id, + schema::accounts::vault_root, + schema::accounts::storage_header, + ), + ) + .filter(schema::accounts::is_latest.eq(true)) + .filter(schema::accounts::code_commitment.is_not_null()) + .order_by(schema::accounts::account_id.asc()) + .limit(limit) + .into_boxed(); + + if let Some(cursor) = after_account_id { + query = query.filter(schema::accounts::account_id.gt(cursor.to_bytes())); + } + + let raw = query.load::<(Vec, Option>, Option>)>(conn)?; + + let mut accounts: Vec = Result::from_iter(raw.into_iter().map( + |(account_id_bytes, vault_root_bytes, storage_header_bytes)| { + let account_id = AccountId::read_from_bytes(&account_id_bytes) + .map_err(DatabaseError::DeserializationError)?; + let vault_root_bytes = vault_root_bytes.ok_or_else(|| { + DatabaseError::DataCorrupted(format!( + "public account {account_id} is missing a vault root" + )) + })?; + let storage_header_bytes = storage_header_bytes.ok_or_else(|| { + DatabaseError::DataCorrupted(format!( + "public account {account_id} is missing a storage header" + )) + })?; + + Ok::<_, DatabaseError>(PublicAccountStateRoots { + account_id, + vault_root: Word::read_from_bytes(&vault_root_bytes)?, + storage_header: AccountStorageHeader::read_from_bytes(&storage_header_bytes)?, + }) + }, + ))?; + + // If we got more than page_size, there are more 
results. + let next_cursor = if accounts.len() > page_size.get() { + accounts.pop(); + accounts.last().map(|account| account.account_id) + } else { + None + }; + + Ok(PublicAccountStateRootsPage { accounts, next_cursor }) +} + /// Select account vault assets within a block range (inclusive). /// /// # Parameters diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index 1e008b5935..26d9953542 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -120,6 +120,8 @@ pub enum StateInitializationError { AccountTreeIoError(String), #[error("nullifier tree IO error: {0}")] NullifierTreeIoError(String), + #[error("account state forest IO error: {0}")] + AccountStateForestIoError(String), #[error("database error")] DatabaseError(#[from] DatabaseError), #[error("failed to create nullifier tree")] @@ -145,6 +147,17 @@ pub enum StateInitializationError { tree_root: Word, block_root: Word, }, + #[error( + "account state forest root ({forest_root}) does not match SQLite root \ + ({database_root}) for account {account_id}, slot {slot_name:?}. Delete the account \ + state forest storage directory and restart the node to rebuild from the database." 
+ )] + AccountStateForestStorageDiverged { + account_id: AccountId, + slot_name: Option, + forest_root: Word, + database_root: Word, + }, #[error("public account {0} is missing details in database")] PublicAccountMissingDetails(AccountId), #[error("failed to convert account to delta: {0}")] diff --git a/crates/store/src/state/loader.rs b/crates/store/src/state/loader.rs index 3863f4afbc..4400121f38 100644 --- a/crates/store/src/state/loader.rs +++ b/crates/store/src/state/loader.rs @@ -13,9 +13,14 @@ use std::num::NonZeroUsize; use std::path::Path; use miden_crypto::merkle::mmr::Mmr; +use miden_crypto::merkle::smt::{Backend, ForestInMemoryBackend}; +#[cfg(feature = "rocksdb")] +use miden_crypto::merkle::smt::{ForestPersistentBackend, PersistentBackendConfig}; #[cfg(feature = "rocksdb")] use miden_large_smt_backend_rocksdb::RocksDbStorage; +#[cfg(feature = "rocksdb")] use miden_node_utils::clap::RocksDbOptions; +use miden_protocol::account::{AccountId, AccountStorageHeader, StorageSlotType}; use miden_protocol::block::account_tree::{AccountIdKey, AccountTree}; use miden_protocol::block::nullifier_tree::NullifierTree; use miden_protocol::block::{BlockNumber, Blockchain}; @@ -42,6 +47,9 @@ pub const ACCOUNT_TREE_STORAGE_DIR: &str = "accounttree"; /// Directory name for the nullifier tree storage within the data directory. pub const NULLIFIER_TREE_STORAGE_DIR: &str = "nullifiertree"; +/// Directory name for the account state forest storage within the data directory. +pub const ACCOUNT_STATE_FOREST_STORAGE_DIR: &str = "accountstateforest"; + /// Page size for loading account commitments from the database during tree rebuilding. /// This limits memory usage when rebuilding trees with millions of accounts. 
const ACCOUNT_COMMITMENTS_PAGE_SIZE: NonZeroUsize = NonZeroUsize::new(10_000).unwrap(); @@ -90,7 +98,7 @@ fn block_num_to_nullifier_leaf(block_num: BlockNumber) -> Word { Word::from([Felt::from(block_num), Felt::ZERO, Felt::ZERO, Felt::ZERO]) } -// STORAGE LOADER TRAIT +// TREE STORAGE LOADER TRAIT // ================================================================================================ /// Trait for loading trees from storage. @@ -101,7 +109,7 @@ fn block_num_to_nullifier_leaf(block_num: BlockNumber) -> Word { /// Missing or corrupted storage is handled by the `verify_tree_consistency` check after loading, /// which detects divergence between persistent storage and the database. If divergence is detected, /// the user should manually delete the tree storage directories and restart the node. -pub trait StorageLoader: SmtStorage + Sized { +pub trait TreeStorageLoader: SmtStorage + Sized { /// A configuration type for the implementation. type Config: std::fmt::Debug + std::default::Default; /// Creates a storage backend for the given domain. @@ -124,11 +132,38 @@ pub trait StorageLoader: SmtStorage + Sized { ) -> impl Future>, StateInitializationError>> + Send; } +// ACCOUNT FOREST LOADER TRAIT +// ================================================================================================ + +/// Trait for loading account state forests from storage. +/// +/// For `ForestInMemoryBackend`, the forest is rebuilt from database entries on each startup. For +/// `ForestPersistentBackend`, the forest is loaded directly from disk if data exists, otherwise it +/// is rebuilt from the database and persisted. +pub trait AccountForestLoader: Backend + Sized { + /// A configuration type for the implementation. + type Config: std::fmt::Debug + std::default::Default; + + /// Creates a forest backend for the given domain. 
+ fn create( + data_dir: &Path, + storage_options: &Self::Config, + domain: &'static str, + ) -> Result; + + /// Loads the account state forest, either from persistent storage or by rebuilding from DB. + fn load_account_state_forest( + self, + db: &mut Db, + block_num: BlockNumber, + ) -> impl Future, StateInitializationError>> + Send; +} + // MEMORY STORAGE IMPLEMENTATION // ================================================================================================ #[cfg(not(feature = "rocksdb"))] -impl StorageLoader for MemoryStorage { +impl TreeStorageLoader for MemoryStorage { type Config = (); fn create( _data_dir: &Path, @@ -221,7 +256,7 @@ impl StorageLoader for MemoryStorage { // ================================================================================================ #[cfg(feature = "rocksdb")] -impl StorageLoader for RocksDbStorage { +impl TreeStorageLoader for RocksDbStorage { type Config = RocksDbOptions; fn create( data_dir: &Path, @@ -335,6 +370,83 @@ impl StorageLoader for RocksDbStorage { } } +// ACCOUNT FOREST BACKEND IMPLEMENTATIONS +// ================================================================================================ + +impl AccountForestLoader for ForestInMemoryBackend { + type Config = (); + + fn create( + _data_dir: &Path, + _storage_options: &Self::Config, + _domain: &'static str, + ) -> Result { + Ok(ForestInMemoryBackend::new()) + } + + #[instrument(target = COMPONENT, skip_all, fields(block.number = %block_num))] + async fn load_account_state_forest( + self, + db: &mut Db, + block_num: BlockNumber, + ) -> Result, StateInitializationError> { + let mut forest = AccountStateForest::from_backend(self) + .map_err(|e| StateInitializationError::AccountStateForestIoError(e.to_string()))?; + rebuild_account_state_forest(&mut forest, db, block_num).await?; + Ok(forest) + } +} + +#[cfg(feature = "rocksdb")] +impl AccountForestLoader for ForestPersistentBackend { + type Config = RocksDbOptions; + + fn create( + data_dir: 
&Path, + storage_options: &Self::Config, + domain: &'static str, + ) -> Result { + let storage_path = data_dir.join(domain); + fs_err::create_dir_all(&storage_path) + .map_err(|e| StateInitializationError::AccountStateForestIoError(e.to_string()))?; + + let max_open_files = usize::try_from(storage_options.max_open_fds).map_err(|_| { + StateInitializationError::AccountStateForestIoError(format!( + "invalid account state forest RocksDB max_open_fds: {}", + storage_options.max_open_fds + )) + })?; + let config = PersistentBackendConfig::new(&storage_path) + .map_err(|e| StateInitializationError::AccountStateForestIoError(e.to_string()))? + .with_cache_size_bytes(storage_options.cache_size_in_bytes) + .with_max_open_files(max_open_files); + + ForestPersistentBackend::load(config) + .map_err(|e| StateInitializationError::AccountStateForestIoError(e.to_string())) + } + + #[instrument(target = COMPONENT, skip_all, fields(block.number = %block_num))] + async fn load_account_state_forest( + self, + db: &mut Db, + block_num: BlockNumber, + ) -> Result, StateInitializationError> { + let mut forest = AccountStateForest::from_backend(self) + .map_err(|e| StateInitializationError::AccountStateForestIoError(e.to_string()))?; + + if forest.lineage_count() != 0 { + return Ok(forest); + } + + info!( + target: COMPONENT, + "RocksDB account state forest storage is empty, populating from SQLite" + ); + rebuild_account_state_forest(&mut forest, db, block_num).await?; + Ok(forest) + } +} + // HELPER FUNCTIONS // ================================================================================================ @@ -361,15 +473,15 @@ pub async fn load_mmr(db: &mut Db) -> Result, db: &mut Db, block_num: BlockNumber, -) -> Result { +) -> Result<(), StateInitializationError> { use miden_protocol::account::delta::AccountDelta; - let mut forest = AccountStateForest::new(); let mut cursor = None; loop { @@ -402,7 +514,7 @@ pub async fn load_smt_forest( } } - Ok(forest) + Ok(()) } // CONSISTENCY 
VERIFICATION @@ -456,3 +568,159 @@ pub async fn verify_tree_consistency( Ok(()) } + +/// Verifies that the account state forest matches latest public account roots from SQLite. +/// +/// This check ensures persisted account state forest has not diverged from the latest +/// account states in SQLite. When the forest is rebuilt from SQLite, it will naturally +/// match; when loaded from `RocksDB`, this catches corruption or incomplete shutdown. +#[instrument(target = COMPONENT, skip_all)] +pub async fn verify_account_state_forest_consistency( + forest: &AccountStateForest, + db: &mut Db, +) -> Result<(), StateInitializationError> { + let mut cursor = None; + + loop { + let page = db + .select_public_account_state_roots_paged(PUBLIC_ACCOUNT_IDS_PAGE_SIZE, cursor) + .await?; + + if page.accounts.is_empty() { + break; + } + + for account in page.accounts { + verify_account_state_forest_record( + forest, + account.account_id, + account.vault_root, + &account.storage_header, + )?; + } + + cursor = page.next_cursor; + if cursor.is_none() { + break; + } + } + + Ok(()) +} + +fn verify_account_state_forest_record( + forest: &AccountStateForest, + account_id: AccountId, + vault_root: Word, + storage_header: &AccountStorageHeader, +) -> Result<(), StateInitializationError> { + let forest_vault_root = forest.get_latest_vault_root(account_id); + if forest_vault_root != vault_root { + return Err(StateInitializationError::AccountStateForestStorageDiverged { + account_id, + slot_name: None, + forest_root: forest_vault_root, + database_root: vault_root, + }); + } + + for slot in storage_header.slots() { + if slot.slot_type() != StorageSlotType::Map { + continue; + } + + let forest_root = forest.get_latest_storage_map_root(account_id, slot.name()); + let database_root = slot.value(); + if forest_root != database_root { + return Err(StateInitializationError::AccountStateForestStorageDiverged { + account_id, + slot_name: Some(slot.name().to_string()), + forest_root, + database_root, + 
}); + } + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use miden_protocol::account::{ + AccountId, + AccountStorageHeader, + StorageSlotHeader, + StorageSlotName, + StorageSlotType, + }; + use miden_protocol::testing::account_id::ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE; + + use super::*; + + #[test] + fn account_state_forest_consistency_detects_storage_map_root_mismatch() { + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE) + .expect("test account ID should be valid"); + let slot_name = + StorageSlotName::new("account::balances").expect("slot name should be valid"); + let expected_storage_root = Word::from([1, 0, 0, 0u32]); + let storage_header = AccountStorageHeader::new(vec![StorageSlotHeader::new( + slot_name.clone(), + StorageSlotType::Map, + expected_storage_root, + )]) + .expect("storage header should be valid"); + let forest = AccountStateForest::new(); + + let error = verify_account_state_forest_record( + &forest, + account_id, + AccountStateForest::empty_smt_root(), + &storage_header, + ) + .expect_err("storage map root mismatch should be detected"); + + assert_matches::assert_matches!( + error, + StateInitializationError::AccountStateForestStorageDiverged { + account_id: actual_account_id, + slot_name: Some(actual_slot_name), + forest_root, + database_root, + } if actual_account_id == account_id + && actual_slot_name == slot_name.to_string() + && forest_root == AccountStateForest::empty_smt_root() + && database_root == expected_storage_root + ); + } + + #[test] + fn account_state_forest_consistency_detects_vault_root_mismatch() { + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE) + .expect("test account ID should be valid"); + let expected_vault_root = Word::from([2, 0, 0, 0u32]); + let storage_header = + AccountStorageHeader::new(Vec::new()).expect("storage header should be valid"); + let forest = AccountStateForest::new(); + + let error = 
verify_account_state_forest_record( + &forest, + account_id, + expected_vault_root, + &storage_header, + ) + .expect_err("vault root mismatch should be detected"); + + assert_matches::assert_matches!( + error, + StateInitializationError::AccountStateForestStorageDiverged { + account_id: actual_account_id, + slot_name: None, + forest_root, + database_root, + } if actual_account_id == account_id + && forest_root == AccountStateForest::empty_smt_root() + && database_root == expected_vault_root + ); + } +} diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index 11d9531ac3..16d377a370 100644 --- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -37,7 +37,12 @@ use miden_protocol::transaction::PartialBlockchain; use tokio::sync::{Mutex, RwLock}; use tracing::{Instrument, info, instrument}; -use crate::account_state_forest::{AccountStateForest, AccountStorageMapResult, WitnessError}; +use crate::account_state_forest::{ + AccountStateForest, + AccountStateForestBackend, + AccountStorageMapResult, + WitnessError, +}; use crate::accounts::AccountTreeWithHistory; use crate::blocks::BlockStore; use crate::db::models::Page; @@ -57,12 +62,14 @@ use crate::{COMPONENT, DataDirectory}; mod loader; use loader::{ + ACCOUNT_STATE_FOREST_STORAGE_DIR, ACCOUNT_TREE_STORAGE_DIR, + AccountForestLoader, NULLIFIER_TREE_STORAGE_DIR, - StorageLoader, TreeStorage, + TreeStorageLoader, load_mmr, - load_smt_forest, + verify_account_state_forest_consistency, verify_tree_consistency, }; @@ -117,7 +124,7 @@ pub struct State { inner: RwLock>, /// Forest-related state `(SmtForest, storage_map_roots, vault_roots)` with its own lock. - forest: RwLock, + forest: RwLock>, /// To allow readers to access the tree data while an update in being performed, and prevent /// TOCTOU issues, there must be no concurrent writers. This locks to serialize the writers. 
@@ -154,18 +161,23 @@ impl State { let blockchain = load_mmr(&mut db).await?; let latest_block_num = blockchain.chain_tip().unwrap_or(BlockNumber::GENESIS); - let account_storage = TreeStorage::create( - data_path, - &storage_options.account_tree.into(), - ACCOUNT_TREE_STORAGE_DIR, - )?; + #[cfg(feature = "rocksdb")] + let (account_storage_config, nullifier_storage_config, forest_storage_config) = ( + storage_options.account_tree.into(), + storage_options.nullifier_tree.into(), + storage_options.account_state_forest.into(), + ); + #[cfg(not(feature = "rocksdb"))] + let (account_storage_config, nullifier_storage_config, forest_storage_config) = { + let _ = &storage_options; + ((), (), ()) + }; + let account_storage = + TreeStorage::create(data_path, &account_storage_config, ACCOUNT_TREE_STORAGE_DIR)?; let account_tree = account_storage.load_account_tree(&mut db).await?; - let nullifier_storage = TreeStorage::create( - data_path, - &storage_options.nullifier_tree.into(), - NULLIFIER_TREE_STORAGE_DIR, - )?; + let nullifier_storage = + TreeStorage::create(data_path, &nullifier_storage_config, NULLIFIER_TREE_STORAGE_DIR)?; let nullifier_tree = nullifier_storage.load_nullifier_tree(&mut db).await?; // Verify that tree roots match the expected roots from the database. 
@@ -175,7 +187,13 @@ impl State { let account_tree = AccountTreeWithHistory::new(account_tree, latest_block_num); - let forest = load_smt_forest(&mut db, latest_block_num).await?; + let forest_backend = AccountStateForestBackend::create( + data_path, + &forest_storage_config, + ACCOUNT_STATE_FOREST_STORAGE_DIR, + )?; + let forest = forest_backend.load_account_state_forest(&mut db, latest_block_num).await?; + verify_account_state_forest_consistency(&forest, &mut db).await?; let inner = RwLock::new(InnerState { nullifier_tree, blockchain, account_tree }); diff --git a/crates/utils/src/clap.rs b/crates/utils/src/clap.rs index 079a619d35..a1b97bc336 100644 --- a/crates/utils/src/clap.rs +++ b/crates/utils/src/clap.rs @@ -149,6 +149,9 @@ pub struct StorageOptions { #[cfg(feature = "rocksdb")] #[clap(flatten)] pub nullifier_tree: NullifierTreeRocksDbOptions, + #[cfg(feature = "rocksdb")] + #[clap(flatten)] + pub account_state_forest: AccountStateForestRocksDbOptions, } impl StorageOptions { @@ -166,7 +169,15 @@ impl StorageOptions { max_open_fds: BENCH_ROCKSDB_MAX_OPEN_FDS, cache_size_in_bytes: DEFAULT_ROCKSDB_CACHE_SIZE, }; - Self { account_tree, nullifier_tree } + let account_state_forest = AccountStateForestRocksDbOptions { + max_open_fds: BENCH_ROCKSDB_MAX_OPEN_FDS, + cache_size_in_bytes: DEFAULT_ROCKSDB_CACHE_SIZE, + }; + Self { + account_tree, + nullifier_tree, + account_state_forest, + } } #[cfg(not(feature = "rocksdb"))] Self::default() diff --git a/crates/utils/src/clap/rocksdb.rs b/crates/utils/src/clap/rocksdb.rs index 572e5b1cf7..8e30fc1674 100644 --- a/crates/utils/src/clap/rocksdb.rs +++ b/crates/utils/src/clap/rocksdb.rs @@ -58,6 +58,31 @@ impl Default for AccountTreeRocksDbOptions { } } +/// Per usage options for rocksdb configuration +#[derive(clap::Args, Clone, Debug, PartialEq, Eq)] +pub struct AccountStateForestRocksDbOptions { + #[arg( + id = "account_state_forest_rocksdb_max_open_fds", + long = "account_state_forest.rocksdb.max_open_fds", + 
default_value_t = DEFAULT_ROCKSDB_MAX_OPEN_FDS, + value_name = "ACCOUNT_STATE_FOREST__ROCKSDB__MAX_OPEN_FDS" + )] + pub max_open_fds: i32, + #[arg( + id = "account_state_forest_rocksdb_max_cache_size", + long = "account_state_forest.rocksdb.max_cache_size", + default_value_t = DEFAULT_ROCKSDB_CACHE_SIZE, + value_name = "ACCOUNT_STATE_FOREST__ROCKSDB__CACHE_SIZE" + )] + pub cache_size_in_bytes: usize, +} + +impl Default for AccountStateForestRocksDbOptions { + fn default() -> Self { + RocksDbOptions::default().into() + } +} + /// General confiration options for rocksdb. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct RocksDbOptions { @@ -88,6 +113,13 @@ impl From for RocksDbOptions { } } +impl From for RocksDbOptions { + fn from(value: AccountStateForestRocksDbOptions) -> Self { + let AccountStateForestRocksDbOptions { max_open_fds, cache_size_in_bytes } = value; + Self { max_open_fds, cache_size_in_bytes } + } +} + impl From for AccountTreeRocksDbOptions { fn from(value: RocksDbOptions) -> Self { let RocksDbOptions { max_open_fds, cache_size_in_bytes } = value; @@ -102,6 +134,13 @@ impl From for NullifierTreeRocksDbOptions { } } +impl From for AccountStateForestRocksDbOptions { + fn from(value: RocksDbOptions) -> Self { + let RocksDbOptions { max_open_fds, cache_size_in_bytes } = value; + Self { max_open_fds, cache_size_in_bytes } + } +} + impl RocksDbOptions { pub fn with_path(self, path: &Path) -> RocksDbConfig { RocksDbConfig::new(path) @@ -109,3 +148,23 @@ impl RocksDbOptions { .with_max_open_files(self.max_open_fds) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn account_state_forest_options_roundtrip_general_rocksdb_options() { + let options = AccountStateForestRocksDbOptions { + max_open_fds: 123, + cache_size_in_bytes: 456, + }; + + let general = RocksDbOptions::from(options.clone()); + assert_eq!(general.max_open_fds, options.max_open_fds); + assert_eq!(general.cache_size_in_bytes, options.cache_size_in_bytes); + + let roundtrip = 
AccountStateForestRocksDbOptions::from(general); + assert_eq!(roundtrip, options); + } +} From 41c2acab85b0f1db1b063f9146c86865b69085ce Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Mon, 4 May 2026 16:32:27 +0200 Subject: [PATCH 23/28] chore(telemetry): `client.address` should be the resolved client address (#2032) --- crates/utils/src/tracing/grpc.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/crates/utils/src/tracing/grpc.rs b/crates/utils/src/tracing/grpc.rs index 6d88e0fa5c..359d1eae7f 100644 --- a/crates/utils/src/tracing/grpc.rs +++ b/crates/utils/src/tracing/grpc.rs @@ -1,4 +1,5 @@ use http::header::HeaderName; +use tower_governor::key_extractor::{KeyExtractor, SmartIpKeyExtractor}; use tracing::field; use crate::tracing::OpenTelemetrySpanExt; @@ -63,9 +64,17 @@ pub fn grpc_trace_fn(request: &http::Request) -> tracing::Span { .extensions() .get::() .and_then(tonic::transport::server::TcpConnectInfo::remote_addr); - if let Some(addr) = remote_addr { + + // client.address should be the resolved IP address of the client, if available. + // In the case of a reverse proxy, this may not be the same as the remote address. 
+ if let Ok(ip) = SmartIpKeyExtractor.extract(request) { + span.set_attribute("client.address", ip); + } else if let Some(addr) = remote_addr { span.set_attribute("client.address", addr.ip()); span.set_attribute("client.port", addr.port()); + } + + if let Some(addr) = remote_addr { span.set_attribute("network.peer.address", addr.ip()); span.set_attribute("network.peer.port", addr.port()); span.set_attribute("network.transport", "tcp"); From fba447a7c424f230e794a507487750542083425a Mon Sep 17 00:00:00 2001 From: bigeez <159047114+bigeez@users.noreply.github.com> Date: Tue, 5 May 2026 06:40:12 +0100 Subject: [PATCH 24/28] feat(store): analyse database on disk size via filesystem instead of scanning (#2028) Co-authored-by: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> --- crates/store/src/db/mod.rs | 44 +---------- crates/store/src/server/mod.rs | 110 +++++++++++++++++++++++---- crates/store/src/state/mod.rs | 5 -- crates/utils/src/tracing/span_ext.rs | 6 ++ 4 files changed, 101 insertions(+), 64 deletions(-) diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index c988b1e963..80287941b1 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -5,11 +5,10 @@ use std::path::PathBuf; use std::sync::Arc; use anyhow::Context; -use diesel::{Connection, QueryableByName, RunQueryDsl, SqliteConnection}; +use diesel::{Connection, SqliteConnection}; use miden_node_proto::domain::account::AccountInfo; use miden_node_proto::{BlockProofRequest, generated as proto}; use miden_node_utils::limiter::MAX_RESPONSE_PAYLOAD_BYTES; -use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::Word; use miden_protocol::account::{AccountHeader, AccountId, AccountStorageHeader, StorageMapKey}; use miden_protocol::asset::{Asset, AssetVaultKey}; @@ -747,47 +746,6 @@ impl Db { }) } - /// Emits size metrics for each table in the database, and the entire database. 
- #[instrument(target = COMPONENT, skip_all, err)] - pub async fn analyze_table_sizes(&self) -> Result<(), DatabaseError> { - self.transact("db analysis", |conn| { - #[derive(QueryableByName)] - struct TotalSize { - #[diesel(sql_type = diesel::sql_types::BigInt)] - size: i64, - } - - #[derive(QueryableByName)] - struct Table { - #[diesel(sql_type = diesel::sql_types::Text)] - name: String, - #[diesel(sql_type = diesel::sql_types::BigInt)] - size: i64, - } - - let tables = - diesel::sql_query("SELECT name, sum(payload) AS size FROM dbstat GROUP BY name") - .load::(conn)?; - - let span = tracing::Span::current(); - for Table { name, size } in tables { - span.set_attribute(format!("database.table.{name}.size"), size); - } - - let total = diesel::sql_query( - "SELECT page_count * page_size as size FROM pragma_page_count(), pragma_page_size()", - ) - .get_result::(conn)?; - span.set_attribute("database.total.size", total.size); - - Result::<_, DatabaseError>::Ok(()) - }) - .await - .inspect_err(|err| tracing::Span::current().set_error(err))?; - - Ok(()) - } - /// Loads the network notes for an account that are unconsumed by a specified block number. /// Pagination is used to limit the number of notes returned. 
pub(crate) async fn select_unconsumed_network_notes( diff --git a/crates/store/src/server/mod.rs b/crates/store/src/server/mod.rs index 85292bb726..e3afd97387 100644 --- a/crates/store/src/server/mod.rs +++ b/crates/store/src/server/mod.rs @@ -13,12 +13,13 @@ use miden_node_proto_build::{ }; use miden_node_utils::clap::{GrpcOptionsInternal, StorageOptions}; use miden_node_utils::panic::{CatchPanicLayer, catch_panic_layer_fn}; +use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_node_utils::tracing::grpc::grpc_trace_fn; use tokio::net::TcpListener; use tokio::task::JoinSet; use tokio_stream::wrappers::TcpListenerStream; use tower_http::trace::TraceLayer; -use tracing::{info, instrument}; +use tracing::{Instrument, info, info_span, instrument}; use url::Url; use crate::blocks::BlockStore; @@ -88,6 +89,7 @@ impl Store { let rpc_address = self.rpc_listener.local_addr()?; let ntx_builder_address = self.ntx_builder_listener.local_addr()?; let block_producer_address = self.block_producer_listener.local_addr()?; + let data_directory = self.data_directory.clone(); info!(target: COMPONENT, rpc_endpoint=?rpc_address, ntx_builder_endpoint=?ntx_builder_address, block_producer_endpoint=?block_producer_address, ?self.data_directory, ?self.grpc_options.request_timeout, "Loading database"); @@ -143,22 +145,10 @@ impl Store { info!(target: COMPONENT, "Database loaded"); - let mut join_set = JoinSet::new(); - - join_set.spawn(async move { - // Manual tests on testnet indicate each iteration takes ~2s once things are OS cached. - // - // 5 minutes seems like a reasonable interval, where this should have minimal database - // IO impact while providing a decent view into table growth over time. - let mut interval = tokio::time::interval(Duration::from_secs(5 * 60)); - let database = Arc::clone(&state); - loop { - interval.tick().await; - let _ = database.analyze_table_sizes().await; - } - }); + // Spawn disk monitor (fire-and-forget; never causes server shutdown). 
+ let _disk_monitor_task = Self::spawn_disk_monitor(data_directory); - // Build the gRPC server with the API services and trace layer. + let mut join_set = JoinSet::new(); join_set.spawn( tonic::transport::Server::builder() .timeout(self.grpc_options.request_timeout) @@ -206,6 +196,38 @@ impl Store { } } } + + /// Spawns a background task that periodically records the on-disk size of every store data + /// path as `OTel` span attributes. + /// + /// Sizes are measured with [`fs_err::metadata`] (no SQL connections, no lock contention). + /// Errors are logged as warnings and never cause the server to stop. + fn spawn_disk_monitor(data_directory: PathBuf) -> tokio::task::JoinHandle<()> { + tokio::spawn(async move { + let mut interval = tokio::time::interval(Duration::from_mins(5)); + loop { + interval.tick().await; + let dir = data_directory.clone(); + let span = info_span!(target: COMPONENT, "measure disk space usage"); + let result = tokio::task::spawn_blocking(move || measure_disk_usage_bytes(&dir)) + .instrument(span.clone()) + .await; + match result { + Ok(usage) => { + span.set_attribute("db.sqlite.size", usage.sqlite_db); + span.set_attribute("db.sqlite.wal.size", usage.sqlite_wal); + span.set_attribute("db.block_store.size", usage.block_store); + #[cfg(feature = "rocksdb")] + { + span.set_attribute("db.account_tree.size", usage.account_tree); + span.set_attribute("db.nullifier_tree.size", usage.nullifier_tree); + } + }, + Err(err) => span.set_error(&err), + } + } + }) + } } /// Represents the store's data-directory and its content paths. @@ -238,3 +260,59 @@ impl DataDirectory { self.0.display() } } + +// DISK USAGE HELPERS +// ================================================================================================ + +/// Byte counts for each on-disk storage component. 
+struct DiskUsage {
+    sqlite_db: u64,
+    sqlite_wal: u64,
+    block_store: u64,
+    #[cfg(feature = "rocksdb")]
+    account_tree: u64,
+    #[cfg(feature = "rocksdb")]
+    nullifier_tree: u64,
+}
+
+/// Collects on-disk byte sizes for every store data path under `data_dir`.
+///
+/// Uses only [`fs_err::metadata`] and [`fs_err::read_dir`] — no SQLite connections are opened,
+/// so there is no read-lock contention with concurrent database writers.
+fn measure_disk_usage_bytes(data_dir: &Path) -> DiskUsage {
+    DiskUsage {
+        sqlite_db: path_size_bytes(&data_dir.join("miden-store.sqlite3")),
+        sqlite_wal: path_size_bytes(&data_dir.join("miden-store.sqlite3-wal")),
+        block_store: dir_size_bytes(&data_dir.join("blocks")),
+        #[cfg(feature = "rocksdb")]
+        account_tree: dir_size_bytes(&data_dir.join("accounttree")),
+        #[cfg(feature = "rocksdb")]
+        nullifier_tree: dir_size_bytes(&data_dir.join("nullifiertree")),
+    }
+}
+
+/// Returns the byte length of the file at `path`, or `0` if its metadata cannot be read
+/// (e.g. the file does not exist).
+fn path_size_bytes(path: &Path) -> u64 {
+    fs_err::metadata(path).map(|m| m.len()).unwrap_or(0)
+}
+
+/// Returns the total byte length of all files under `path`, traversed iteratively;
+/// unreadable directories and entries are silently skipped rather than failing.
+fn dir_size_bytes(path: &Path) -> u64 {
+    let mut to_process = vec![path.to_path_buf()];
+    let mut total = 0u64;
+    while let Some(dir) = to_process.pop() {
+        let Ok(entries) = fs_err::read_dir(&dir) else {
+            continue;
+        };
+        for entry in entries.flatten() {
+            if let Ok(meta) = entry.metadata() {
+                if meta.is_dir() {
+                    to_process.push(entry.path());
+                } else {
+                    total += meta.len();
+                }
+            }
+        }
+    }
+    total
+}
diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs
index 16d377a370..e842f7271b 100644
--- a/crates/store/src/state/mod.rs
+++ b/crates/store/src/state/mod.rs
@@ -937,11 +937,6 @@ impl State {
         self.inner.read().await.latest_block_num()
     }
 
-    /// Emits metrics for each database table's size.
- pub async fn analyze_table_sizes(&self) -> Result<(), DatabaseError> { - self.db.analyze_table_sizes().await - } - /// Returns the network notes for an account that are unconsumed by a specified block number, /// along with the next pagination token. pub async fn get_unconsumed_network_notes_for_account( diff --git a/crates/utils/src/tracing/span_ext.rs b/crates/utils/src/tracing/span_ext.rs index 07ac008fe3..7f50c82ad8 100644 --- a/crates/utils/src/tracing/span_ext.rs +++ b/crates/utils/src/tracing/span_ext.rs @@ -65,6 +65,12 @@ impl ToValue for usize { } } +impl ToValue for u64 { + fn to_value(&self) -> Value { + i64::try_from(*self).unwrap_or(i64::MAX).into() + } +} + /// Generates `impl ToValue` blocks for types that are `ToString`. macro_rules! impl_to_string_to_value { ($($t:ty),*) => { From b3fb9393c4f3856a960d0c76c5420fa674000f76 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Wed, 6 May 2026 11:35:15 +0200 Subject: [PATCH 25/28] chore: bump dependencies (#2055) --- Cargo.lock | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d952c13c01..6dc505da82 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1406,9 +1406,9 @@ dependencies = [ [[package]] name = "diesel" -version = "2.3.7" +version = "2.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4ae09a41a4b89f94ec1e053623da8340d996bc32c6517d325a9daad9b239358" +checksum = "9940fb8467a0a06312218ed384185cb8536aa10d8ec017d0ce7fad2c1bd882d5" dependencies = [ "bigdecimal", "diesel_derives", @@ -3042,7 +3042,7 @@ dependencies = [ "p3-field", "p3-goldilocks", "paste", - "rand 0.10.0", + "rand 0.10.1", "serde", "subtle", "thiserror 2.0.18", @@ -4209,7 +4209,7 @@ dependencies = [ "p3-maybe-rayon", "p3-util", "paste", - "rand 0.10.0", + "rand 0.10.1", "serde", "tracing", ] @@ -4230,7 +4230,7 @@ dependencies = [ "p3-symmetric", "p3-util", "paste", - "rand 
0.10.0", + "rand 0.10.1", "serde", ] @@ -4255,7 +4255,7 @@ dependencies = [ "p3-field", "p3-maybe-rayon", "p3-util", - "rand 0.10.0", + "rand 0.10.1", "serde", "tracing", ] @@ -4279,7 +4279,7 @@ dependencies = [ "p3-field", "p3-symmetric", "p3-util", - "rand 0.10.0", + "rand 0.10.1", ] [[package]] @@ -4310,7 +4310,7 @@ dependencies = [ "p3-miden-lmcs", "p3-miden-transcript", "p3-util", - "rand 0.10.0", + "rand 0.10.1", "thiserror 2.0.18", "tracing", ] @@ -4350,7 +4350,7 @@ dependencies = [ "p3-miden-transcript", "p3-symmetric", "p3-util", - "rand 0.10.0", + "rand 0.10.1", "serde", "thiserror 2.0.18", "tracing", @@ -4396,7 +4396,7 @@ dependencies = [ "p3-symmetric", "p3-util", "paste", - "rand 0.10.0", + "rand 0.10.1", "serde", "spin 0.10.0", "tracing", @@ -4410,7 +4410,7 @@ checksum = "6a018b618e3fa0aec8be933b1d8e404edd23f46991f6bf3f5c2f3f95e9413fe9" dependencies = [ "p3-field", "p3-symmetric", - "rand 0.10.0", + "rand 0.10.1", ] [[package]] @@ -4423,7 +4423,7 @@ dependencies = [ "p3-mds", "p3-symmetric", "p3-util", - "rand 0.10.0", + "rand 0.10.1", ] [[package]] @@ -4784,7 +4784,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "343d3bd7056eda839b03204e68deff7d1b13aba7af2b2fd16890697274262ee7" dependencies = [ "heck", - "itertools 0.14.0", + "itertools 0.13.0", "log", "multimap", "petgraph 0.8.3", @@ -4805,7 +4805,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" dependencies = [ "anyhow", - "itertools 0.14.0", + "itertools 0.13.0", "proc-macro2", "quote", "syn 2.0.117", @@ -5012,9 +5012,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc266eb313df6c5c09c1c7b1fbe2510961e5bcd3add930c1e31f7ed9da0feff8" +checksum = "d2e8e8bcc7961af1fdac401278c6a831614941f6164ee3bf4ce61b7edb162207" dependencies = [ "rand_core 0.10.0", ] @@ 
-5360,7 +5360,7 @@ dependencies = [ "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.103.10", + "rustls-webpki 0.103.13", "subtle", "zeroize", ] @@ -5401,7 +5401,7 @@ dependencies = [ "rustls 0.23.37", "rustls-native-certs", "rustls-platform-verifier-android", - "rustls-webpki 0.103.10", + "rustls-webpki 0.103.13", "security-framework", "security-framework-sys", "webpki-root-certs", @@ -5426,9 +5426,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.10" +version = "0.103.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df33b2b81ac578cabaf06b89b0631153a3f416b0a886e8a7a1707fb51abbd1ef" +checksum = "61c429a8649f110dddef65e2a5ad240f747e85f7758a6bccc7e5777bd33f756e" dependencies = [ "aws-lc-rs", "ring", From 8f1ed50aabb5cc5d3908c3415523a184217d28fc Mon Sep 17 00:00:00 2001 From: OllieDev Date: Wed, 6 May 2026 11:06:58 +0100 Subject: [PATCH 26/28] fix: replace blocking-in-async (#2041) Co-authored-by: KOVACS Krisztian Co-authored-by: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> --- CHANGELOG.md | 1 + bin/remote-prover/src/server/prover.rs | 20 ++++++-- crates/ntx-builder/src/actor/execute.rs | 60 +++++++++++++++-------- crates/validator/src/signers/mod.rs | 11 ++++- crates/validator/src/tx_validation/mod.rs | 42 +++++++++++----- 5 files changed, 96 insertions(+), 38 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6d2031391a..59a710e028 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ ## v0.14.11 (TBD) +- Replaced blocking-in-async operations in the validator, remote prover, and ntx-builder with `spawn_blocking` to avoid starving the Tokio runtime ([#2041](https://github.com/0xMiden/node/pull/2041)). - Implement persistent RocksDB backend for `AccountStateForest`, improving startup time ([#2020](https://github.com/0xMiden/node/pull/2020)). 
## v0.14.10 (2026-05-29) diff --git a/bin/remote-prover/src/server/prover.rs b/bin/remote-prover/src/server/prover.rs index 2931cc70fe..f8c552138f 100644 --- a/bin/remote-prover/src/server/prover.rs +++ b/bin/remote-prover/src/server/prover.rs @@ -112,8 +112,14 @@ impl ProveRequest for LocalBatchProver { type Output = ProvenBatch; async fn prove(&self, input: Self::Input) -> Result { - self.prove(input) - .map_err(|e| tonic::Status::internal(e.as_report_context("failed to prove batch"))) + let prover = self.clone(); + tokio::task::spawn_blocking(move || { + prover + .prove(input) + .map_err(|e| tonic::Status::internal(e.as_report_context("failed to prove batch"))) + }) + .await + .map_err(|e| tonic::Status::internal(e.as_report_context("batch prover task panicked")))? } } @@ -123,8 +129,14 @@ impl ProveRequest for LocalBlockProver { type Output = BlockProof; async fn prove(&self, input: Self::Input) -> Result { + let prover = self.clone(); let BlockProofRequest { tx_batches, block_header, block_inputs } = input; - self.prove(tx_batches, &block_header, block_inputs) - .map_err(|e| tonic::Status::internal(e.as_report_context("failed to prove block"))) + tokio::task::spawn_blocking(move || { + prover + .prove(tx_batches, &block_header, block_inputs) + .map_err(|e| tonic::Status::internal(e.as_report_context("failed to prove block"))) + }) + .await + .map_err(|e| tonic::Status::internal(e.as_report_context("block prover task panicked")))? } } diff --git a/crates/ntx-builder/src/actor/execute.rs b/crates/ntx-builder/src/actor/execute.rs index 2888e1b774..f001883dcd 100644 --- a/crates/ntx-builder/src/actor/execute.rs +++ b/crates/ntx-builder/src/actor/execute.rs @@ -194,25 +194,35 @@ impl NtxContext { async move { Box::pin(async move { - let data_store = NtxDataStore::new( - account, - chain_tip_header, - chain_mmr, - self.store.clone(), - self.script_cache.clone(), - self.db.clone(), - ); - - // Filter notes. 
let notes = notes.into_iter().map(Note::from).collect::>(); - let (successful_notes, failed_notes) = - self.filter_notes(&data_store, notes).await?; - // Execute transaction. - let executed_tx = Box::pin(self.execute(&data_store, successful_notes)).await?; - - // Collect scripts fetched from the remote store during execution. - let scripts_to_cache = data_store.take_fetched_scripts(); + // VM execution (note filtering + transaction execution) is CPU-intensive and + // may not yield between await points. Run on a dedicated blocking thread, + // using the parent runtime handle so that async data-store callbacks (gRPC + // calls to the store) are driven by the existing I/O driver. + let ctx = self.clone(); + let handle = tokio::runtime::Handle::current(); + let (executed_tx, failed_notes, scripts_to_cache) = + tokio::task::spawn_blocking(move || { + let data_store = NtxDataStore::new( + account, + chain_tip_header, + chain_mmr, + ctx.store.clone(), + ctx.script_cache.clone(), + ctx.db.clone(), + ); + handle.block_on(async { + let (successful_notes, failed_notes) = + ctx.filter_notes(&data_store, notes).await?; + let executed_tx = + Box::pin(ctx.execute(&data_store, successful_notes)).await?; + let scripts_to_cache = data_store.take_fetched_scripts(); + Ok::<_, NtxError>((executed_tx, failed_notes, scripts_to_cache)) + }) + }) + .await + .unwrap_or_else(|e| std::panic::resume_unwind(e.into_panic()))?; // Prove transaction. let tx_inputs: TransactionInputs = executed_tx.into(); @@ -316,13 +326,21 @@ impl NtxContext { #[instrument(target = COMPONENT, name = "ntx.execute_transaction.prove", skip_all, err)] async fn prove(&self, tx_inputs: &TransactionInputs) -> NtxResult { if let Some(remote) = &self.prover { - remote.prove(tx_inputs).await + remote.prove(tx_inputs).await.map_err(NtxError::Proving) } else { - // Only perform tx inputs clone for local proving. + // ZK proof generation is CPU-intensive; run it on a dedicated blocking thread. 
let tx_inputs = tx_inputs.clone(); - LocalTransactionProver::default().prove(tx_inputs).await + tokio::task::spawn_blocking(move || { + tokio::runtime::Builder::new_current_thread() + .enable_all() + .build() + .expect("failed to build tokio runtime") + .block_on(LocalTransactionProver::default().prove(tx_inputs)) + }) + .await + .unwrap_or_else(|e| std::panic::resume_unwind(e.into_panic())) + .map_err(NtxError::Proving) } - .map_err(NtxError::Proving) } /// Submits the transaction to the block producer. diff --git a/crates/validator/src/signers/mod.rs b/crates/validator/src/signers/mod.rs index 21bbeaa7ae..6e6092350f 100644 --- a/crates/validator/src/signers/mod.rs +++ b/crates/validator/src/signers/mod.rs @@ -36,7 +36,16 @@ impl ValidatorSigner { Ok(sig) }, Self::Local(signer) => { - let sig = ::sign(signer, header).await?; + let signer = signer.clone(); + let header = header.clone(); + let sig = tokio::task::spawn_blocking(move || { + tokio::runtime::Builder::new_current_thread() + .build() + .expect("failed to build tokio runtime") + .block_on(::sign(&signer, &header)) + }) + .await + .unwrap_or_else(|e| std::panic::resume_unwind(e.into_panic()))?; Ok(sig) }, } diff --git a/crates/validator/src/tx_validation/mod.rs b/crates/validator/src/tx_validation/mod.rs index f2d1250a20..d33c0934a0 100644 --- a/crates/validator/src/tx_validation/mod.rs +++ b/crates/validator/src/tx_validation/mod.rs @@ -39,23 +39,41 @@ pub async fn validate_transaction( proven_tx: ProvenTransaction, tx_inputs: TransactionInputs, ) -> Result { - // First, verify the transaction proof - info_span!("verify").in_scope(|| { - let tx_verifier = TransactionVerifier::new(MIN_PROOF_SECURITY_LEVEL); - tx_verifier.verify(&proven_tx) - })?; + // Proof verification is CPU-intensive; run it on a dedicated blocking thread. 
+ let proven_tx_clone = proven_tx.clone(); + tokio::task::spawn_blocking(move || { + info_span!("verify").in_scope(|| { + TransactionVerifier::new(MIN_PROOF_SECURITY_LEVEL).verify(&proven_tx_clone) + }) + }) + .await + .unwrap_or_else(|e| std::panic::resume_unwind(e.into_panic()))?; // Create a DataStore from the transaction inputs. let data_store = TransactionInputsDataStore::new(tx_inputs.clone()); - // Execute the transaction. + // VM execution may not yield; run it on a dedicated blocking thread. let (account, block_header, _, input_notes, tx_args) = tx_inputs.into_parts(); - let executor: TransactionExecutor<'_, '_, _, UnreachableAuth> = - TransactionExecutor::new(&data_store); - let executed_tx = executor - .execute_transaction(account.id(), block_header.block_num(), input_notes, tx_args) - .instrument(info_span!("execute")) - .await?; + let execute_span = info_span!("execute"); + let executed_tx = tokio::task::spawn_blocking(move || { + let executor: TransactionExecutor<'_, '_, _, UnreachableAuth> = + TransactionExecutor::new(&data_store); + tokio::runtime::Builder::new_current_thread() + .build() + .expect("failed to build tokio runtime") + .block_on( + executor + .execute_transaction( + account.id(), + block_header.block_num(), + input_notes, + tx_args, + ) + .instrument(execute_span), + ) + }) + .await + .unwrap_or_else(|e| std::panic::resume_unwind(e.into_panic()))?; // Validate that the executed transaction matches the submitted transaction. 
let executed_tx_header: TransactionHeader = (&executed_tx).into(); From f1e1448218a7f05e142338f855040d29dc5da0a5 Mon Sep 17 00:00:00 2001 From: Mirko <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Thu, 7 May 2026 07:52:26 +0200 Subject: [PATCH 27/28] feat: remove `BlockSigner` trait (#2057) --- bin/genesis/src/main.rs | 10 ++-- bin/node/src/commands/validator.rs | 47 +++++++---------- bin/stress-test/src/seeding/mod.rs | 7 ++- crates/block-producer/src/server/tests.rs | 6 +-- crates/rpc/src/tests.rs | 13 ++--- crates/store/src/db/tests.rs | 20 ++++--- crates/store/src/genesis/config/mod.rs | 12 ++--- crates/store/src/genesis/config/tests.rs | 23 +++++---- crates/store/src/genesis/mod.rs | 63 ++++++++++++++++------- crates/utils/src/lib.rs | 1 - crates/utils/src/signer.rs | 36 ------------- crates/validator/src/server/tests.rs | 16 +++--- crates/validator/src/signers/kms.rs | 15 ++---- crates/validator/src/signers/mod.rs | 40 +++++++------- 14 files changed, 146 insertions(+), 163 deletions(-) delete mode 100644 crates/utils/src/signer.rs diff --git a/bin/genesis/src/main.rs b/bin/genesis/src/main.rs index 0b66588273..4244ee6b13 100644 --- a/bin/genesis/src/main.rs +++ b/bin/genesis/src/main.rs @@ -205,17 +205,17 @@ mod tests { /// Parses the generated genesis.toml, builds a genesis block, and asserts the bridge account /// is included with nonce=1. 
- async fn assert_valid_genesis_block(dir: &Path) { + fn assert_valid_genesis_block(dir: &Path) { let bridge_id = AccountFile::read(dir.join("bridge.mac")).unwrap().account.id(); let config = GenesisConfig::read_toml_file(&dir.join("genesis.toml")).unwrap(); let signer = SecretKey::read_from_bytes(&[0x01; 32]).unwrap(); - let (state, _) = config.into_state(signer).unwrap(); + let (state, _) = config.into_state(signer.public_key()).unwrap(); let bridge = state.accounts.iter().find(|a| a.id() == bridge_id).unwrap(); assert_eq!(bridge.nonce(), ONE); - state.into_block().await.expect("genesis block should build"); + state.into_block(&signer).expect("genesis block should build"); } #[tokio::test] @@ -229,7 +229,7 @@ mod tests { let ger = AccountFile::read(dir.path().join("ger_manager.mac")).unwrap(); assert_eq!(ger.auth_secret_keys.len(), 1); - assert_valid_genesis_block(dir.path()).await; + assert_valid_genesis_block(dir.path()); } #[tokio::test] @@ -249,6 +249,6 @@ mod tests { let ger = AccountFile::read(dir.path().join("ger_manager.mac")).unwrap(); assert!(ger.auth_secret_keys.is_empty()); - assert_valid_genesis_block(dir.path()).await; + assert_valid_genesis_block(dir.path()); } } diff --git a/bin/node/src/commands/validator.rs b/bin/node/src/commands/validator.rs index 8e1e9fdf9f..ce80c97c83 100644 --- a/bin/node/src/commands/validator.rs +++ b/bin/node/src/commands/validator.rs @@ -6,7 +6,6 @@ use miden_node_store::genesis::config::{AccountFileWithName, GenesisConfig}; use miden_node_utils::clap::GrpcOptionsInternal; use miden_node_utils::fs::ensure_empty_directory; use miden_node_utils::grpc::UrlExt; -use miden_node_utils::signer::BlockSigner; use miden_node_validator::{Validator, ValidatorSigner}; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; use miden_protocol::utils::serde::{Deserializable, Serializable}; @@ -196,28 +195,14 @@ impl ValidatorCommand { // Bootstrap with KMS key or local key. 
let signer = validator_key.into_signer().await?; - match signer { - ValidatorSigner::Kms(signer) => { - build_and_write_genesis( - config, - signer, - accounts_directory, - genesis_block_directory, - data_directory, - ) - .await - }, - ValidatorSigner::Local(signer) => { - build_and_write_genesis( - config, - signer, - accounts_directory, - genesis_block_directory, - data_directory, - ) - .await - }, - } + build_and_write_genesis( + config, + signer, + accounts_directory, + genesis_block_directory, + data_directory, + ) + .await } } @@ -225,13 +210,13 @@ impl ValidatorCommand { /// to disk, and initializes the validator's database with the genesis block as the chain tip. async fn build_and_write_genesis( config: GenesisConfig, - signer: impl BlockSigner, + signer: ValidatorSigner, accounts_directory: &Path, genesis_block_directory: &Path, data_directory: &Path, ) -> anyhow::Result<()> { // Build genesis state with the provided signer. - let (genesis_state, secrets) = config.into_state(signer)?; + let (genesis_state, secrets) = config.into_state(signer.public_key())?; // Write account secret files. for item in secrets.as_account_files(&genesis_state) { @@ -247,8 +232,16 @@ async fn build_and_write_genesis( } // Build the signed genesis block. - let genesis_block = - genesis_state.into_block().await.context("failed to build the genesis block")?; + let unsigned_genesis_block = genesis_state + .into_unsigned_block() + .context("failed to build the unsigned genesis block")?; + let signature = signer + .sign(unsigned_genesis_block.header()) + .await + .context("failed to sign the genesis block")?; + let genesis_block = unsigned_genesis_block + .into_block(signature) + .context("failed to build the genesis block")?; // Serialize and write the genesis block to disk. 
let block_bytes = genesis_block.inner().to_bytes(); diff --git a/bin/stress-test/src/seeding/mod.rs b/bin/stress-test/src/seeding/mod.rs index f1da81554f..28b70b68aa 100644 --- a/bin/stress-test/src/seeding/mod.rs +++ b/bin/stress-test/src/seeding/mod.rs @@ -119,11 +119,10 @@ pub async fn seed_store( let asset_faucet_ids = benchmark_faucets.iter().map(Account::id).collect::>(); let fee_params = FeeParameters::new(faucet.id(), 0).unwrap(); let signer = EcdsaSecretKey::new(); - let genesis_state = GenesisState::new(benchmark_faucets, fee_params, 1, 1, signer.clone()); + let genesis_state = GenesisState::new(benchmark_faucets, fee_params, 1, 1, signer.public_key()); let genesis_block = genesis_state .clone() - .into_block() - .await + .into_block(&signer) .expect("genesis block should be created"); Store::bootstrap(genesis_block, &data_directory).expect("store should bootstrap"); @@ -135,7 +134,7 @@ pub async fn seed_store( let accounts_filepath = data_directory.join(ACCOUNTS_FILENAME); let data_directory = miden_node_store::DataDirectory::load(data_directory).expect("data directory should exist"); - let genesis_header = genesis_state.into_block().await.unwrap().into_inner(); + let genesis_header = genesis_state.into_block(&signer).unwrap().into_inner(); let metrics = generate_blocks( num_accounts, public_accounts_percentage, diff --git a/crates/block-producer/src/server/tests.rs b/crates/block-producer/src/server/tests.rs index c63613ce02..73055deff5 100644 --- a/crates/block-producer/src/server/tests.rs +++ b/crates/block-producer/src/server/tests.rs @@ -130,11 +130,11 @@ async fn start_store( store_addr: std::net::SocketAddr, data_directory: &std::path::Path, ) -> runtime::Runtime { - let genesis_state = GenesisState::new(vec![], test_fee_params(), 1, 1, random_secret_key()); + let signer = random_secret_key(); + let genesis_state = GenesisState::new(vec![], test_fee_params(), 1, 1, signer.public_key()); let genesis_block = genesis_state .clone() - .into_block() - 
.await + .into_block(&signer) .expect("genesis block should be created"); Store::bootstrap(genesis_block, data_directory).expect("store should bootstrap"); diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index a1e85e7737..f892316d85 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -526,12 +526,12 @@ async fn start_store(store_listener: TcpListener) -> (Runtime, TempDir, Word, So let config = GenesisConfig::default(); let signer = SecretKey::new(); - let (genesis_state, _) = config.into_state(signer).unwrap(); + let (genesis_state, _) = config.into_state(signer.public_key()).unwrap(); let genesis_block = genesis_state .clone() - .into_block() - .await + .into_block(&signer) .expect("genesis block should be created"); + let genesis_commitment = genesis_block.inner().header().commitment(); Store::bootstrap(genesis_block, data_directory.path()).expect("store should bootstrap"); let dir = data_directory.path().to_path_buf(); let store_addr = @@ -562,12 +562,7 @@ async fn start_store(store_listener: TcpListener) -> (Runtime, TempDir, Word, So .await .expect("store should start serving"); }); - ( - store_runtime, - data_directory, - genesis_state.into_block().await.unwrap().inner().header().commitment(), - store_addr, - ) + (store_runtime, data_directory, genesis_commitment, store_addr) } /// Shuts down the store runtime properly to allow `RocksDB` to flush before the temp directory is diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 738889cddd..903bd3a847 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -1953,9 +1953,10 @@ async fn genesis_with_account_assets() { .build_existing() .unwrap(); + let signer = random_secret_key(); let genesis_state = - GenesisState::new(vec![account], test_fee_params(), 1, 0, random_secret_key()); - let genesis_block = genesis_state.into_block().await.unwrap(); + GenesisState::new(vec![account], test_fee_params(), 1, 0, signer.public_key()); + 
let genesis_block = genesis_state.into_block(&signer).unwrap(); crate::db::Db::bootstrap(":memory:".into(), genesis_block).unwrap(); } @@ -2008,9 +2009,10 @@ async fn genesis_with_account_storage_map() { .build_existing() .unwrap(); + let signer = random_secret_key(); let genesis_state = - GenesisState::new(vec![account], test_fee_params(), 1, 0, random_secret_key()); - let genesis_block = genesis_state.into_block().await.unwrap(); + GenesisState::new(vec![account], test_fee_params(), 1, 0, signer.public_key()); + let genesis_block = genesis_state.into_block(&signer).unwrap(); crate::db::Db::bootstrap(":memory:".into(), genesis_block).unwrap(); } @@ -2061,9 +2063,10 @@ async fn genesis_with_account_assets_and_storage() { .build_existing() .unwrap(); + let signer = random_secret_key(); let genesis_state = - GenesisState::new(vec![account], test_fee_params(), 1, 0, random_secret_key()); - let genesis_block = genesis_state.into_block().await.unwrap(); + GenesisState::new(vec![account], test_fee_params(), 1, 0, signer.public_key()); + let genesis_block = genesis_state.into_block(&signer).unwrap(); crate::db::Db::bootstrap(":memory:".into(), genesis_block).unwrap(); } @@ -2152,14 +2155,15 @@ async fn genesis_with_multiple_accounts() { .build_existing() .unwrap(); + let signer = random_secret_key(); let genesis_state = GenesisState::new( vec![account1, account2, account3], test_fee_params(), 1, 0, - random_secret_key(), + signer.public_key(), ); - let genesis_block = genesis_state.into_block().await.unwrap(); + let genesis_block = genesis_state.into_block(&signer).unwrap(); crate::db::Db::bootstrap(":memory:".into(), genesis_block).unwrap(); } diff --git a/crates/store/src/genesis/config/mod.rs b/crates/store/src/genesis/config/mod.rs index 6c70a6f387..d3398f7053 100644 --- a/crates/store/src/genesis/config/mod.rs +++ b/crates/store/src/genesis/config/mod.rs @@ -6,7 +6,6 @@ use std::str::FromStr; use indexmap::IndexMap; use miden_node_utils::crypto::get_rpo_random_coin; 
-use miden_node_utils::signer::BlockSigner; use miden_protocol::account::auth::{AuthScheme, AuthSecretKey}; use miden_protocol::account::{ Account, @@ -23,6 +22,7 @@ use miden_protocol::account::{ }; use miden_protocol::asset::{FungibleAsset, TokenSymbol}; use miden_protocol::block::FeeParameters; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::PublicKey; use miden_protocol::crypto::dsa::falcon512_poseidon2::SecretKey as RpoSecretKey; use miden_protocol::errors::TokenSymbolError; use miden_protocol::{Felt, ONE}; @@ -141,10 +141,10 @@ impl GenesisConfig { /// /// Also returns the set of secrets for the generated accounts. #[expect(clippy::too_many_lines)] - pub fn into_state( + pub fn into_state( self, - signer: S, - ) -> Result<(GenesisState, AccountSecrets), GenesisConfigError> { + validator_key: PublicKey, + ) -> Result<(GenesisState, AccountSecrets), GenesisConfigError> { let GenesisConfig { version, timestamp, @@ -335,7 +335,7 @@ impl GenesisConfig { accounts: all_accounts, version, timestamp, - block_signer: signer, + validator_key, }, AccountSecrets { secrets }, )) @@ -529,7 +529,7 @@ impl AccountSecrets { /// and the index in pub fn as_account_files( &self, - genesis_state: &GenesisState, + genesis_state: &GenesisState, ) -> impl Iterator> + '_ { let account_lut = IndexMap::::from_iter( genesis_state.accounts.iter().map(|account| (account.id(), account.clone())), diff --git a/crates/store/src/genesis/config/tests.rs b/crates/store/src/genesis/config/tests.rs index acc1b94465..0463396431 100644 --- a/crates/store/src/genesis/config/tests.rs +++ b/crates/store/src/genesis/config/tests.rs @@ -27,7 +27,8 @@ fn parsing_yields_expected_default_values() -> TestResult { let config_path = write_toml_file(temp_dir.path(), sample_content); let gcfg = GenesisConfig::read_toml_file(&config_path)?; - let (state, _secrets) = gcfg.into_state(SecretKey::new())?; + let signer = SecretKey::new(); + let (state, _secrets) = gcfg.into_state(signer.public_key())?; let _ = 
state; // faucets always precede wallet accounts let native_faucet = state.accounts[0].clone(); @@ -70,14 +71,15 @@ fn parsing_yields_expected_default_values() -> TestResult { #[miden_node_test_macro::enable_logging] async fn genesis_accounts_have_nonce_one() -> TestResult { let gcfg = GenesisConfig::default(); - let (state, secrets) = gcfg.into_state(SecretKey::new()).unwrap(); + let signer = SecretKey::new(); + let (state, secrets) = gcfg.into_state(signer.public_key()).unwrap(); let mut iter = secrets.as_account_files(&state); let AccountFileWithName { account_file: status_quo, .. } = iter.next().unwrap().unwrap(); assert!(iter.next().is_none()); assert_eq!(status_quo.account.nonce(), ONE); - let _block = state.into_block().await?; + let _block = state.into_block(&signer)?; Ok(()) } @@ -134,7 +136,8 @@ path = "test_account.mac" let gcfg = GenesisConfig::read_toml_file(&config_path)?; // Convert to state and verify the account is included - let (state, _secrets) = gcfg.into_state(SecretKey::new())?; + let signer = SecretKey::new(); + let (state, _secrets) = gcfg.into_state(signer.public_key())?; assert!(state.accounts.iter().any(|a| a.id() == account_id)); Ok(()) @@ -192,7 +195,8 @@ verification_base_fee = 0 let gcfg = GenesisConfig::read_toml_file(&config_path)?; // Convert to state and verify the native faucet is included - let (state, secrets) = gcfg.into_state(SecretKey::new())?; + let signer = SecretKey::new(); + let (state, secrets) = gcfg.into_state(signer.public_key())?; assert!(state.accounts.iter().any(|a| a.id() == faucet_id)); // No secrets should be generated for file-loaded native faucet @@ -251,7 +255,7 @@ verification_base_fee = 0 let gcfg = GenesisConfig::read_toml_file(&config_path)?; // into_state should fail with NativeFaucetNotFungible error when loading the file - let result = gcfg.into_state(SecretKey::new()); + let result = gcfg.into_state(SecretKey::new().public_key()); assert!(result.is_err()); let err = result.unwrap_err(); assert!( @@ 
-284,7 +288,7 @@ path = "does_not_exist.mac" let gcfg = GenesisConfig::read_toml_file(&config_path).unwrap(); // into_state should fail with AccountFileRead error when loading the file - let result = gcfg.into_state(SecretKey::new()); + let result = gcfg.into_state(SecretKey::new().public_key()); assert!(result.is_err()); let err = result.unwrap_err(); assert!( @@ -303,7 +307,8 @@ async fn parsing_agglayer_sample_with_account_files() -> TestResult { .join("src/genesis/config/samples/02-with-account-files.toml"); let gcfg = GenesisConfig::read_toml_file(&sample_path)?; - let (state, secrets) = gcfg.into_state(SecretKey::new())?; + let signer = SecretKey::new(); + let (state, secrets) = gcfg.into_state(signer.public_key())?; // Should have 4 accounts: // 1. Native faucet (MIDEN) - built from parameters @@ -355,7 +360,7 @@ async fn parsing_agglayer_sample_with_account_files() -> TestResult { assert_eq!(secrets.secrets.len(), 1, "Only native faucet should generate a secret"); // Verify the genesis state can be converted to a block - let block = state.into_block().await?; + let block = state.into_block(&signer)?; // Verify that non-private accounts (Public and Network) get full Delta details. 
for update in block.inner().body().updated_accounts() { diff --git a/crates/store/src/genesis/mod.rs b/crates/store/src/genesis/mod.rs index 6c4624fb00..041c42b561 100644 --- a/crates/store/src/genesis/mod.rs +++ b/crates/store/src/genesis/mod.rs @@ -1,4 +1,3 @@ -use miden_node_utils::signer::BlockSigner; use miden_protocol::Word; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{Account, AccountDelta}; @@ -13,6 +12,7 @@ use miden_protocol::block::{ FeeParameters, ProvenBlock, }; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::{PublicKey, SecretKey, Signature}; use miden_protocol::crypto::merkle::mmr::{Forest, MmrPeaks}; use miden_protocol::crypto::merkle::smt::{LargeSmt, MemoryStorage, Smt}; use miden_protocol::errors::AccountError; @@ -26,18 +26,45 @@ pub mod config; /// Represents the state at genesis, which will be used to derive the genesis block. #[derive(Clone, Debug, PartialEq, Eq)] -pub struct GenesisState { +pub struct GenesisState { pub accounts: Vec, pub fee_parameters: FeeParameters, pub version: u32, pub timestamp: u32, - pub block_signer: S, + pub validator_key: PublicKey, } /// A type-safety wrapper ensuring that genesis block data can only be created from /// [`GenesisState`] or validated from a [`ProvenBlock`] via [`GenesisBlock::try_from`]. pub struct GenesisBlock(ProvenBlock); +/// A genesis block with all data except the validator signature. 
+pub struct UnsignedGenesisBlock { + header: BlockHeader, + body: BlockBody, + block_proof: BlockProof, +} + +impl UnsignedGenesisBlock { + pub fn header(&self) -> &BlockHeader { + &self.header + } + + pub fn into_block(self, signature: Signature) -> anyhow::Result { + anyhow::ensure!( + signature.verify(self.header.commitment(), self.header.validator_key()), + "genesis block signature verification failed", + ); + + Ok(GenesisBlock(ProvenBlock::new_unchecked( + self.header, + self.body, + signature, + self.block_proof, + ))) + } +} + impl GenesisBlock { pub fn inner(&self) -> &ProvenBlock { &self.0 @@ -69,27 +96,25 @@ impl TryFrom for GenesisBlock { } } -impl GenesisState { +impl GenesisState { pub fn new( accounts: Vec, fee_parameters: FeeParameters, version: u32, timestamp: u32, - signer: S, + validator_key: PublicKey, ) -> Self { Self { accounts, fee_parameters, version, timestamp, - block_signer: signer, + validator_key, } } -} -impl GenesisState { - /// Returns the block header and the account SMT. - pub async fn into_block(self) -> anyhow::Result { + /// Builds the unsigned genesis block. + pub fn into_unsigned_block(self) -> anyhow::Result { let accounts: Vec = self .accounts .iter() @@ -140,7 +165,7 @@ impl GenesisState { empty_block_note_tree.root(), Word::empty(), TransactionKernel.to_commitment(), - self.block_signer.public_key(), + self.validator_key, self.fee_parameters, self.timestamp, ); @@ -154,13 +179,13 @@ impl GenesisState { let block_proof = BlockProof::new_dummy(); - // Sign and assert verification for sanity (no mismatch between frontend and backend signing - // impls). - let signature = self.block_signer.sign(&header).await?; - assert!(signature.verify(header.commitment(), &self.block_signer.public_key())); - // SAFETY: Header and accounts should be valid by construction. - // No notes or nullifiers are created at genesis, which is consistent with the above empty - // block note tree root and empty nullifier tree root. 
- Ok(GenesisBlock(ProvenBlock::new_unchecked(header, body, signature, block_proof))) + Ok(UnsignedGenesisBlock { header, body, block_proof }) + } + + /// Builds and signs the genesis block with a local secret key. + pub fn into_block(self, signer: &SecretKey) -> anyhow::Result { + let unsigned_block = self.into_unsigned_block()?; + let signature = signer.sign(unsigned_block.header().commitment()); + unsigned_block.into_block(signature) } } diff --git a/crates/utils/src/lib.rs b/crates/utils/src/lib.rs index a2c4a82d1c..ea9e60c5ae 100644 --- a/crates/utils/src/lib.rs +++ b/crates/utils/src/lib.rs @@ -10,7 +10,6 @@ pub mod limiter; pub mod logging; pub mod lru_cache; pub mod panic; -pub mod signer; pub mod tracing; pub trait ErrorReport: std::error::Error { diff --git a/crates/utils/src/signer.rs b/crates/utils/src/signer.rs deleted file mode 100644 index 00dbe3ebc3..0000000000 --- a/crates/utils/src/signer.rs +++ /dev/null @@ -1,36 +0,0 @@ -use core::convert::Infallible; -use core::error; - -use miden_protocol::block::BlockHeader; -use miden_protocol::crypto::dsa::ecdsa_k256_keccak::{PublicKey, SecretKey, Signature}; - -// BLOCK SIGNER -// ================================================================================================ - -/// Trait which abstracts the signing of block headers with ECDSA signatures. -/// -/// Production-level implementations will involve some sort of secure remote backend. The trait also -/// allows for testing with local and ephemeral signers. 
-pub trait BlockSigner { - type Error: error::Error + Send + Sync + 'static; - fn sign( - &self, - header: &BlockHeader, - ) -> impl Future> + Send; - fn public_key(&self) -> PublicKey; -} - -// SECRET KEY BLOCK SIGNER -// ================================================================================================ - -impl BlockSigner for SecretKey { - type Error = Infallible; - - async fn sign(&self, header: &BlockHeader) -> Result { - Ok(self.sign(header.commitment())) - } - - fn public_key(&self) -> PublicKey { - self.public_key() - } -} diff --git a/crates/validator/src/server/tests.rs b/crates/validator/src/server/tests.rs index e87b821e21..7ce8efdb4c 100644 --- a/crates/validator/src/server/tests.rs +++ b/crates/validator/src/server/tests.rs @@ -29,8 +29,10 @@ impl TestValidator { async fn new() -> Self { let signer = ValidatorSigner::new_local(random_secret_key()); - let genesis_state = GenesisState::new(vec![], test_fee_params(), 1, 0, random_secret_key()); - let genesis_block = genesis_state.into_block().await.unwrap(); + let genesis_signer = random_secret_key(); + let genesis_state = + GenesisState::new(vec![], test_fee_params(), 1, 0, genesis_signer.public_key()); + let genesis_block = genesis_state.into_block(&genesis_signer).unwrap(); let genesis_header = genesis_block.inner().header().clone(); let dir = tempfile::tempdir().unwrap(); @@ -234,9 +236,10 @@ async fn commitment_mismatch_rejected() { // Build a valid ProposedBlock on a *different* genesis so its prev_block_commitment // won't match the validator's actual chain tip. 
+ let other_genesis_signer = random_secret_key(); let other_genesis_state = - GenesisState::new(vec![], test_fee_params(), 1, 1, random_secret_key()); - let other_genesis_block = other_genesis_state.into_block().await.unwrap(); + GenesisState::new(vec![], test_fee_params(), 1, 1, other_genesis_signer.public_key()); + let other_genesis_block = other_genesis_state.into_block(&other_genesis_signer).unwrap(); let other_genesis_header = other_genesis_block.inner().header().clone(); let mismatched_block = empty_block(&other_genesis_header, &PartialBlockchain::default()); @@ -261,9 +264,10 @@ async fn replacement_commitment_mismatch_rejected() { // Build a replacement block at the same height but using a *different* genesis so its // prev_block_commitment won't match the validator's actual parent of the chain tip. + let other_genesis_signer = random_secret_key(); let other_genesis_state = - GenesisState::new(vec![], test_fee_params(), 1, 1, random_secret_key()); - let other_genesis_block = other_genesis_state.into_block().await.unwrap(); + GenesisState::new(vec![], test_fee_params(), 1, 1, other_genesis_signer.public_key()); + let other_genesis_block = other_genesis_state.into_block(&other_genesis_signer).unwrap(); let other_genesis_header = other_genesis_block.inner().header().clone(); let mismatched_replacement = empty_block(&other_genesis_header, &PartialBlockchain::default()); diff --git a/crates/validator/src/signers/kms.rs b/crates/validator/src/signers/kms.rs index e84576ac1e..f9a5f47b2d 100644 --- a/crates/validator/src/signers/kms.rs +++ b/crates/validator/src/signers/kms.rs @@ -1,8 +1,7 @@ use aws_sdk_kms::error::SdkError; use aws_sdk_kms::operation::sign::SignError; use aws_sdk_kms::types::SigningAlgorithmSpec; -use miden_node_utils::signer::BlockSigner; -use miden_protocol::block::BlockHeader; +use miden_protocol::Word; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::{PublicKey, Signature}; use miden_protocol::crypto::hash::keccak::Keccak256; use 
miden_protocol::utils::serde::{DeserializationError, Serializable}; @@ -74,18 +73,14 @@ impl KmsSigner { let pub_key = PublicKey::from_der(spki_der)?; Ok(Self { key_id, pub_key, client }) } -} - -impl BlockSigner for KmsSigner { - type Error = KmsSignerError; - async fn sign(&self, header: &BlockHeader) -> Result { + pub async fn sign(&self, commitment: Word) -> Result { // The Validator produces Ethereum-style ECDSA (secp256k1) signatures over Keccak-256 // digests. AWS KMS does not support SHA-3 hashing for ECDSA keys // (ECC_SECG_P256K1 being the corresponding AWS key-spec), so we pre-hash the // message and pass MessageType::Digest. KMS signs the provided 32-byte digest // verbatim. - let msg = header.commitment().to_bytes(); + let msg = commitment.to_bytes(); let digest = Keccak256::hash(&msg); // Request signature from KMS backend. @@ -109,14 +104,14 @@ impl BlockSigner for KmsSigner { .map_err(KmsSignerError::SignatureFormatError)?; // Check the returned signature. - if sig.verify(header.commitment(), &self.pub_key) { + if sig.verify(commitment, &self.pub_key) { Ok(sig) } else { Err(KmsSignerError::InvalidSignature) } } - fn public_key(&self) -> PublicKey { + pub fn public_key(&self) -> PublicKey { self.pub_key.clone() } } diff --git a/crates/validator/src/signers/mod.rs b/crates/validator/src/signers/mod.rs index 6e6092350f..e4c2192b3e 100644 --- a/crates/validator/src/signers/mod.rs +++ b/crates/validator/src/signers/mod.rs @@ -1,8 +1,7 @@ mod kms; pub use kms::KmsSigner; -use miden_node_utils::signer::BlockSigner; use miden_protocol::block::BlockHeader; -use miden_protocol::crypto::dsa::ecdsa_k256_keccak::{SecretKey, Signature}; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::{PublicKey, SecretKey, Signature}; // VALIDATOR SIGNER // ================================================================================================= @@ -28,26 +27,27 @@ impl ValidatorSigner { Self::Local(secret_key) } + /// Returns the public key corresponding to the 
configured signer. + pub fn public_key(&self) -> PublicKey { + match self { + Self::Kms(signer) => signer.public_key(), + Self::Local(signer) => signer.public_key(), + } + } + /// Signs a block header using the configured signer. pub async fn sign(&self, header: &BlockHeader) -> anyhow::Result { - match self { - Self::Kms(signer) => { - let sig = signer.sign(header).await?; - Ok(sig) - }, - Self::Local(signer) => { + let commitment = header.commitment(); + let signature = match self { + Self::Kms(signer) => signer.sign(commitment).await?, + Self::Local(signer) => tokio::task::spawn_blocking({ let signer = signer.clone(); - let header = header.clone(); - let sig = tokio::task::spawn_blocking(move || { - tokio::runtime::Builder::new_current_thread() - .build() - .expect("failed to build tokio runtime") - .block_on(::sign(&signer, &header)) - }) - .await - .unwrap_or_else(|e| std::panic::resume_unwind(e.into_panic()))?; - Ok(sig) - }, - } + move || signer.sign(commitment) + }) + .await + .unwrap_or_else(|e| std::panic::resume_unwind(e.into_panic())), + }; + + Ok(signature) } } From ce77ca5fd157e1750d02deb1e5f5e770ac3d704a Mon Sep 17 00:00:00 2001 From: KOVACS Krisztian Date: Thu, 7 May 2026 14:23:26 +0200 Subject: [PATCH 28/28] fix: correctly propagate the current span to blocking tasks (#2061) --- bin/remote-prover/src/server/prover.rs | 7 +++-- clippy.toml | 3 ++ .../block-producer/src/batch_builder/mod.rs | 13 ++++---- .../block-producer/src/block_builder/mod.rs | 3 +- crates/block-producer/src/server/tests.rs | 9 ++++-- crates/ntx-builder/src/actor/execute.rs | 30 ++++++++++++------- crates/rpc/src/tests.rs | 3 +- crates/store/src/server/mod.rs | 11 +++---- crates/utils/src/lib.rs | 1 + crates/utils/src/spawn.rs | 21 +++++++++++++ crates/validator/src/signers/mod.rs | 3 +- crates/validator/src/tx_validation/mod.rs | 14 ++++----- 12 files changed, 82 insertions(+), 36 deletions(-) create mode 100644 crates/utils/src/spawn.rs diff --git 
a/bin/remote-prover/src/server/prover.rs b/bin/remote-prover/src/server/prover.rs index f8c552138f..06aa761e50 100644 --- a/bin/remote-prover/src/server/prover.rs +++ b/bin/remote-prover/src/server/prover.rs @@ -1,6 +1,7 @@ use miden_block_prover::LocalBlockProver; use miden_node_proto::BlockProofRequest; use miden_node_utils::ErrorReport; +use miden_node_utils::spawn::spawn_blocking_in_current_span; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::MIN_PROOF_SECURITY_LEVEL; use miden_protocol::batch::{ProposedBatch, ProvenBatch}; @@ -113,7 +114,8 @@ impl ProveRequest for LocalBatchProver { async fn prove(&self, input: Self::Input) -> Result { let prover = self.clone(); - tokio::task::spawn_blocking(move || { + + spawn_blocking_in_current_span(move || { prover .prove(input) .map_err(|e| tonic::Status::internal(e.as_report_context("failed to prove batch"))) @@ -131,7 +133,8 @@ impl ProveRequest for LocalBlockProver { async fn prove(&self, input: Self::Input) -> Result { let prover = self.clone(); let BlockProofRequest { tx_batches, block_header, block_inputs } = input; - tokio::task::spawn_blocking(move || { + + spawn_blocking_in_current_span(move || { prover .prove(tx_batches, &block_header, block_inputs) .map_err(|e| tonic::Status::internal(e.as_report_context("failed to prove block"))) diff --git a/clippy.toml b/clippy.toml index 2a5815cec4..9ee9a79b1b 100644 --- a/clippy.toml +++ b/clippy.toml @@ -29,4 +29,7 @@ disallowed-methods = [ { path = "std::path::Path::read_link", reason = "Use fs_err::path::PathExt methods" }, { path = "std::path::Path::symlink_metadata", reason = "Use fs_err::path::PathExt methods" }, { path = "std::path::Path::try_exists", reason = "Use fs_err::path::PathExt methods" }, + + # Use our own `spawn_blocking` wrapper so that the tracing span is correctly propagated + { path = "tokio::task::spawn_blocking", replacement = "miden_node_utils::spawn::spawn_blocking_in_current_span" }, ] diff --git 
a/crates/block-producer/src/batch_builder/mod.rs b/crates/block-producer/src/batch_builder/mod.rs index 549d76261f..86f7e49d5e 100644 --- a/crates/block-producer/src/batch_builder/mod.rs +++ b/crates/block-producer/src/batch_builder/mod.rs @@ -6,6 +6,7 @@ use std::time::Duration; use futures::never::Never; use futures::{FutureExt, TryFutureExt}; use miden_node_proto::domain::batch::BatchInputs; +use miden_node_utils::spawn::spawn_blocking_in_current_span; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::MIN_PROOF_SECURITY_LEVEL; use miden_protocol::batch::{BatchId, ProposedBatch, ProvenBatch}; @@ -249,12 +250,14 @@ impl BatchJob { .prove(proposed_batch) .await .map_err(BuildBatchError::RemoteProverClientError), - BatchProver::Local(prover) => tokio::task::spawn_blocking({ + BatchProver::Local(prover) => { let prover = prover.clone(); - move || prover.prove(proposed_batch).map_err(BuildBatchError::ProveBatchError) - }) - .await - .map_err(BuildBatchError::JoinError)?, + spawn_blocking_in_current_span(move || { + prover.prove(proposed_batch).map_err(BuildBatchError::ProveBatchError) + }) + .await + .map_err(BuildBatchError::JoinError)? 
+ }, }?; if proven_batch.proof_security_level() < MIN_PROOF_SECURITY_LEVEL { diff --git a/crates/block-producer/src/block_builder/mod.rs b/crates/block-producer/src/block_builder/mod.rs index 85acb62bff..3d1613c6cd 100644 --- a/crates/block-producer/src/block_builder/mod.rs +++ b/crates/block-producer/src/block_builder/mod.rs @@ -3,6 +3,7 @@ use std::sync::Arc; use anyhow::Context; use futures::FutureExt; +use miden_node_utils::spawn::spawn_blocking_in_current_span; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::batch::{OrderedBatches, ProvenBatch}; use miden_protocol::block::{BlockInputs, BlockNumber, ProposedBlock, ProvenBlock, SignedBlock}; @@ -225,7 +226,7 @@ impl BlockBuilder { proposed_block: ProposedBlock, ) -> Result<(OrderedBatches, SignedBlock), BuildBlockError> { // Concurrently build the block and validate it via the validator. - let build_result = tokio::task::spawn_blocking({ + let build_result = spawn_blocking_in_current_span({ let proposed_block = proposed_block.clone(); move || proposed_block.into_header_and_body() }); diff --git a/crates/block-producer/src/server/tests.rs b/crates/block-producer/src/server/tests.rs index 73055deff5..a5a2e34123 100644 --- a/crates/block-producer/src/server/tests.rs +++ b/crates/block-producer/src/server/tests.rs @@ -5,6 +5,7 @@ use miden_node_proto::generated::block_producer::api_client as block_producer_cl use miden_node_store::{DEFAULT_MAX_CONCURRENT_PROOFS, GenesisState, Store}; use miden_node_utils::clap::{GrpcOptionsInternal, StorageOptions}; use miden_node_utils::fee::test_fee_params; +use miden_node_utils::spawn::spawn_blocking_in_current_span; use miden_node_validator::{Validator, ValidatorSigner}; use miden_protocol::testing::random_secret_key::random_secret_key; use tokio::net::TcpListener; @@ -172,9 +173,11 @@ async fn start_store( /// Shuts down the store runtime properly to allow the database to flush before the temp directory /// is deleted. 
async fn shutdown_store(store_runtime: runtime::Runtime) { - task::spawn_blocking(move || store_runtime.shutdown_timeout(Duration::from_millis(500))) - .await - .expect("shutdown should complete"); + spawn_blocking_in_current_span(move || { + store_runtime.shutdown_timeout(Duration::from_millis(500)); + }) + .await + .expect("shutdown should complete"); } /// Sends a status request to the block producer to verify connectivity. diff --git a/crates/ntx-builder/src/actor/execute.rs b/crates/ntx-builder/src/actor/execute.rs index f001883dcd..2b465adbd5 100644 --- a/crates/ntx-builder/src/actor/execute.rs +++ b/crates/ntx-builder/src/actor/execute.rs @@ -3,6 +3,7 @@ use std::sync::{Arc, Mutex}; use miden_node_utils::ErrorReport; use miden_node_utils::lru_cache::LruCache; +use miden_node_utils::spawn::spawn_blocking_in_current_span; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_protocol::Word; use miden_protocol::account::{ @@ -202,8 +203,10 @@ impl NtxContext { // calls to the store) are driven by the existing I/O driver. 
let ctx = self.clone(); let handle = tokio::runtime::Handle::current(); + let span = tracing::Span::current(); + let (executed_tx, failed_notes, scripts_to_cache) = - tokio::task::spawn_blocking(move || { + spawn_blocking_in_current_span(move || { let data_store = NtxDataStore::new( account, chain_tip_header, @@ -212,14 +215,17 @@ impl NtxContext { ctx.script_cache.clone(), ctx.db.clone(), ); - handle.block_on(async { - let (successful_notes, failed_notes) = - ctx.filter_notes(&data_store, notes).await?; - let executed_tx = - Box::pin(ctx.execute(&data_store, successful_notes)).await?; - let scripts_to_cache = data_store.take_fetched_scripts(); - Ok::<_, NtxError>((executed_tx, failed_notes, scripts_to_cache)) - }) + handle.block_on( + async { + let (successful_notes, failed_notes) = + ctx.filter_notes(&data_store, notes).await?; + let executed_tx = + Box::pin(ctx.execute(&data_store, successful_notes)).await?; + let scripts_to_cache = data_store.take_fetched_scripts(); + Ok::<_, NtxError>((executed_tx, failed_notes, scripts_to_cache)) + } + .instrument(span), + ) }) .await .unwrap_or_else(|e| std::panic::resume_unwind(e.into_panic()))?; @@ -330,12 +336,14 @@ impl NtxContext { } else { // ZK proof generation is CPU-intensive; run it on a dedicated blocking thread. 
let tx_inputs = tx_inputs.clone(); - tokio::task::spawn_blocking(move || { + let span = tracing::Span::current(); + + spawn_blocking_in_current_span(move || { tokio::runtime::Builder::new_current_thread() .enable_all() .build() .expect("failed to build tokio runtime") - .block_on(LocalTransactionProver::default().prove(tx_inputs)) + .block_on(LocalTransactionProver::default().prove(tx_inputs).instrument(span)) }) .await .unwrap_or_else(|e| std::panic::resume_unwind(e.into_panic())) diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index f892316d85..290c4afa99 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -17,6 +17,7 @@ use miden_node_utils::limiter::{ QueryParamNoteIdLimit, QueryParamNullifierLimit, }; +use miden_node_utils::spawn::spawn_blocking_in_current_span; use miden_protocol::Word; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{ @@ -568,7 +569,7 @@ async fn start_store(store_listener: TcpListener) -> (Runtime, TempDir, Word, So /// Shuts down the store runtime properly to allow `RocksDB` to flush before the temp directory is /// deleted. 
async fn shutdown_store(store_runtime: Runtime) { - task::spawn_blocking(move || store_runtime.shutdown_timeout(Duration::from_secs(3))) + spawn_blocking_in_current_span(move || store_runtime.shutdown_timeout(Duration::from_secs(3))) .await .expect("shutdown should complete"); // Give RocksDB time to release its lock file after the runtime shutdown diff --git a/crates/store/src/server/mod.rs b/crates/store/src/server/mod.rs index e3afd97387..4e8f599145 100644 --- a/crates/store/src/server/mod.rs +++ b/crates/store/src/server/mod.rs @@ -13,13 +13,14 @@ use miden_node_proto_build::{ }; use miden_node_utils::clap::{GrpcOptionsInternal, StorageOptions}; use miden_node_utils::panic::{CatchPanicLayer, catch_panic_layer_fn}; +use miden_node_utils::spawn::spawn_blocking_in_span; use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_node_utils::tracing::grpc::grpc_trace_fn; use tokio::net::TcpListener; use tokio::task::JoinSet; use tokio_stream::wrappers::TcpListenerStream; use tower_http::trace::TraceLayer; -use tracing::{Instrument, info, info_span, instrument}; +use tracing::{info, info_span, instrument}; use url::Url; use crate::blocks::BlockStore; @@ -208,10 +209,10 @@ impl Store { loop { interval.tick().await; let dir = data_directory.clone(); - let span = info_span!(target: COMPONENT, "measure disk space usage"); - let result = tokio::task::spawn_blocking(move || measure_disk_usage_bytes(&dir)) - .instrument(span.clone()) - .await; + let span = info_span!(target: COMPONENT, "measure_disk_space_usage"); + let result = + spawn_blocking_in_span(move || measure_disk_usage_bytes(&dir), span.clone()) + .await; match result { Ok(usage) => { span.set_attribute("db.sqlite.size", usage.sqlite_db); diff --git a/crates/utils/src/lib.rs b/crates/utils/src/lib.rs index ea9e60c5ae..ca9977ffb9 100644 --- a/crates/utils/src/lib.rs +++ b/crates/utils/src/lib.rs @@ -10,6 +10,7 @@ pub mod limiter; pub mod logging; pub mod lru_cache; pub mod panic; +pub mod spawn; pub mod 
tracing;

 pub trait ErrorReport: std::error::Error {
diff --git a/crates/utils/src/spawn.rs b/crates/utils/src/spawn.rs
new file mode 100644
index 0000000000..b0dc5438f5
--- /dev/null
+++ b/crates/utils/src/spawn.rs
@@ -0,0 +1,21 @@
+use tokio::task::JoinHandle;
+use tracing::Span;
+
+/// Spawn a blocking task in the current tracing span.
+pub fn spawn_blocking_in_current_span<F, R>(f: F) -> JoinHandle<R>
+where
+    F: FnOnce() -> R + Send + 'static,
+    R: Send + 'static,
+{
+    spawn_blocking_in_span(f, Span::current())
+}
+
+/// Spawn a blocking task in a span.
+pub fn spawn_blocking_in_span<F, R>(f: F, span: Span) -> JoinHandle<R>
+where
+    F: FnOnce() -> R + Send + 'static,
+    R: Send + 'static,
+{
+    #[expect(clippy::disallowed_methods)]
+    tokio::task::spawn_blocking(move || span.in_scope(f))
+}
diff --git a/crates/validator/src/signers/mod.rs b/crates/validator/src/signers/mod.rs
index e4c2192b3e..2c50b2dbb2 100644
--- a/crates/validator/src/signers/mod.rs
+++ b/crates/validator/src/signers/mod.rs
@@ -1,5 +1,6 @@
 mod kms;
 
 pub use kms::KmsSigner;
+use miden_node_utils::spawn::spawn_blocking_in_current_span;
 use miden_protocol::block::BlockHeader;
 use miden_protocol::crypto::dsa::ecdsa_k256_keccak::{PublicKey, SecretKey, Signature};
@@ -40,7 +41,7 @@ impl ValidatorSigner {
         let commitment = header.commitment();
         let signature = match self {
             Self::Kms(signer) => signer.sign(commitment).await?,
-            Self::Local(signer) => tokio::task::spawn_blocking({
+            Self::Local(signer) => spawn_blocking_in_current_span({
                 let signer = signer.clone();
                 move || signer.sign(commitment)
             })
diff --git a/crates/validator/src/tx_validation/mod.rs b/crates/validator/src/tx_validation/mod.rs
index d33c0934a0..9921532964 100644
--- a/crates/validator/src/tx_validation/mod.rs
+++ b/crates/validator/src/tx_validation/mod.rs
@@ -2,6 +2,7 @@ mod data_store;
 mod validated_tx;
 
 pub use data_store::TransactionInputsDataStore;
+use miden_node_utils::spawn::{spawn_blocking_in_current_span, spawn_blocking_in_span};
 use
miden_protocol::MIN_PROOF_SECURITY_LEVEL; use miden_protocol::transaction::{ProvenTransaction, TransactionHeader, TransactionInputs}; use miden_tx::auth::UnreachableAuth; @@ -41,11 +42,10 @@ pub async fn validate_transaction( ) -> Result { // Proof verification is CPU-intensive; run it on a dedicated blocking thread. let proven_tx_clone = proven_tx.clone(); - tokio::task::spawn_blocking(move || { - info_span!("verify").in_scope(|| { - TransactionVerifier::new(MIN_PROOF_SECURITY_LEVEL).verify(&proven_tx_clone) - }) - }) + spawn_blocking_in_span( + move || TransactionVerifier::new(MIN_PROOF_SECURITY_LEVEL).verify(&proven_tx_clone), + info_span!("verify"), + ) .await .unwrap_or_else(|e| std::panic::resume_unwind(e.into_panic()))?; @@ -54,8 +54,8 @@ pub async fn validate_transaction( // VM execution may not yield; run it on a dedicated blocking thread. let (account, block_header, _, input_notes, tx_args) = tx_inputs.into_parts(); - let execute_span = info_span!("execute"); - let executed_tx = tokio::task::spawn_blocking(move || { + let execute_span = info_span!("execute").or_current(); + let executed_tx = spawn_blocking_in_current_span(move || { let executor: TransactionExecutor<'_, '_, _, UnreachableAuth> = TransactionExecutor::new(&data_store); tokio::runtime::Builder::new_current_thread()