From c46ae072ec799e999de85af914a9eea207d66c3b Mon Sep 17 00:00:00 2001 From: "Jan [Yann]" <4518474+fafk@users.noreply.github.com> Date: Mon, 26 Jan 2026 10:40:02 +0100 Subject: [PATCH 001/219] Make alloy gas estimator configurable (#4081) # Description Adds the ability to configure past_blocks and reward_percentile parameters for the EIP-1559 gas price estimator. Previously, alloy's hardcoded defaults (10 blocks, 20th percentile) were always used. # Changes [x] Add configurable_alloy.rs - a gas estimator that calls eth_feeHistory with custom parameters, then uses alloy's default estimation algorithm [x] Extend GasEstimatorType::Alloy config variant with optional past-blocks and reward-percentile fields [x] Default values match alloy's hardcoded constants (10 blocks, 20.0 percentile) for backwards compatibility --- crates/driver/src/infra/blockchain/gas.rs | 13 +- crates/driver/src/infra/config/file/mod.rs | 154 +++++++++++++++++- .../shared/src/gas_price_estimation/alloy.rs | 51 ------ .../configurable_alloy.rs | 102 ++++++++++++ crates/shared/src/gas_price_estimation/mod.rs | 18 +- 5 files changed, 277 insertions(+), 61 deletions(-) delete mode 100644 crates/shared/src/gas_price_estimation/alloy.rs create mode 100644 crates/shared/src/gas_price_estimation/configurable_alloy.rs diff --git a/crates/driver/src/infra/blockchain/gas.rs b/crates/driver/src/infra/blockchain/gas.rs index f67885d532..510eb1ace0 100644 --- a/crates/driver/src/infra/blockchain/gas.rs +++ b/crates/driver/src/infra/blockchain/gas.rs @@ -11,7 +11,7 @@ use { ethrpc::Web3, shared::gas_price_estimation::{ GasPriceEstimating, - alloy::Eip1559GasPriceEstimator, + configurable_alloy::{ConfigurableGasPriceEstimator, EstimatorConfig}, eth_node::NodeGasPriceEstimator, }, std::sync::Arc, @@ -36,7 +36,16 @@ impl GasPriceEstimator { ) -> Result { let gas: Arc = match gas_estimator_type { GasEstimatorType::Web3 => Arc::new(NodeGasPriceEstimator::new(web3.alloy.clone())), - GasEstimatorType::Alloy => Arc::new(Eip1559GasPriceEstimator::new(web3.alloy.clone())), + GasEstimatorType::Alloy { + past_blocks, + reward_percentile, + } => Arc::new(ConfigurableGasPriceEstimator::new( + web3.alloy.clone(), + EstimatorConfig { + past_blocks: *past_blocks, + reward_percentile: *reward_percentile, + }, + )), }; // TODO: simplify logic by moving gas price adjustments out of the individual // mempool configs diff --git a/crates/driver/src/infra/config/file/mod.rs b/crates/driver/src/infra/config/file/mod.rs index 4b4b20e7a7..ad0c4d8184 100644 --- a/crates/driver/src/infra/config/file/mod.rs +++ b/crates/driver/src/infra/config/file/mod.rs @@ -6,6 +6,10 @@ use { reqwest::Url, serde::{Deserialize, Deserializer, Serialize}, serde_with::serde_as, + shared::gas_price_estimation::configurable_alloy::{ + default_past_blocks, + default_reward_percentile, + }, solver::solver::Arn, std::{collections::HashMap, time::Duration}, }; @@ -735,13 +739,32 @@ pub struct LiquoriceConfig { pub http_timeout: Duration, } -#[derive(Clone, Debug, Deserialize, Default)] -#[serde(rename_all = "kebab-case", deny_unknown_fields)] -#[serde(tag = "estimator")] +#[derive(Clone, Debug, Deserialize)] +#[serde(rename_all = "kebab-case", deny_unknown_fields, tag = "estimator")] pub enum GasEstimatorType { Web3, - #[default] - Alloy, + /// EIP-1559 gas estimator using alloy's algorithm. + /// Optionally configure the fee history query parameters. 
+ #[serde(rename_all = "kebab-case")] + Alloy { + /// Number of blocks to look back for fee history (default: 10) + #[serde(default = "default_past_blocks")] + past_blocks: u64, + /// Percentile of rewards to use for priority fee estimation (default: + /// 20.0). This is what Metamask uses as medium priority: + /// https://github.com/MetaMask/core/blob/0fd4b397e7237f104d1c81579a0c4321624d076b/packages/gas-fee-controller/src/fetchGasEstimatesViaEthFeeHistory/calculateGasFeeEstimatesForPriorityLevels.ts#L14-L45 + #[serde(default = "default_reward_percentile")] + reward_percentile: f64, + }, +} + +impl Default for GasEstimatorType { + fn default() -> Self { + Self::Alloy { + past_blocks: default_past_blocks(), + reward_percentile: default_reward_percentile(), + } + } } /// Defines various strategies to prioritize orders. @@ -967,3 +990,124 @@ enum AtBlock { /// Use the latest finalized block. Finalized, } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn gas_estimator_alloy_defaults() { + let config: GasEstimatorType = toml::from_str( + r#" + estimator = "alloy" + "#, + ) + .unwrap(); + + match config { + GasEstimatorType::Alloy { + past_blocks, + reward_percentile, + } => { + assert_eq!(past_blocks, 10); + assert_eq!(reward_percentile, 20.0); + } + _ => panic!("expected Alloy variant"), + } + } + + #[test] + fn gas_estimator_alloy_custom_past_blocks() { + let config: GasEstimatorType = toml::from_str( + r#" + estimator = "alloy" + past-blocks = 5 + "#, + ) + .unwrap(); + + match config { + GasEstimatorType::Alloy { + past_blocks, + reward_percentile, + } => { + assert_eq!(past_blocks, 5); + assert_eq!(reward_percentile, 20.0); + } + _ => panic!("expected Alloy variant"), + } + } + + #[test] + fn gas_estimator_alloy_custom_percentile() { + let config: GasEstimatorType = toml::from_str( + r#" + estimator = "alloy" + reward-percentile = 50.0 + "#, + ) + .unwrap(); + + match config { + GasEstimatorType::Alloy { + past_blocks, + reward_percentile, + } => { + assert_eq!(past_blocks, 10); + assert_eq!(reward_percentile, 50.0); + } + _ => panic!("expected Alloy variant"), + } + } + + #[test] + fn gas_estimator_alloy_all_custom() { + let config: GasEstimatorType = toml::from_str( + r#" + estimator = "alloy" + past-blocks = 20 + reward-percentile = 75.0 + "#, + ) + .unwrap(); + + match config { + GasEstimatorType::Alloy { + past_blocks, + reward_percentile, + } => { + assert_eq!(past_blocks, 20); + assert_eq!(reward_percentile, 75.0); + } + _ => panic!("expected Alloy variant"), + } + } + + #[test] + fn gas_estimator_web3() { + let config: GasEstimatorType = toml::from_str( + r#" + estimator = "web3" + "#, + ) + .unwrap(); + + assert!(matches!(config, GasEstimatorType::Web3)); + } + + #[test] + fn gas_estimator_default() { + let config = GasEstimatorType::default(); + + match config { + GasEstimatorType::Alloy { + past_blocks, + reward_percentile, + } => { + assert_eq!(past_blocks, 10); + assert_eq!(reward_percentile, 20.0); + } + _ => panic!("expected Alloy variant as default"), + } + } +} diff --git a/crates/shared/src/gas_price_estimation/alloy.rs b/crates/shared/src/gas_price_estimation/alloy.rs deleted file mode 100644 index 9faf2190ff..0000000000 --- a/crates/shared/src/gas_price_estimation/alloy.rs +++ /dev/null @@ -1,51 +0,0 @@ -//! Uses `alloy`'s logic for suggesting reasonable EIP-1559 -//! gas price values. It computes the suggested gas price -//! based on the 20th percentile of fee rewards for -//! transactions of the last 10 blocks. -//! See -//! 
for the implementation details. - -use { - crate::gas_price_estimation::{GasPriceEstimating, price::GasPrice1559, u128_to_f64}, - alloy::providers::Provider, - anyhow::{Context, Result}, - ethrpc::AlloyProvider, - futures::TryFutureExt, - tracing::instrument, -}; - -/// Estimates an EIP-1559 gas price based on the 20th percentile of fee rewards -/// for transactions of the last 10 blocks. -pub struct Eip1559GasPriceEstimator(AlloyProvider); - -impl Eip1559GasPriceEstimator { - pub fn new(provider: AlloyProvider) -> Self { - Self(provider) - } -} - -#[async_trait::async_trait] -impl GasPriceEstimating for Eip1559GasPriceEstimator { - /// Returns alloy's estimation for the EIP-1559 gas price. - #[instrument(skip(self))] - async fn estimate(&self) -> Result { - let fees = self - .0 - .estimate_eip1559_fees() - .map_err(|err| anyhow::anyhow!("could not estimate EIP 1559 fees: {err:?}")) - .await?; - - let max_fee_per_gas = u128_to_f64(fees.max_fee_per_gas) - .context("could not convert max_fee_per_gas to f64")?; - - Ok(GasPrice1559 { - // We reuse `max_fee_per_gas` since the base fee only actually - // exists in a mined block. For price estimates used to configure - // the gas price of a transaction the base fee doesn't matter. - base_fee_per_gas: max_fee_per_gas, - max_fee_per_gas, - max_priority_fee_per_gas: u128_to_f64(fees.max_priority_fee_per_gas) - .context("could not convert max_priority_fee_per_gas to f64")?, - }) - } -} diff --git a/crates/shared/src/gas_price_estimation/configurable_alloy.rs b/crates/shared/src/gas_price_estimation/configurable_alloy.rs new file mode 100644 index 0000000000..671497d7cf --- /dev/null +++ b/crates/shared/src/gas_price_estimation/configurable_alloy.rs @@ -0,0 +1,102 @@ +//! Configurable EIP-1559 gas price estimator. +//! +//! Unlike alloy's default estimator which uses hardcoded values (10 blocks, +//! 20th percentile), this estimator allows configuring: +//! - Number of blocks to look back +//! - Reward percentile to use + +use { + crate::gas_price_estimation::{GasPriceEstimating, price::GasPrice1559, u128_to_f64}, + alloy::{ + eips::BlockNumberOrTag, + providers::{Provider, utils::eip1559_default_estimator}, + }, + anyhow::{Context, Result}, + ethrpc::AlloyProvider, + tracing::instrument, +}; + +#[derive(Debug, Clone, Copy)] +pub struct EstimatorConfig { + /// Number of blocks to look back for fee history + pub past_blocks: u64, + /// Percentile of rewards to use for priority fee estimation + pub reward_percentile: f64, +} + +pub fn default_past_blocks() -> u64 { + 10 +} + +pub fn default_reward_percentile() -> f64 { + 20.0 +} + +/// A configurable EIP-1559 gas price estimator. +/// +/// Uses alloy's default estimation algorithm but with configurable +/// `past_blocks` and `reward_percentile` parameters for the fee history query. 
+pub struct ConfigurableGasPriceEstimator { + provider: AlloyProvider, + config: EstimatorConfig, +} + +impl ConfigurableGasPriceEstimator { + pub fn new(provider: AlloyProvider, config: EstimatorConfig) -> Self { + Self { provider, config } + } +} + +#[async_trait::async_trait] +impl GasPriceEstimating for ConfigurableGasPriceEstimator { + #[instrument(skip(self), fields( + past_blocks = %self.config.past_blocks, + reward_percentile = %self.config.reward_percentile + ))] + async fn estimate(&self) -> Result { + // Fetch fee history with our configured parameters + let fee_history = self + .provider + .get_fee_history( + self.config.past_blocks, + BlockNumberOrTag::Latest, + &[self.config.reward_percentile], + ) + .await + .context("failed to fetch fee history")?; + + // Get base fee: use latest block's base fee, or fall back to fetching + // latest block directly if fee history is empty + let base_fee_per_gas = match fee_history.latest_block_base_fee() { + Some(base_fee) if base_fee != 0 => base_fee, + _ => { + // empty response, fetch basefee from latest block directly + let block = self + .provider + .get_block_by_number(BlockNumberOrTag::Latest) + .await + .context("failed to fetch latest block")? + .context("latest block not found")?; + u128::from( + block + .header + .base_fee_per_gas + .context("base_fee_per_gas not available (eip1559 not supported)")?, + ) + } + }; + + // Use alloy's default estimation algorithm + let estimation = + eip1559_default_estimator(base_fee_per_gas, &fee_history.reward.unwrap_or_default()); + + Ok(GasPrice1559 { + base_fee_per_gas: u128_to_f64(base_fee_per_gas) + .context("could not convert base_fee_per_gas to f64")?, + max_fee_per_gas: u128_to_f64(estimation.max_fee_per_gas) + .context("could not convert max_fee_per_gas to f64")?, + max_priority_fee_per_gas: u128_to_f64(estimation.max_priority_fee_per_gas) + .context("could not convert max_priority_fee_per_gas to f64")?, + }) + } +} diff --git a/crates/shared/src/gas_price_estimation/mod.rs b/crates/shared/src/gas_price_estimation/mod.rs index 6f49cee005..2aad505340 100644 --- a/crates/shared/src/gas_price_estimation/mod.rs +++ b/crates/shared/src/gas_price_estimation/mod.rs @@ -1,4 +1,4 @@ -pub mod alloy; +pub mod configurable_alloy; pub mod driver; pub mod eth_node; pub mod fake; @@ -9,7 +9,12 @@ use { crate::{ ethrpc::Web3, gas_price_estimation::{ - alloy::Eip1559GasPriceEstimator, + configurable_alloy::{ + ConfigurableGasPriceEstimator, + EstimatorConfig, + default_past_blocks, + default_reward_percentile, + }, eth_node::NodeGasPriceEstimator, priority::PriorityGasPriceEstimating, }, @@ -73,7 +78,14 @@ pub async fn create_priority_estimator( estimators.push(Box::new(NodeGasPriceEstimator::new(web3.alloy.clone()))) } GasEstimatorType::Alloy => { - estimators.push(Box::new(Eip1559GasPriceEstimator::new(web3.alloy.clone()))) + let estimator = ConfigurableGasPriceEstimator::new( + web3.alloy.clone(), + EstimatorConfig { + past_blocks: default_past_blocks(), + reward_percentile: default_reward_percentile(), + }, + ); + estimators.push(Box::new(estimator)) } } } From 22ba97683094ecec762c277cfee71a5ab1bc96c5 Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Mon, 26 Jan 2026 10:45:55 +0100 Subject: [PATCH 002/219] Improve handling of unverifiable quotes (#4085) # Description Currently the quote verification leads to weird results - especially for Ondo tokens. Routing these tokens requires the use of a proprietary API which does not give out usable call data without an actual trade intent. 
To adhere to the API, solvers simply leave the execution plan of their solution blank (pre-interactions, regular interactions, JIT orders). Normally this would lead to a revert in the trade simulation, which would in turn cause our system to keep the quotes but mark them as unverifiable. However, if the settlement contract holds enough buy tokens to pay for the entire quoted amount, the simulation does not revert; the analysis afterwards then detects that the quote is giving money away for free, and the quote gets discarded entirely.

This causes 2 main issues:
1. it is possible to get a `fast` quote (which skips verification altogether) and then end up with `NoLiquidity` errors for `optimal` quotes, which is confusing
2. those `NoLiquidity` errors then prevent users from placing orders

# Changes

Since this needs to be resolved urgently I went for a relatively simple approach: we detect whether a solution contains any execution plan at all and only discard quotes that are too inaccurate if the solver actually tried to provide such a plan. If no plan is provided, we assume that no plan could be provided.

Note that there is still an incentive to provide verifiable calldata, because any verifiable quote is preferred over any non-verifiable quote. Solvers that don't make the effort to provide calldata will therefore basically never win quotes for trades where providing it is possible.

Minor other changes:
* renamed `Error::TooInaccurate` to `Error::BuffersPayForOrder` to make the error case more self-explanatory
* adjusted some getter functions to return `impl Iterator` instead of `Vec` to avoid unnecessary cloning

## How to test

This is very hard to test with unit or e2e tests. Given how small the actual change is, the existing e2e tests should be enough to cover the correctness of the regular case, and a deployment to prod will show whether we now indeed handle the Ondo token case better.
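To make the new rule easy to review, here is a minimal, self-contained sketch of the decision. Only the `Error` variant names and the `has_execution_plan` flag mirror the code touched by this PR; the simplified types and the `discard_quote` helper are purely illustrative.

```rust
// Simplified stand-in for the trade verifier's error type (the real
// `SimulationFailed` variant carries an error payload).
enum Error {
    BuffersPayForOrder,
    SimulationFailed,
}

/// Should a failed verification discard the quote entirely instead of
/// falling back to the unverified estimate?
fn discard_quote(err: &Error, has_execution_plan: bool) -> bool {
    // Only discard when the solver actually provided an execution plan and
    // the settlement buffers still ended up paying for the order.
    has_execution_plan && matches!(err, Error::BuffersPayForOrder)
}

fn main() {
    // No execution plan (e.g. Ondo tokens): keep the quote, just mark it unverified.
    assert!(!discard_quote(&Error::BuffersPayForOrder, false));
    // Execution plan provided but the buffers pay for the order: discard.
    assert!(discard_quote(&Error::BuffersPayForOrder, true));
    // Simulation failures always fall back to the unverified estimate.
    assert!(!discard_quote(&Error::SimulationFailed, true));
}
```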
--- .../price_estimation/trade_verifier/mod.rs | 77 ++++++++++++------- crates/shared/src/trade_finding/external.rs | 6 +- crates/shared/src/trade_finding/mod.rs | 32 +++++--- 3 files changed, 74 insertions(+), 41 deletions(-) diff --git a/crates/shared/src/price_estimation/trade_verifier/mod.rs b/crates/shared/src/price_estimation/trade_verifier/mod.rs index 76a2d2ba50..4f5cee74cc 100644 --- a/crates/shared/src/price_estimation/trade_verifier/mod.rs +++ b/crates/shared/src/price_estimation/trade_verifier/mod.rs @@ -223,9 +223,9 @@ impl TradeVerifier { solver: trade.solver(), verified: true, execution: QuoteExecution { - interactions: map_interactions_data(&trade.interactions()), - pre_interactions: map_interactions_data(&trade.pre_interactions()), - jit_orders: trade.jit_orders(), + interactions: map_interactions_data(trade.interactions()), + pre_interactions: map_interactions_data(trade.pre_interactions()), + jit_orders: trade.jit_orders().cloned().collect(), }, }; tracing::warn!( @@ -305,7 +305,7 @@ impl TradeVerifier { summary.out_amount = I512::from(query.in_amount.get()) + summary.out_amount; } else if summary.out_amount < I512::ZERO { tracing::debug!("Trade out amount is negative"); - return Err(Error::TooInaccurate); + return Err(Error::BuffersPayForOrder); } } @@ -517,9 +517,9 @@ impl TradeVerifying for TradeVerifier { solver: trade.solver(), verified: false, execution: QuoteExecution { - interactions: map_interactions_data(&trade.interactions()), - pre_interactions: map_interactions_data(&trade.pre_interactions()), - jit_orders: trade.jit_orders(), + interactions: map_interactions_data(trade.interactions()), + pre_interactions: map_interactions_data(trade.pre_interactions()), + jit_orders: trade.jit_orders().cloned().collect(), }, }) .context("solver provided no gas estimate"); @@ -537,20 +537,42 @@ impl TradeVerifying for TradeVerifier { .await { Ok(verified) => Ok(verified), - Err(Error::SimulationFailed(err)) => { - tracing::debug!(estimate = ?unverified_result, ?err, "quote verification failed"); - unverified_result - } - Err(err @ Error::TooInaccurate) => { - tracing::debug!("discarding quote because it's too inaccurate"); - Err(err.into()) + Err(err) => { + // For some tokens it's not possible to provide verifiable calldata in the + // quote (e.g. when they require the use of proprietary APIs which don't give + // out calldata willy nilly). + // + // Since you can't magically make up calldata that makes your quote verifiable + // solvers don't provide any call data in those cases. + // This has 2 possible outcomes: + // 1. the settlement contract has enough buy_tokens to pay for the order => + // Error::BuffersPayForOrder + // 2. not enough buy tokens in buffer => error::SimulationFailure + // + // To make handling of these quotes more predictable we'll only discard + // `Error::BufferPayForOrder` errors if the solver actually tried to provide a + // an execution plan but it's just not correct. In all other cases we just flag + // the solution as unverified but let it pass. 
+ let has_execution_plan = trade.has_execution_plan(); + if has_execution_plan && matches!(err, Error::BuffersPayForOrder) { + tracing::debug!( + has_execution_plan, + "discarding quote because buffers pay for order" + ); + Err(err.into()) + } else { + tracing::debug!(estimate = ?unverified_result, ?err, "quote verification failed"); + unverified_result + } } } } } -fn encode_interactions(interactions: &[Interaction]) -> Vec { - interactions.iter().map(|i| i.encode()).collect() +fn encode_interactions<'a>( + interactions: impl IntoIterator, +) -> Vec { + interactions.into_iter().map(|i| i.encode()).collect() } #[expect(clippy::too_many_arguments)] @@ -565,7 +587,7 @@ fn encode_settlement( domain_separator: &DomainSeparator, settlement: Address, ) -> Result { - let mut trade_interactions = encode_interactions(&trade.interactions()); + let mut trade_interactions = encode_interactions(trade.interactions()); if query.buy_token == BUY_ETH_ADDRESS { // Because the `driver` manages `WETH` unwraps under the hood the `TradeFinder` // does not have to emit unwraps to pay out `ETH` in a trade. @@ -624,7 +646,7 @@ fn encode_settlement( let user_interactions = verification.pre_interactions.iter().cloned(); let pre_interactions: Vec<_> = user_interactions - .chain(trade.pre_interactions()) + .chain(trade.pre_interactions().cloned()) .chain([trade_setup_interaction]) .collect(); @@ -899,7 +921,7 @@ fn ensure_quote_accuracy( .context("summary buy token is missing")?; if (*sell_token_lost >= sell_token_lost_limit) || (*buy_token_lost >= buy_token_lost_limit) { - return Err(Error::TooInaccurate); + return Err(Error::BuffersPayForOrder); } Ok(Estimate { @@ -908,9 +930,9 @@ fn ensure_quote_accuracy( solver: trade.solver(), verified: true, execution: QuoteExecution { - interactions: map_interactions_data(&trade.interactions()), - pre_interactions: map_interactions_data(&trade.pre_interactions()), - jit_orders: trade.jit_orders(), + interactions: map_interactions_data(trade.interactions()), + pre_interactions: map_interactions_data(trade.pre_interactions()), + jit_orders: trade.jit_orders().cloned().collect(), }, }) } @@ -927,9 +949,10 @@ pub struct PriceQuery { #[derive(thiserror::Error, Debug)] enum Error { /// Verification logic ran successfully but the quote was deemed too - /// inaccurate to be usable. - #[error("too inaccurate")] - TooInaccurate, + /// inaccurate because too many buy tokens came from the settlement + /// contract's buffers. + #[error("buffers pay for order")] + BuffersPayForOrder, /// Some error caused the simulation to not finish successfully. 
#[error("quote could not be simulated")] SimulationFailed(#[from] anyhow::Error), @@ -1010,7 +1033,7 @@ mod tests { let estimate = ensure_quote_accuracy(&low_threshold, &query, &Default::default(), &sell_more); - assert!(matches!(estimate, Err(Error::TooInaccurate))); + assert!(matches!(estimate, Err(Error::BuffersPayForOrder))); // passes with slightly higher tolerance let estimate = @@ -1030,7 +1053,7 @@ mod tests { let estimate = ensure_quote_accuracy(&low_threshold, &query, &Default::default(), &pay_out_more); - assert!(matches!(estimate, Err(Error::TooInaccurate))); + assert!(matches!(estimate, Err(Error::BuffersPayForOrder))); // passes with slightly higher tolerance let estimate = diff --git a/crates/shared/src/trade_finding/external.rs b/crates/shared/src/trade_finding/external.rs index ca180729dc..dae60321a4 100644 --- a/crates/shared/src/trade_finding/external.rs +++ b/crates/shared/src/trade_finding/external.rs @@ -219,9 +219,9 @@ impl TradeFinding for ExternalTradeFinder { gas_estimate, solver: trade.solver(), execution: QuoteExecution { - interactions: map_interactions_data(&trade.interactions()), - pre_interactions: map_interactions_data(&trade.pre_interactions()), - jit_orders: trade.jit_orders(), + interactions: map_interactions_data(trade.interactions()), + pre_interactions: map_interactions_data(trade.pre_interactions()), + jit_orders: trade.jit_orders().cloned().collect(), }, }) } diff --git a/crates/shared/src/trade_finding/mod.rs b/crates/shared/src/trade_finding/mod.rs index 824e5c8b3a..a406500d60 100644 --- a/crates/shared/src/trade_finding/mod.rs +++ b/crates/shared/src/trade_finding/mod.rs @@ -95,24 +95,32 @@ impl TradeKind { } } - pub fn interactions(&self) -> Vec { + /// Returns whether the solution contains anything that could + /// actually produce the promised buy tokens. 
+ pub fn has_execution_plan(&self) -> bool { + self.interactions().next().is_some() + || self.jit_orders().next().is_some() + || self.pre_interactions().next().is_some() + } + + pub fn interactions(&self) -> impl std::iter::Iterator { match self { - TradeKind::Legacy(trade) => trade.interactions.clone(), - TradeKind::Regular(trade) => trade.interactions.clone(), + TradeKind::Legacy(trade) => trade.interactions.iter(), + TradeKind::Regular(trade) => trade.interactions.iter(), } } - pub fn pre_interactions(&self) -> Vec { + pub fn pre_interactions(&self) -> impl std::iter::Iterator { match self { - TradeKind::Legacy(_) => Vec::new(), - TradeKind::Regular(trade) => trade.pre_interactions.clone(), + TradeKind::Legacy(_) => [].iter(), + TradeKind::Regular(trade) => trade.pre_interactions.iter(), } } - pub fn jit_orders(&self) -> Vec { + pub fn jit_orders(&self) -> impl std::iter::Iterator { match self { - TradeKind::Legacy(_) => Vec::new(), - TradeKind::Regular(trade) => trade.jit_orders.clone(), + TradeKind::Legacy(_) => [].iter(), + TradeKind::Regular(trade) => trade.jit_orders.iter(), } } } @@ -281,9 +289,11 @@ pub fn map_interactions(interactions: &[InteractionData]) -> Vec { interactions.iter().cloned().map(Into::into).collect() } -pub fn map_interactions_data(interactions: &[Interaction]) -> Vec { +pub fn map_interactions_data<'a>( + interactions: impl IntoIterator, +) -> Vec { interactions - .iter() + .into_iter() .map(|i| i.to_interaction_data()) .collect() } From adda1aebada3c5b06a2313a0dbc39d441a7b3839 Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Mon, 26 Jan 2026 11:17:07 +0100 Subject: [PATCH 003/219] Log solve request data transfer (#4082) # Description Some solvers reported that some requests come significantly delayed (judging by the auction deadline). Currently we have no way to distinguish between receiving the start of the `/solve` request and streaming the actual data. This PR makes this possible by making the `solve` handler take a raw http request and stream the body afterwards. # Changes Instead of: 1. collecting the whole body into a `String` (including utf8 check) 2. logging that we received a request 3. putting that `String` into an `Arc` to make copying it cheap 4. deserializing the string into a `SolveRequest` We now do: 1. receive raw http request 2. log that we received it 3. stream the body into a cheaply copyable `Bytes` type 4. log how long the data transfer took 5. deserialize raw bytes into `SolveRequest` Since handling the raw request seems to bypass axum's request size checks I did it manually for this endpoint. 
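As a rough, self-contained illustration of the manual size check and the new transfer timing (plain in-memory chunks stand in for the hyper body stream; the real handler uses `http_body::Limited` and `hyper::body::to_bytes`, and the helper below is only a sketch):

```rust
use std::time::Instant;

// Same limit the driver API already enforces via its body-limit layer.
const REQUEST_BODY_LIMIT: usize = 10 * 1024 * 1024;

/// Collects body chunks while enforcing the size limit and measuring how long
/// the transfer takes. In the real code the measured duration feeds the new
/// `solve_request_body_read_duration_seconds` histogram.
fn collect_body(chunks: impl IntoIterator<Item = Vec<u8>>) -> Result<Vec<u8>, &'static str> {
    let start = Instant::now();
    let mut body = Vec::new();
    for chunk in chunks {
        if body.len() + chunk.len() > REQUEST_BODY_LIMIT {
            return Err("request body too large");
        }
        body.extend_from_slice(&chunk);
    }
    println!("finished streaming request body in {:?}", start.elapsed());
    Ok(body)
}

fn main() {
    let body = collect_body([b"{\"auction\":".to_vec(), b"{}}".to_vec()]).unwrap();
    assert_eq!(body, b"{\"auction\":{}}".to_vec());
}
```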
## How to test Existing tests should suffice --------- Co-authored-by: ilya --- Cargo.lock | 1 + Cargo.toml | 1 + crates/driver/Cargo.toml | 1 + crates/driver/src/domain/competition/mod.rs | 3 +- .../src/domain/competition/pre_processing.rs | 13 ++-- crates/driver/src/infra/api/mod.rs | 2 +- .../driver/src/infra/api/routes/solve/mod.rs | 77 ++++++++++++++++--- crates/driver/src/infra/observe/mod.rs | 8 +- 8 files changed, 84 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 838549e714..108ddec027 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2732,6 +2732,7 @@ dependencies = [ "ethrpc", "futures", "hex-literal", + "http-body 0.4.6", "humantime", "humantime-serde", "hyper 0.14.32", diff --git a/Cargo.toml b/Cargo.toml index 3d725f5013..ed1c68a30e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,6 +25,7 @@ flate2 = "1.0.30" futures = "0.3.30" const-hex = "1.17.0" hex-literal = "0.4.1" +http-body = "0.4.6" humantime = "2.1.0" humantime-serde = "1.1.1" hyper = "0.14.29" diff --git a/crates/driver/Cargo.toml b/crates/driver/Cargo.toml index 6edf782820..edd2f47f6e 100644 --- a/crates/driver/Cargo.toml +++ b/crates/driver/Cargo.toml @@ -30,6 +30,7 @@ ethrpc = { workspace = true } futures = { workspace = true } const-hex = { workspace = true } hex-literal = { workspace = true } +http-body = { workspace = true } humantime = { workspace = true } humantime-serde = { workspace = true } hyper = { workspace = true } diff --git a/crates/driver/src/domain/competition/mod.rs b/crates/driver/src/domain/competition/mod.rs index a0d2c41d0c..5c83a800b1 100644 --- a/crates/driver/src/domain/competition/mod.rs +++ b/crates/driver/src/domain/competition/mod.rs @@ -22,6 +22,7 @@ use { util::{Bytes, math}, }, futures::{StreamExt, future::Either, stream::FuturesUnordered}, + hyper::body::Bytes as RequestBytes, itertools::Itertools, std::{ cmp::Reverse, @@ -112,7 +113,7 @@ impl Competition { } /// Solve an auction as part of this competition. - pub async fn solve(&self, auction: Arc) -> Result, Error> { + pub async fn solve(&self, auction: RequestBytes) -> Result, Error> { let start = Instant::now(); let timer = ::observe::metrics::metrics() .on_auction_overhead_start("driver", "pre_processing_total"); diff --git a/crates/driver/src/domain/competition/pre_processing.rs b/crates/driver/src/domain/competition/pre_processing.rs index 8b0f4b47ac..99bd4bb178 100644 --- a/crates/driver/src/domain/competition/pre_processing.rs +++ b/crates/driver/src/domain/competition/pre_processing.rs @@ -13,6 +13,7 @@ use { anyhow::{Context, Result}, chrono::Utc, futures::{FutureExt, StreamExt, future::BoxFuture, stream::FuturesUnordered}, + hyper::body::Bytes as RequestBytes, itertools::Itertools, model::{ interaction::InteractionData, @@ -73,7 +74,7 @@ impl std::fmt::Debug for Utilities { #[derive(Debug)] struct ControlBlock { /// Auction for which the data aggregation task was spawned. - solve_request: Arc, + solve_request: RequestBytes, /// Data aggregation task. tasks: DataFetchingTasks, } @@ -90,7 +91,7 @@ impl DataAggregator { /// only once for all connected solvers to share. pub async fn start_or_get_tasks_for_auction( &self, - request: Arc, + request: RequestBytes, ) -> Result { let mut lock = self.control.lock().await; let current_auction = &lock.solve_request; @@ -99,7 +100,7 @@ impl DataAggregator { // requests per auction. That means we can use the significantly // cheaper string comparison instead of parsing the JSON to compare // the auction ids. 
- if &request == current_auction { + if request == current_auction { let id = lock.tasks.auction.clone().await.id; init_auction_id_in_span(id.map(|i| i.0)); tracing::debug!("await running data aggregation task"); @@ -164,7 +165,7 @@ impl DataAggregator { } } - async fn assemble_tasks(&self, request: Arc) -> Result { + async fn assemble_tasks(&self, request: RequestBytes) -> Result { let auction = self.utilities.parse_request(request).await?; let balances = @@ -211,14 +212,14 @@ impl Utilities { /// Parses the JSON body of the `/solve` request during the unified /// auction pre-processing since eagerly deserializing these requests /// is surprisingly costly because their are so big. - async fn parse_request(&self, solve_request: Arc) -> Result> { + async fn parse_request(&self, solve_request: RequestBytes) -> Result> { let auction_dto: SolveRequest = { let _timer = metrics::get().processing_stage_timer("parse_dto"); let _timer2 = observe::metrics::metrics().on_auction_overhead_start("driver", "parse_dto"); // deserialization takes tens of milliseconds so run it on a blocking task tokio::task::spawn_blocking(move || { - serde_json::from_str(&solve_request).context("could not parse solve request") + serde_json::from_slice(&solve_request).context("could not parse solve request") }) .await .context("failed to await blocking task")?? diff --git a/crates/driver/src/infra/api/mod.rs b/crates/driver/src/infra/api/mod.rs index afd3f27578..5b42fc88e5 100644 --- a/crates/driver/src/infra/api/mod.rs +++ b/crates/driver/src/infra/api/mod.rs @@ -31,7 +31,7 @@ use { mod error; pub mod routes; -const REQUEST_BODY_LIMIT: usize = 10 * 1024 * 1024; +pub const REQUEST_BODY_LIMIT: usize = 10 * 1024 * 1024; pub struct Api { pub solvers: Vec, diff --git a/crates/driver/src/infra/api/routes/solve/mod.rs b/crates/driver/src/infra/api/routes/solve/mod.rs index 3750b7ef1f..6ed72ffef5 100644 --- a/crates/driver/src/infra/api/routes/solve/mod.rs +++ b/crates/driver/src/infra/api/routes/solve/mod.rs @@ -2,11 +2,16 @@ pub mod dto; pub use dto::AuctionError; use { - crate::infra::{ - api::{Error, State}, - observe, + crate::{ + domain::competition, + infra::{ + api::{Error, REQUEST_BODY_LIMIT, State}, + observe, + }, }, - std::sync::Arc, + axum::{body::Body, http::Request}, + hyper::body::Bytes, + std::time::{Duration, Instant}, tracing::Instrument, }; @@ -16,17 +21,21 @@ pub(in crate::infra::api) fn solve(router: axum::Router) -> axum::Router< async fn route( state: axum::extract::State, - // take the request body as a raw string to delay parsing as much - // as possible because many requests don't have to be parsed at all - req: String, + // Take the request as raw request to extract the body as a stream. + // This delays interpreting the data as much as possible and allows + // logging how long the raw data transfer takes. + request: Request, ) -> Result, (hyper::StatusCode, axum::Json)> { + let solver = state.solver().name().as_str(); + let handle_request = async { + let body_bytes = collect_request_body(request, solver).await?; let competition = state.competition(); - let result = competition.solve(Arc::new(req)).await; + let result = competition.solve(body_bytes).await; // Solving takes some time, so there is a chance for the settlement queue to // have capacity again. 
competition.ensure_settle_queue_capacity()?; - observe::solved(state.solver().name(), &result); + observe::solved(solver, &result); Ok(axum::Json(dto::SolveResponse::new( result?, &competition.solver, @@ -34,6 +43,54 @@ async fn route( }; handle_request - .instrument(tracing::info_span!("/solve", solver = %state.solver().name(), auction_id = tracing::field::Empty)) + .instrument(tracing::info_span!( + "/solve", + solver, + auction_id = tracing::field::Empty + )) .await } + +async fn collect_request_body( + request: Request, + solver: &str, +) -> Result { + tracing::trace!("start streaming request body"); + let start = Instant::now(); + + // accepting the raw request bypasses axum's request body limiting layer + // so we have to manually ensure the body has a reasonable size. + let limited_body = http_body::Limited::new(request.into_body(), REQUEST_BODY_LIMIT); + let body_bytes = hyper::body::to_bytes(limited_body).await.map_err(|err| { + tracing::warn!(?err, "failed to stream request body"); + competition::Error::MalformedRequest + })?; + + let duration = start.elapsed(); + Metrics::measure_solve_transfer_time(solver, duration); + tracing::trace!(?duration, "finished streaming request body"); + Ok(body_bytes) +} + +#[derive(prometheus_metric_storage::MetricStorage)] +struct Metrics { + /// Time spent by the driver reading the full solve request body into + /// memory. + #[metric(labels("solver"))] + #[metric(buckets(0.0001, 0.0005, 0.002, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1, 1.5))] + solve_request_body_read_duration_seconds: prometheus::HistogramVec, +} + +impl Metrics { + fn get() -> &'static Metrics { + Metrics::instance(::observe::metrics::get_storage_registry()) + .expect("unexpected error getting metrics instance") + } + + fn measure_solve_transfer_time(solver: &str, time: Duration) { + Self::get() + .solve_request_body_read_duration_seconds + .with_label_values(&[solver]) + .observe(time.as_secs_f64()); + } +} diff --git a/crates/driver/src/infra/observe/mod.rs b/crates/driver/src/infra/observe/mod.rs index 0847778fbe..0a6f232b5b 100644 --- a/crates/driver/src/infra/observe/mod.rs +++ b/crates/driver/src/infra/observe/mod.rs @@ -222,27 +222,27 @@ pub fn settled(solver: &solver::Name, result: &Result, competition::Error>) { +pub fn solved(solver: &str, result: &Result, competition::Error>) { match result { Ok(Some(solved)) => { tracing::info!(?solved, "solved auction"); metrics::get() .solutions - .with_label_values(&[solver.as_str(), "Success"]) + .with_label_values(&[solver, "Success"]) .inc(); } Ok(None) => { tracing::debug!("no solution found"); metrics::get() .solutions - .with_label_values(&[solver.as_str(), "SolutionNotFound"]) + .with_label_values(&[solver, "SolutionNotFound"]) .inc(); } Err(err) => { tracing::warn!(?err, "failed to solve auction"); metrics::get() .solutions - .with_label_values(&[solver.as_str(), competition_error(err)]) + .with_label_values(&[solver, competition_error(err)]) .inc(); } } From 5ca01e7e45fd926b25cd169655684e44ca10c5c1 Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Mon, 26 Jan 2026 12:03:15 +0100 Subject: [PATCH 004/219] Replace RPC mempool API with in-memory tracking (#4086) # Description In order to know which gas price we have to beat at least (in case of cancellations) we made the driver scan the RPC node's mempool using the respective API as this is the ultimate source of truth. However, this has 2 issues: 1. not widely supported 2. 
introduces latency (apparently up to 2s on mainnet at times) Especially the latency seemingly causes us to not notify the connected solver about the tx submission at times. The submission process works as follows: 1. driver receives a `/settle` call and starts the submission 2. driver does the usual tx submission where it monitors the submission deadline and initiate the cancellation if necessary 3. due to an [issue](https://github.com/cowprotocol/services/pull/3427) with dead block streams the driver also monitors if the autopilot is still waiting for the response for the `/settle` call 4. if the autopilot terminates the `/settle` call the driver only polls the submission future for 1 more second but otherwise simply stops polling it ([code](https://github.com/cowprotocol/services/blob/main/crates/driver/src/domain/competition/mod.rs#L630-L643)) Usually the submission future and autopilot detect the breach of the submission deadline at the same time so the settle future naturally executes the cancellation logic during that grace period. However, with the latency introduced by the mempool API this grace period is often not sufficient anymore (especially on mainnet). Doing some back of the napkin calculation using logs it appears as if the driver is currently not cancelling and submitting the respective notification for ~40% of the `/settle` calls. There is an argument to be made that the submission strategy should be refactored more broadly to ensure that cancellations always get initiated (instead of just stopping to poll the settle future) but this PR should at least already resolve the current issue. # Changes Instead of using the RPC's `mempool` API we simply store the last successfully submitted transactions in memory. Now that we only have to lookup a key in a `Dashmap` the latency will be as it was before. --- crates/driver/src/domain/competition/mod.rs | 15 +++--- crates/driver/src/domain/mempools.rs | 57 +++++++++++---------- crates/driver/src/infra/mempool/mod.rs | 31 +++++++++-- crates/e2e/Cargo.toml | 2 +- 4 files changed, 66 insertions(+), 39 deletions(-) diff --git a/crates/driver/src/domain/competition/mod.rs b/crates/driver/src/domain/competition/mod.rs index 5c83a800b1..655ccc568f 100644 --- a/crates/driver/src/domain/competition/mod.rs +++ b/crates/driver/src/domain/competition/mod.rs @@ -634,19 +634,20 @@ impl Competition { // disconnected). This is a fallback to recover from issues // like a stuck driver (e.g., stalled block stream). Either::Left((_closed, settle_fut)) => { - // Add a grace period to give driver the last chance to fetch the settlement - // tx. + tracing::debug!("autopilot terminated settle call"); + // Add a grace period to give driver the last chance to cancel the + // tx if needed. 
tokio::time::timeout(Duration::from_secs(1), settle_fut) .await - .unwrap_or_else(|_| Err(DeadlineExceeded.into())) + .unwrap_or_else(|_| { + tracing::error!("didn't finish tx submission within grace period"); + Err(DeadlineExceeded.into()) + }) } Either::Right((res, _)) => res, }; observe::settled(self.solver.name(), &result); - - if let Err(err) = response_sender.send(result) { - tracing::error!(?err, "Failed to send /settle response"); - } + let _ = response_sender.send(result); } .instrument(tracing_span) .await diff --git a/crates/driver/src/domain/mempools.rs b/crates/driver/src/domain/mempools.rs index 40a32b9dc5..afd6fec97a 100644 --- a/crates/driver/src/domain/mempools.rs +++ b/crates/driver/src/domain/mempools.rs @@ -145,7 +145,7 @@ impl Mempools { .minimum_replacement_gas_price(mempool, solver, nonce) .await; let final_gas_price = match &replacement_gas_price { - Ok(Some(replacement_gas_price)) + Some(replacement_gas_price) if replacement_gas_price.max() > current_gas_price.max() => { *replacement_gas_price @@ -281,7 +281,7 @@ impl Mempools { // replacement gas price, but if that fails for whatever reason // we use our best estimate based on the originally submitted tx let final_gas_price = match &replacement_gas_price { - Ok(Some(replacement)) => *replacement, + Some(replacement) => *replacement, _ => fallback_gas_price, }; @@ -311,35 +311,40 @@ impl Mempools { .await } - /// Tries to determine the minimum price to replace an existing - /// transaction in the mempool. + /// Computes minimum price to replace the last tx that was submitted + /// with the given nonce. Returns `None` if no tx was submitted with + /// that nonce yet. async fn minimum_replacement_gas_price( &self, mempool: &infra::Mempool, solver: &Solver, - nonce: u64, - ) -> anyhow::Result> { - let pending_tx = match mempool - .find_pending_tx_in_mempool(solver.address(), nonce) - .await? - { - Some(tx) => tx, - None => return Ok(None), - }; + next_nonce: u64, + ) -> Option { + if let Some(last_submission) = mempool.last_submission(solver.address()) { + (last_submission.nonce == next_nonce) + .then_some(last_submission.gas_price * GAS_PRICE_BUMP) + } else { + // If we don't have the last submission in-memory (i.e. first submission + // attempt after a restart) we try to inspect the nodes transaction mempool. + // This is only done as a backup since it can incur significant latency and + // is generally not very widely supported. + let pending_tx = mempool + .find_pending_tx_in_mempool(solver.address(), next_nonce) + .await + .inspect_err(|err| tracing::debug!(?err, "could not inspect tx mempool")) + .ok()??; - let pending_tx_gas_price = eth::GasPrice::new( - eth::U256::from(pending_tx.max_fee_per_gas()).into(), - eth::U256::from(pending_tx.max_priority_fee_per_gas().with_context(|| { - format!( - "pending tx is not EIP 1559 ({})", - pending_tx.inner.tx_hash() - ) - })?) - .into(), - eth::U256::from(pending_tx.max_fee_per_gas()).into(), - ); - // in order to replace a tx we need to increase the price - Ok(Some(pending_tx_gas_price * GAS_PRICE_BUMP)) + let pending_tx_gas_price = eth::GasPrice::new( + eth::U256::from(pending_tx.max_fee_per_gas()).into(), + eth::U256::from(pending_tx.max_priority_fee_per_gas().or_else(|| { + tracing::error!(tx = ?pending_tx.inner.tx_hash(), "pending tx is not EIP 1559"); + None + })?) 
+ .into(), + eth::U256::from(pending_tx.max_fee_per_gas()).into(), + ); + Some(pending_tx_gas_price * GAS_PRICE_BUMP) + } } } diff --git a/crates/driver/src/infra/mempool/mod.rs b/crates/driver/src/infra/mempool/mod.rs index ac25841d3f..00d017908a 100644 --- a/crates/driver/src/infra/mempool/mod.rs +++ b/crates/driver/src/infra/mempool/mod.rs @@ -7,11 +7,14 @@ use { alloy::{ consensus::Transaction, eips::BlockNumberOrTag, + primitives::Address, providers::{Provider, ext::TxPoolApi}, rpc::types::TransactionRequest, }, anyhow::Context, + dashmap::DashMap, ethrpc::Web3, + std::sync::Arc, url::Url, }; @@ -64,6 +67,13 @@ pub enum RevertProtection { pub struct Mempool { transport: Web3, config: Config, + last_submissions: Arc>, +} + +#[derive(Debug, Clone)] +pub struct Submission { + pub nonce: u64, + pub gas_price: eth::GasPrice, } impl std::fmt::Display for Mempool { @@ -79,7 +89,11 @@ impl Mempool { for account in solver_accounts { transport.wallet.register_signer(account); } - Self { transport, config } + Self { + transport, + config, + last_submissions: Default::default(), + } } /// Fetches the transaction count (nonce) for the given address at the @@ -153,19 +167,19 @@ impl Mempool { solver = ?solver.address(), "successfully submitted tx to mempool" ); + self.last_submissions + .insert(solver.address(), Submission { nonce, gas_price }); Ok(eth::TxId(*tx.tx_hash())) } Err(err) => { // log pending tx in case we failed to replace a pending tx - let pending_tx = self - .find_pending_tx_in_mempool(solver.address(), nonce) - .await; + let last_submission = self.last_submission(solver.address()); tracing::debug!( ?err, new_gas_price = ?gas_price, ?nonce, - ?pending_tx, + ?last_submission, ?gas_limit, solver = ?solver.address(), "failed to submit tx to mempool" @@ -199,6 +213,13 @@ impl Mempool { Ok(pending_tx) } + /// Looks up the last tx that was submitted for that signer. 
+ pub fn last_submission(&self, signer: eth::Address) -> Option { + self.last_submissions + .get(&signer) + .map(|entry| entry.value().clone()) + } + pub fn config(&self) -> &Config { &self.config } diff --git a/crates/e2e/Cargo.toml b/crates/e2e/Cargo.toml index 6bc22e894f..5507ec5045 100644 --- a/crates/e2e/Cargo.toml +++ b/crates/e2e/Cargo.toml @@ -9,7 +9,7 @@ edition = "2024" license = "MIT OR Apache-2.0" [dependencies] -alloy = { workspace = true, default-features = false, features = ["json-rpc", "providers", "rpc-client", "rpc-types", "transports", "reqwest", "signers", "signer-local", "signer-mnemonic", "provider-anvil-api", "provider-debug-api", "sol-types"] } +alloy = { workspace = true, default-features = false, features = ["json-rpc", "providers", "provider-txpool-api", "rpc-client", "rpc-types", "transports", "reqwest", "signers", "signer-local", "signer-mnemonic", "provider-anvil-api", "provider-debug-api", "sol-types"] } alloy-signer = {workspace = true, default-features = false, features = ["eip712"]} anyhow = { workspace = true } app-data = { workspace = true } From 7081b03f66c5154cc84ae93f79542425803b4639 Mon Sep 17 00:00:00 2001 From: Marcin Szymczak Date: Mon, 26 Jan 2026 13:00:35 +0100 Subject: [PATCH 005/219] Optimize live orders queries based on confirmed_valid_to column (#4055) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description Second part of https://github.com/cowprotocol/services/pull/4047 which introduced optimized queries based on the introduced confirmed_valid_to column It is **crucial** for the database to be already migrated manually as described in previous PR before applying this one. # Changes - [x] Adapt user_orders_with_quote query to use new column - [x] Adapt solvable_orders query to use new column ## How to test Tested on a test-db created by @MartinquaXD which contains a snapshot of prod data. The optimized queries run significantly faster due to changes in `orders` table and new indices. 
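For context, a small sketch of what the `true_valid_to` column encodes, mirroring the backfill in the `V098` migration further down in this patch (the function name and the `u32` types here are just for illustration):

```rust
/// `true_valid_to` as backfilled by the migration: the validity stored in
/// `ethflow_orders` if the order was placed through ethflow, otherwise the
/// order's own `valid_to`.
fn true_valid_to(order_valid_to: u32, ethflow_valid_to: Option<u32>) -> u32 {
    ethflow_valid_to.unwrap_or(order_valid_to)
}

fn main() {
    // Regular order: the column simply repeats the order's valid_to.
    assert_eq!(true_valid_to(1_700_000_000, None), 1_700_000_000);
    // Ethflow order: the user-signed validity stored in `ethflow_orders` wins,
    // so a single `true_valid_to >= $1` predicate replaces the old
    // ethflow_orders join plus two separate validity checks.
    assert_eq!(true_valid_to(u32::MAX, Some(1_700_000_000)), 1_700_000_000);
}
```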
## Related Issues https://github.com/cowprotocol/services/pull/4021 --------- Co-authored-by: ilya Co-authored-by: José Duarte Co-authored-by: Claude Co-authored-by: Martin Magnus --- crates/database/src/orders.rs | 96 ++++++++----------- database/README.md | 2 + .../sql/V098__set_true_valid_to_non_null.sql | 8 ++ .../V099__create_true_valid_to_indexes.sql | 4 + 4 files changed, 56 insertions(+), 54 deletions(-) create mode 100644 database/sql/V098__set_true_valid_to_non_null.sql create mode 100644 database/sql/V099__create_true_valid_to_indexes.sql diff --git a/crates/database/src/orders.rs b/crates/database/src/orders.rs index d1f77cb052..78e2d73b86 100644 --- a/crates/database/src/orders.rs +++ b/crates/database/src/orders.rs @@ -721,26 +721,23 @@ pub fn solvable_orders( /// - pending pre-signature /// - ethflow specific invalidation conditions const OPEN_ORDERS: &str = r#" - WITH live_orders AS ( + WITH live_orders AS MATERIALIZED ( SELECT o.* FROM orders o - LEFT JOIN ethflow_orders e ON e.uid = o.uid WHERE o.cancellation_timestamp IS NULL - AND o.valid_to >= $1 - AND (e.valid_to IS NULL OR e.valid_to >= $1) - AND NOT EXISTS (SELECT 1 FROM invalidations i WHERE i.order_uid = o.uid) - AND NOT EXISTS (SELECT 1 FROM onchain_order_invalidations oi WHERE oi.uid = o.uid) - AND NOT EXISTS (SELECT 1 FROM onchain_placed_orders op WHERE op.uid = o.uid - AND op.placement_error IS NOT NULL) + AND o.true_valid_to >= $1 + AND NOT EXISTS (SELECT 1 FROM invalidations i WHERE i.order_uid = o.uid) + AND NOT EXISTS (SELECT 1 FROM onchain_order_invalidations oi WHERE oi.uid = o.uid) + AND NOT EXISTS (SELECT 1 FROM onchain_placed_orders op WHERE op.uid = o.uid AND op.placement_error IS NOT NULL) ), trades_agg AS ( - SELECT t.order_uid, - SUM(t.buy_amount) AS sum_buy, - SUM(t.sell_amount) AS sum_sell, - SUM(t.fee_amount) AS sum_fee - FROM trades t - JOIN live_orders lo ON lo.uid = t.order_uid - GROUP BY t.order_uid + SELECT t.order_uid, + SUM(t.buy_amount) AS sum_buy, + SUM(t.sell_amount) AS sum_sell, + SUM(t.fee_amount) AS sum_fee + FROM trades t + JOIN live_orders lo ON lo.uid = t.order_uid + GROUP BY t.order_uid ) SELECT lo.uid, @@ -960,46 +957,37 @@ pub async fn user_orders_with_quote( owner: &Address, ) -> Result, sqlx::Error> { // Optimized version following the same pattern as OPEN_ORDERS - #[rustfmt::skip] const QUERY: &str = r#" -WITH live_orders AS ( - SELECT o.* - FROM orders o - LEFT JOIN ethflow_orders e ON e.uid = o.uid - WHERE o.cancellation_timestamp IS NULL - AND o.valid_to >= $1 - AND (e.valid_to IS NULL OR e.valid_to >= $1) - AND NOT EXISTS (SELECT 1 FROM invalidations i WHERE i.order_uid = o.uid) - AND NOT EXISTS (SELECT 1 FROM onchain_order_invalidations oi WHERE oi.uid = o.uid) - AND NOT EXISTS (SELECT 1 FROM onchain_placed_orders op WHERE op.uid = o.uid - AND op.placement_error IS NOT NULL) - AND o.owner = $2 - AND o.class = 'limit' -), -trades_agg AS ( - SELECT t.order_uid, - SUM(t.buy_amount) AS sum_buy, - SUM(t.sell_amount) AS sum_sell, - SUM(t.fee_amount) AS sum_fee - FROM trades t - JOIN live_orders lo ON lo.uid = t.order_uid - GROUP BY t.order_uid -) -SELECT - o_quotes.sell_amount as quote_sell_amount, - lo.sell_amount as order_sell_amount, - o_quotes.buy_amount as quote_buy_amount, - lo.buy_amount as order_buy_amount, - lo.kind as order_kind, - o_quotes.gas_amount as quote_gas_amount, - o_quotes.gas_price as quote_gas_price, - o_quotes.sell_token_price as quote_sell_token_price -FROM live_orders lo -LEFT JOIN trades_agg ta ON ta.order_uid = lo.uid -INNER JOIN order_quotes 
o_quotes ON lo.uid = o_quotes.order_uid -WHERE ((lo.kind = 'sell' AND COALESCE(ta.sum_sell,0) < lo.sell_amount) OR - (lo.kind = 'buy' AND COALESCE(ta.sum_buy ,0) < lo.buy_amount)) -"#; + WITH live_orders AS MATERIALIZED ( + SELECT o.* + FROM orders o + WHERE o.cancellation_timestamp IS NULL + AND o.true_valid_to >= $1 + AND NOT EXISTS (SELECT 1 FROM invalidations i WHERE i.order_uid = o.uid) + AND NOT EXISTS (SELECT 1 FROM onchain_order_invalidations oi WHERE oi.uid = o.uid) + AND NOT EXISTS (SELECT 1 FROM onchain_placed_orders op WHERE op.uid = o.uid AND op.placement_error IS NOT NULL) + AND o.owner = $2 + AND o.class = 'limit' + ) + SELECT + o_quotes.sell_amount AS quote_sell_amount, + lo.sell_amount AS order_sell_amount, + o_quotes.buy_amount AS quote_buy_amount, + lo.buy_amount AS order_buy_amount, + lo.kind AS order_kind, + o_quotes.gas_amount AS quote_gas_amount, + o_quotes.gas_price AS quote_gas_price, + o_quotes.sell_token_price AS quote_sell_token_price + FROM live_orders lo + INNER JOIN order_quotes o_quotes ON lo.uid = o_quotes.order_uid + WHERE ( + lo.kind = 'sell' + AND COALESCE((SELECT SUM(sell_amount) FROM trades WHERE order_uid = lo.uid), 0) < lo.sell_amount + ) OR ( + lo.kind = 'buy' + AND COALESCE((SELECT SUM(buy_amount) FROM trades WHERE order_uid = lo.uid), 0) < lo.buy_amount + ); + "#; sqlx::query_as::<_, OrderWithQuote>(QUERY) .bind(min_valid_to) .bind(owner) diff --git a/database/README.md b/database/README.md index 4e427976a7..04d5407171 100644 --- a/database/README.md +++ b/database/README.md @@ -272,6 +272,8 @@ Indexes: - order_sell_buy_tokens: btree(`sell_token`, `buy_token`) - user_order_creation_timestamp: btree(`owner`, `creation_timestamp` DESC) - version_idx: btree(`settlement_contract`) +- orders\_true\_valid\_to: btree(`true_valid_to`) +- okay\_onchain\_orders: btree(`uid`) WHERE placement\_error IS NOT NULL ### fee_policies diff --git a/database/sql/V098__set_true_valid_to_non_null.sql b/database/sql/V098__set_true_valid_to_non_null.sql new file mode 100644 index 0000000000..76a0928527 --- /dev/null +++ b/database/sql/V098__set_true_valid_to_non_null.sql @@ -0,0 +1,8 @@ +-- migrate any remaining orders +UPDATE orders +SET true_valid_to = COALESCE( + (SELECT ethflow_orders.valid_to FROM ethflow_orders WHERE ethflow_orders.uid = orders.uid), + orders.valid_to +); +-- at this point every order has the true_valid_to filled in +ALTER TABLE orders ALTER COLUMN true_valid_to SET NOT NULL; diff --git a/database/sql/V099__create_true_valid_to_indexes.sql b/database/sql/V099__create_true_valid_to_indexes.sql new file mode 100644 index 0000000000..1b4754f29b --- /dev/null +++ b/database/sql/V099__create_true_valid_to_indexes.sql @@ -0,0 +1,4 @@ +--index on `true_valid_to` for quickly discarding expired orders +CREATE INDEX CONCURRENTLY orders_true_valid_to ON orders USING btree (true_valid_to); +-- further drops the query from 100ms to 80ms (warmed cache) +CREATE INDEX CONCURRENTLY okay_onchain_orders ON onchain_placed_orders USING btree (uid) WHERE placement_error IS NOT NULL; From a71f5776eb227f054b29fe5fb082c25a0baaf293 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Duarte?= Date: Mon, 26 Jan 2026 12:36:17 +0000 Subject: [PATCH 006/219] [TRIVIAL] Remove atty and move maplit to dev-dependencies (#4089) # Description While migrating the orderbook to axum I did another pass over the dependencies and found that atty not only is deprecated, it has a RUSTSEC because its unmaintained and a proper replacement since Rust 1.70 # Changes - [ ] Replace deprecated atty 
crate with std::io::IsTerminal (https://github.com/softprops/atty/blob/master/README.md?plain=1#L3-L7) - [ ] Move maplit to dev-dependencies ## How to test Compilation --------- Co-authored-by: Claude --- .cargo/audit.toml | 12 ------ Cargo.lock | 45 +------------------- Cargo.toml | 1 - crates/observe/Cargo.toml | 1 - crates/observe/src/tracing.rs | 5 ++- crates/solver/Cargo.toml | 2 +- crates/solver/src/interactions/allowances.rs | 3 +- 7 files changed, 6 insertions(+), 63 deletions(-) diff --git a/.cargo/audit.toml b/.cargo/audit.toml index fbc8447a84..6b6afa04fe 100644 --- a/.cargo/audit.toml +++ b/.cargo/audit.toml @@ -29,22 +29,10 @@ ignore = [ # Needs upgrade to 0.3.20+ (minimal risk in current usage) "RUSTSEC-2025-0055", - # atty - Unmaintained (RUSTSEC-2024-0375) - # Migrate to std::io::IsTerminal (tracked in #3338) - "RUSTSEC-2024-0375", - - # atty - Unsound potential unaligned read (RUSTSEC-2021-0145) - # Will be removed when migrating to std::io::IsTerminal - "RUSTSEC-2021-0145", - # derivative - Unmaintained (RUSTSEC-2024-0388) # Evaluate alternatives (tracked in #3338) "RUSTSEC-2024-0388", - # adler - Unmaintained (RUSTSEC-2025-0056) - # Use adler2 instead (tracked in #3338) - "RUSTSEC-2025-0056", - # instant - Unmaintained (RUSTSEC-2024-0384) # Use web-time instead (tracked in #3338) "RUSTSEC-2024-0384", diff --git a/Cargo.lock b/Cargo.lock index 108ddec027..bad5e2cfac 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1136,17 +1136,6 @@ version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi 0.1.19", - "libc", - "winapi", -] - [[package]] name = "auto_impl" version = "1.3.0" @@ -3521,15 +3510,6 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" -[[package]] -name = "hermit-abi" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33" -dependencies = [ - "libc", -] - [[package]] name = "hermit-abi" version = "0.5.2" @@ -4643,7 +4623,7 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" dependencies = [ - "hermit-abi 0.5.2", + "hermit-abi", "libc", ] @@ -4718,7 +4698,6 @@ name = "observe" version = "0.1.0" dependencies = [ "async-trait", - "atty", "axum", "chrono", "console-subscriber", @@ -7939,28 +7918,6 @@ dependencies = [ "wasite", ] -[[package]] -name = "winapi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" -dependencies = [ - "winapi-i686-pc-windows-gnu", - "winapi-x86_64-pc-windows-gnu", -] - -[[package]] -name = "winapi-i686-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" - -[[package]] -name = "winapi-x86_64-pc-windows-gnu" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - [[package]] name = "windows-core" version = "0.62.2" diff --git a/Cargo.toml b/Cargo.toml index ed1c68a30e..d0c677d393 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -60,7 +60,6 @@ web3 = { version = "0.19.0", default-features = false } app-data = { path = "crates/app-data" } arc-swap = "1.7.1" async-stream = "0.3.5" -atty = "0.2" autopilot = { path = "crates/autopilot" } aws-config = "1.5.1" aws-sdk-s3 = { version = "1.34.0", default-features = false } diff --git a/crates/observe/Cargo.toml b/crates/observe/Cargo.toml index 0a21e36dfa..91578c049f 100644 --- a/crates/observe/Cargo.toml +++ b/crates/observe/Cargo.toml @@ -7,7 +7,6 @@ license = "MIT OR Apache-2.0" [dependencies] axum = { workspace = true } -atty = { workspace = true } async-trait = { workspace = true } chrono = { workspace = true, features = ["now"] } console-subscriber = { workspace = true, optional = true } diff --git a/crates/observe/src/tracing.rs b/crates/observe/src/tracing.rs index 98b1012b80..76b01e2196 100644 --- a/crates/observe/src/tracing.rs +++ b/crates/observe/src/tracing.rs @@ -84,11 +84,12 @@ fn set_tracing_subscriber(config: &Config) { .with_filter($env_filter) .boxed() } else { + let is_terminal = std::io::IsTerminal::is_terminal(&std::io::stdout()); tracing_subscriber::fmt::layer() .with_timer(timer) - .with_ansi(atty::is(atty::Stream::Stdout)) + .with_ansi(is_terminal) .map_event_format(|formatter| TraceIdFmt { - inner: formatter.with_ansi(atty::is(atty::Stream::Stdout)), + inner: formatter.with_ansi(is_terminal), }) .with_writer(writer) .with_filter($env_filter) diff --git a/crates/solver/Cargo.toml b/crates/solver/Cargo.toml index eac8a5b21a..71efe6cdcd 100644 --- a/crates/solver/Cargo.toml +++ b/crates/solver/Cargo.toml @@ -22,7 +22,6 @@ observe = { workspace = true } const-hex = { workspace = true } hex-literal = { workspace = true } itertools = { workspace = true } -maplit = { workspace = true } model = { workspace = true } num = { workspace = true } number = { workspace = true } @@ -36,6 +35,7 @@ tracing = { workspace = true } [dev-dependencies] derivative = { workspace = true } +maplit = { workspace = true } tokio = { workspace = true, features = ["test-util"] } testlib = { workspace = true } mockall = { workspace = true } diff --git a/crates/solver/src/interactions/allowances.rs b/crates/solver/src/interactions/allowances.rs index 085289d2dc..345e40c047 100644 --- a/crates/solver/src/interactions/allowances.rs +++ b/crates/solver/src/interactions/allowances.rs @@ -12,7 +12,6 @@ use { anyhow::{Context as _, Result, anyhow, ensure}, contracts::alloy::ERC20, ethrpc::Web3, - maplit::hashmap, shared::{ http_solver::model::TokenAmount, interaction::{EncodedInteraction, Interaction}, @@ -155,7 +154,7 @@ impl AllowanceManaging for AllowanceManager { Ok(fetch_allowances( self.web3.alloy.clone(), self.owner, - hashmap! { spender => tokens }, + HashMap::from([(spender, tokens)]), ) .await? .remove(&spender) From 0188a29597b7ac70459afeacebfcbb06f9213bc2 Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Mon, 26 Jan 2026 14:16:31 +0100 Subject: [PATCH 007/219] Remove useless logs (#4084) # Description Our services are extremely chatty which is annoying for debugging and overwhelms our logging infra. This PR removes or strips down logs that should not be needed. 
# Changes - removes huge structs like calldata, access lists, and duplicated transactions - calldata is still preserved where it matters most (when resimulating quotes, or in revert errors) - removes tempo items that needlessly get printed in every log of the respective trace (where it seemed useful I added 1 log that contained the data) - removes logs when solutions could not be merged (this is an optimistic optimization and solutions are not expected to always be mergeable) - downgraded some logs from `debug` to `trace` (the ones I think I never used for any debugging but on the surface level seemed like they might be useful eventually) - 404 errors from `/notify` requests --- crates/autopilot/src/infra/api.rs | 12 +--------- crates/autopilot/src/run_loop.rs | 14 ++++++++---- crates/autopilot/src/shadow.rs | 4 ++-- crates/driver/src/domain/competition/mod.rs | 11 +++------- .../src/domain/competition/solution/mod.rs | 22 +++++-------------- .../domain/competition/solution/settlement.rs | 6 +++-- .../domain/competition/solution/slippage.rs | 4 ++-- crates/driver/src/domain/eth/mod.rs | 15 ++----------- crates/driver/src/domain/quote.rs | 6 ++++- crates/driver/src/infra/observe/mod.rs | 7 +----- crates/driver/src/infra/solver/mod.rs | 6 +++-- crates/driver/src/util/http.rs | 2 +- crates/shared/src/price_estimation/mod.rs | 3 ++- crates/shared/src/trade_finding/mod.rs | 3 ++- 14 files changed, 44 insertions(+), 71 deletions(-) diff --git a/crates/autopilot/src/infra/api.rs b/crates/autopilot/src/infra/api.rs index d45789c671..d7c7477af5 100644 --- a/crates/autopilot/src/infra/api.rs +++ b/crates/autopilot/src/infra/api.rs @@ -83,17 +83,7 @@ async fn get_native_price( let start = Instant::now(); match state.estimator.estimate_native_price(token, timeout).await { - Ok(price) => { - let elapsed = start.elapsed(); - tracing::debug!( - ?token, - ?timeout, - ?elapsed, - ?price, - "estimated native token price" - ); - Json(NativeTokenPrice { price }).into_response() - } + Ok(price) => Json(NativeTokenPrice { price }).into_response(), Err(err) => { let elapsed = start.elapsed(); tracing::warn!( diff --git a/crates/autopilot/src/run_loop.rs b/crates/autopilot/src/run_loop.rs index f793d07c8a..6fce69cbc2 100644 --- a/crates/autopilot/src/run_loop.rs +++ b/crates/autopilot/src/run_loop.rs @@ -245,7 +245,7 @@ impl RunLoop { /// Sleeps until the next auction is supposed to start, builds it and /// returns it. 
- #[instrument(skip(self, prev_auction), fields(prev_auction = prev_auction.as_ref().map(|a| a.id)))] + #[instrument(skip_all)] async fn next_auction( &self, start_block: BlockInfo, @@ -264,7 +264,7 @@ impl RunLoop { return None; } - observe::log_auction_delta(&previous, &auction); + observe::log_auction_delta(&previous, &auction, &start_block); self.probes.liveness.auction(); Metrics::auction_ready(start_block.observed_at); Some(auction) @@ -311,7 +311,7 @@ impl RunLoop { }) } - #[instrument(skip_all, fields(auction_id = auction.id, auction_block = auction.block, auction_orders = auction.orders.len()))] + #[instrument(skip_all)] async fn single_run(self: &Arc, auction: domain::Auction) { let single_run_start = Instant::now(); tracing::info!(auction_id = ?auction.id, "solving"); @@ -1129,10 +1129,15 @@ pub mod observe { self, competition::{Unscored, winner_selection::Ranking}, }, + ethrpc::block_stream::BlockInfo, std::collections::HashSet, }; - pub fn log_auction_delta(previous: &Option, current: &domain::Auction) { + pub fn log_auction_delta( + previous: &Option, + current: &domain::Auction, + start_block: &BlockInfo, + ) { let previous_uids = match previous { Some(previous) => previous .orders @@ -1158,6 +1163,7 @@ pub mod observe { removed = ?removed, "Orders no longer in auction" ); + tracing::debug!(auction_id = current.id, ?start_block); } pub fn bids(bids: &[domain::competition::Bid]) { diff --git a/crates/autopilot/src/shadow.rs b/crates/autopilot/src/shadow.rs index 9198c21fd1..5c28857a7f 100644 --- a/crates/autopilot/src/shadow.rs +++ b/crates/autopilot/src/shadow.rs @@ -77,12 +77,12 @@ impl RunLoop { loop { // We use this as a synchronization mechanism to sync the run loop starts with // the next mined block - let _ = ethrpc::block_stream::next_block(&self.current_block).await; + let start_block = ethrpc::block_stream::next_block(&self.current_block).await; let Some(auction) = self.next_auction().await else { tokio::time::sleep(Duration::from_secs(1)).await; continue; }; - observe::log_auction_delta(&previous, &auction); + observe::log_auction_delta(&previous, &auction, &start_block); self.liveness.auction(); self.single_run(&auction) diff --git a/crates/driver/src/domain/competition/mod.rs b/crates/driver/src/domain/competition/mod.rs index 655ccc568f..869d51d683 100644 --- a/crates/driver/src/domain/competition/mod.rs +++ b/crates/driver/src/domain/competition/mod.rs @@ -768,14 +768,9 @@ fn merge( for solution in solutions.take(MAX_SOLUTIONS_TO_MERGE) { let mut extension = vec![]; for already_merged in merged.iter() { - match solution.merge(already_merged, max_orders_per_merged_solution) { - Ok(merged) => { - observe::merged(&solution, already_merged, &merged); - extension.push(merged); - } - Err(err) => { - observe::not_merged(&solution, already_merged, err); - } + if let Ok(merged) = solution.merge(already_merged, max_orders_per_merged_solution) { + observe::merged(&solution, already_merged, &merged); + extension.push(merged); } } diff --git a/crates/driver/src/domain/competition/solution/mod.rs b/crates/driver/src/domain/competition/solution/mod.rs index f637661f98..46404cc955 100644 --- a/crates/driver/src/domain/competition/solution/mod.rs +++ b/crates/driver/src/domain/competition/solution/mod.rs @@ -51,18 +51,22 @@ pub struct WrapperCall { /// A solution represents a set of orders which the solver has found an optimal /// way to settle. A [`Solution`] is generated by a solver as a response to a /// [`competition::Auction`]. See also [`settlement::Settlement`]. 
-#[derive(Clone)] +#[derive(derive_more::Debug, Clone)] pub struct Solution { id: Id, trades: Vec, prices: Prices, + #[debug(ignore)] pre_interactions: Vec, + #[debug(ignore)] interactions: Vec, + #[debug(ignore)] post_interactions: Vec, solver: Solver, weth: eth::WethAddress, gas: Option, flashloans: HashMap, + #[debug(ignore)] wrappers: Vec, } @@ -515,22 +519,6 @@ impl Solution { } } -impl std::fmt::Debug for Solution { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - f.debug_struct("Solution") - .field("id", &self.id) - .field("trades", &self.trades) - .field("prices", &self.prices) - .field("pre_interactions", &self.pre_interactions) - .field("interactions", &self.interactions) - .field("post_interactions", &self.post_interactions) - .field("solver", &self.solver.name()) - .field("gas", &self.gas) - .field("flashloans", &self.flashloans) - .finish() - } -} - /// Given two solutions returns the factors with /// which prices of the second solution would have to be multiplied so that the /// given token would have the same price in both solutions. diff --git a/crates/driver/src/domain/competition/solution/settlement.rs b/crates/driver/src/domain/competition/solution/settlement.rs index 26a4327fb5..a53cb14a0b 100644 --- a/crates/driver/src/domain/competition/solution/settlement.rs +++ b/crates/driver/src/domain/competition/solution/settlement.rs @@ -37,20 +37,22 @@ use { /// for the solver (earning reduced rewards). Enforcing these rules ensures that /// the settlement can be broadcast safely with high confidence that it will not /// be reverted and that it will not result in slashing for the solver. -#[derive(Debug, Clone)] +#[derive(derive_more::Debug, Clone)] pub struct Settlement { pub auction_id: auction::Id, /// The prepared on-chain transaction for this settlement transaction: SettlementTx, /// The gas parameters used by the settlement. pub gas: Gas, + #[debug(ignore)] solution: Solution, } -#[derive(Debug, Clone)] +#[derive(derive_more::Debug, Clone)] struct SettlementTx { /// Transaction with all internalizable interactions omitted internalized: eth::Tx, + #[debug(ignore)] /// Full Transaction without internalizing any interactions uninternalized: eth::Tx, /// Whether this settlement has interactions that could make it revert diff --git a/crates/driver/src/domain/competition/solution/slippage.rs b/crates/driver/src/domain/competition/solution/slippage.rs index acb30cb147..458a22648d 100644 --- a/crates/driver/src/domain/competition/solution/slippage.rs +++ b/crates/driver/src/domain/competition/solution/slippage.rs @@ -87,7 +87,7 @@ impl Parameters { .checked_div(&interaction.output.amount) .ok_or(super::error::Math::DivisionByZero)? } else { - tracing::warn!( + tracing::trace!( input_token = ?interaction.input.token, output_token = ?interaction.output.token, "unable to compute capped slippage; falling back to relative slippage", @@ -98,7 +98,7 @@ impl Parameters { .into() }; - tracing::debug!(?interaction, ?slippage, "applying slippage to liquidity",); + tracing::trace!(?interaction, ?slippage, "applying slippage to liquidity",); Ok(( MaxInput(eth::Asset { amount: interaction.input.amount + slippage, diff --git a/crates/driver/src/domain/eth/mod.rs b/crates/driver/src/domain/eth/mod.rs index f09d6db76c..53a72b84c4 100644 --- a/crates/driver/src/domain/eth/mod.rs +++ b/crates/driver/src/domain/eth/mod.rs @@ -364,27 +364,16 @@ pub enum TxStatus { } /// An onchain transaction. 
-#[derive(Clone)] +#[derive(derive_more::Debug, Clone)] pub struct Tx { pub from: Address, pub to: Address, pub value: Ether, pub input: Bytes>, + #[debug(ignore)] pub access_list: AccessList, } -impl std::fmt::Debug for Tx { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("Tx") - .field("from", &self.from) - .field("to", &self.to) - .field("value", &self.value) - .field("input", &self.input) - .field("access_list", &self.access_list) - .finish() - } -} - impl From for TransactionRequest { fn from(value: Tx) -> Self { TransactionRequest::default() diff --git a/crates/driver/src/domain/quote.rs b/crates/driver/src/domain/quote.rs index f2b5b33554..ad513d0bfc 100644 --- a/crates/driver/src/domain/quote.rs +++ b/crates/driver/src/domain/quote.rs @@ -20,15 +20,19 @@ use { }; /// A quote describing the expected outcome of an order. -#[derive(Debug)] +#[derive(derive_more::Debug)] pub struct Quote { pub clearing_prices: HashMap, + #[debug(ignore)] pub pre_interactions: Vec, + #[debug(ignore)] pub interactions: Vec, pub solver: eth::Address, pub gas: Option, /// Which `tx.origin` is required to make the quote simulation pass. + #[debug(ignore)] pub tx_origin: Option, + #[debug(ignore)] pub jit_orders: Vec, } diff --git a/crates/driver/src/infra/observe/mod.rs b/crates/driver/src/infra/observe/mod.rs index 0a6f232b5b..07e1b06a12 100644 --- a/crates/driver/src/infra/observe/mod.rs +++ b/crates/driver/src/infra/observe/mod.rs @@ -132,12 +132,7 @@ pub fn encoding_failed(solver: &solver::Name, id: &solution::Id, err: &solution: /// Observe that two solutions were merged. pub fn merged(first: &Solution, other: &Solution, result: &Solution) { - tracing::debug!(?first, ?other, ?result, "merged solutions"); -} - -/// Observe that it was not possible to merge two solutions. -pub fn not_merged(first: &Solution, other: &Solution, err: solution::error::Merge) { - tracing::debug!(?err, ?first, ?other, "solutions can't be merged"); + tracing::trace!(?first, ?other, ?result, "merged solutions"); } /// Observe that scoring is about to start. diff --git a/crates/driver/src/infra/solver/mod.rs b/crates/driver/src/infra/solver/mod.rs index 2dc592f5ed..1294375479 100644 --- a/crates/driver/src/infra/solver/mod.rs +++ b/crates/driver/src/infra/solver/mod.rs @@ -448,8 +448,10 @@ impl Solver { } let response_size = self.config.response_size_limit_max_bytes; let future = async move { - if let Err(error) = util::http::send(response_size, req).await { - tracing::warn!(?error, "failed to notify solver"); + if let Err(error) = util::http::send(response_size, req).await + && !matches!(error, util::http::Error::NotOk { code: 404, .. }) + { + tracing::debug!(?error, "failed to notify solver"); } }; tokio::task::spawn(future.in_current_span()); diff --git a/crates/driver/src/util/http.rs b/crates/driver/src/util/http.rs index 47b2891610..fee7493b61 100644 --- a/crates/driver/src/util/http.rs +++ b/crates/driver/src/util/http.rs @@ -31,7 +31,7 @@ pub async fn send(limit_bytes: usize, req: reqwest::RequestBuilder) -> Result Date: Mon, 26 Jan 2026 14:48:55 +0100 Subject: [PATCH 008/219] Fetch inflight orders from DB (#4087) # Description In order to avoid solver solutions conflicting with each other once a solution for an order was proposed it will get removed from the auction until its submission deadline has been reached. So far this was managed entirely in-memory which can lead to issues whenever the autopilot gets restarted. 
# Changes Since the DB scheme refactor a while ago we now have all the data we need to recover inflight orders from the DB. This PR replaces the in-memory inflight order handling by looking them up from the DB. To make the query fast enough I added an index on the deadline column on the `competition_auctions` table. With that the query takes ~0.1ms to look up 10 auctions worth of inflight orders.
execution plan ``` "Unique (cost=1352.80..1352.98 rows=35 width=57) (actual time=0.041..0.043 rows=1 loops=1)" " -> Sort (cost=1352.80..1352.89 rows=35 width=57) (actual time=0.040..0.041 rows=1 loops=1)" " Sort Key: pte.order_uid" " Sort Method: quicksort Memory: 25kB" " -> Nested Loop (cost=1.86..1351.90 rows=35 width=57) (actual time=0.028..0.033 rows=1 loops=1)" " -> Nested Loop Anti Join (cost=1.29..1339.25 rows=4 width=24) (actual time=0.023..0.028 rows=1 loops=1)" " Join Filter: (s.solution_uid = ps.uid)" " -> Nested Loop (cost=0.86..1171.92 rows=4 width=24) (actual time=0.013..0.020 rows=2 loops=1)" " -> Index Scan using competition_auction_deadline on competition_auctions ca (cost=0.43..11.96 rows=5 width=8) (actual time=0.005..0.006 rows=2 loops=1)" " Index Cond: (deadline > 24300390)" " -> Index Scan using proposed_solutions_pkey on proposed_solutions ps (cost=0.43..231.80 rows=19 width=16) (actual time=0.003..0.006 rows=1 loops=2)" " Index Cond: (auction_id = ca.id)" " Filter: is_winner" " Rows Removed by Filter: 8" " -> Index Scan using settlements_auction_id on settlements s (cost=0.43..41.69 rows=11 width=16) (actual time=0.003..0.003 rows=0 loops=2)" " Index Cond: (auction_id = ca.id)" " -> Index Only Scan using proposed_trade_executions_pkey on proposed_trade_executions pte (cost=0.56..3.15 rows=1 width=73) (actual time=0.004..0.004 rows=1 loops=1)" " Index Cond: ((auction_id = ps.auction_id) AND (solution_uid = ps.uid))" " Heap Fetches: 1" "Planning Time: 0.543 ms" "Execution Time: 0.079 ms" ```
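For illustration, a rough sketch of how the run loop consumes this query (simplified stand-in types and field names, not the exact code from the diff below):

```rust
use std::collections::HashSet;

// Hedged sketch: the auction is filtered against the DB-backed set of
// in-flight orders instead of an in-memory set. `Persistence`,
// `RawAuctionData` and `OrderUid` stand in for the real crate types and the
// field accesses are assumptions made for the sake of the example.
async fn drop_in_flight_orders(
    persistence: &Persistence,
    mut auction: RawAuctionData,
) -> RawAuctionData {
    let in_flight: HashSet<OrderUid> = persistence
        .fetch_in_flight_orders(auction.block)
        .await
        .unwrap_or_default();
    if in_flight.is_empty() {
        return auction;
    }
    auction
        .orders
        .retain(|order| !in_flight.contains(&order.uid));
    auction
}
```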
## How to test added a new unit test for the DB query --- crates/autopilot/src/infra/persistence/mod.rs | 23 ++- crates/autopilot/src/run_loop.rs | 30 ++-- crates/database/src/solver_competition_v2.rs | 148 ++++++++++++++++++ database/README.md | 1 + database/sql/V100__add_auction_deadline.sql | 2 + 5 files changed, 182 insertions(+), 22 deletions(-) create mode 100644 database/sql/V100__add_auction_deadline.sql diff --git a/crates/autopilot/src/infra/persistence/mod.rs b/crates/autopilot/src/infra/persistence/mod.rs index e9b8dec947..b816358fda 100644 --- a/crates/autopilot/src/infra/persistence/mod.rs +++ b/crates/autopilot/src/infra/persistence/mod.rs @@ -21,7 +21,7 @@ use { SellTokenSource as DbSellTokenSource, SigningScheme as DbSigningScheme, }, - solver_competition_v2::{Order, Solution}, + solver_competition_v2::{self, Order, Solution}, }, domain::auction::order::{ BuyTokenDestination as DomainBuyTokenDestination, @@ -1024,6 +1024,27 @@ impl Persistence { .context("solver_competition::fetch_solver_winning_solutions")?, ) } + + /// Fetches orders which are currently inflight. Those orders should + /// be omitted from the current auction to avoid onchain reverts. + pub async fn fetch_in_flight_orders( + &self, + current_block: u64, + ) -> anyhow::Result> { + let _timer = Metrics::get() + .database_queries + .with_label_values(&["inflight_orders"]) + .start_timer(); + + let mut ex = self.postgres.pool.acquire().await.context("acquire")?; + let orders = + solver_competition_v2::fetch_in_flight_orders(&mut ex, current_block.cast_signed()) + .await?; + Ok(orders + .into_iter() + .map(|o| crate::domain::OrderUid(o.0)) + .collect()) + } } #[derive(prometheus_metric_storage::MetricStorage)] diff --git a/crates/autopilot/src/run_loop.rs b/crates/autopilot/src/run_loop.rs index 6fce69cbc2..937f50000f 100644 --- a/crates/autopilot/src/run_loop.rs +++ b/crates/autopilot/src/run_loop.rs @@ -3,7 +3,6 @@ use { database::competition::Competition, domain::{ self, - OrderUid, auction::Id, competition::{ self, @@ -53,7 +52,6 @@ use { }, time::{Duration, Instant}, }, - tokio::sync::Mutex, tracing::{Instrument, instrument}, }; @@ -83,7 +81,6 @@ pub struct RunLoop { solver_participation_guard: SolverParticipationGuard, solvable_orders_cache: Arc, trusted_tokens: AutoUpdatingTokenList, - in_flight_orders: Arc>>, probes: Probes, /// Maintenance tasks that should run before every runloop to have /// the most recent data available. @@ -126,7 +123,6 @@ impl RunLoop { solver_participation_guard, solvable_orders_cache, trusted_tokens, - in_flight_orders: Default::default(), probes, maintenance, competition_updates_sender, @@ -403,11 +399,6 @@ impl RunLoop { block_deadline: u64, ) { let solved_order_uids: HashSet<_> = solution.orders().keys().cloned().collect(); - self.in_flight_orders - .lock() - .await - .extend(solved_order_uids.clone()); - let solution_id = solution.id(); let solver = solution.solver(); let self_ = self.clone(); @@ -420,7 +411,6 @@ impl RunLoop { match self_ .settle( &driver_, - solved_order_uids.clone(), solver, auction_id, solution_id, @@ -734,11 +724,9 @@ impl RunLoop { /// Execute the solver's solution. Returns Ok when the corresponding /// transaction has been mined. 
- #[expect(clippy::too_many_arguments)] async fn settle( &self, driver: &infra::Driver, - solved_order_uids: HashSet, solver: eth::Address, auction_id: i64, solution_id: u64, @@ -794,12 +782,6 @@ impl RunLoop { self.store_execution_ended(solver, auction_id, solution_uid, &result); - // Clean up the in-flight orders regardless the result. - self.in_flight_orders - .lock() - .await - .retain(|order| !solved_order_uids.contains(order)); - result } @@ -913,13 +895,19 @@ impl RunLoop { Err(SettleError::Timeout) } - /// Removes orders that are currently being settled to avoid solvers trying - /// to fill an order a second time. + /// Removes orders that are currently being settled to avoid solver + /// solutions conflicting with each other. async fn remove_in_flight_orders( &self, mut auction: domain::RawAuctionData, ) -> domain::RawAuctionData { - let in_flight = &*self.in_flight_orders.lock().await; + let in_flight = self + .persistence + .fetch_in_flight_orders(auction.block) + .await + .inspect_err(|err| tracing::warn!(?err, "failed to fetch in-flight orders")) + .unwrap_or_default(); + if in_flight.is_empty() { return auction; }; diff --git a/crates/database/src/solver_competition_v2.rs b/crates/database/src/solver_competition_v2.rs index 0a61cd8382..11aefbcdc9 100644 --- a/crates/database/src/solver_competition_v2.rs +++ b/crates/database/src/solver_competition_v2.rs @@ -563,6 +563,35 @@ fn map_rows_to_solutions(rows: Vec) -> Result, sqlx:: Ok(solutions) } +/// Fetches all orders for which we must assume that there are +/// still onchain transactions being mined or submitted. +/// +/// Those are all orders (JIT or regular) that belong to winning +/// solutions with a deadline greater than the current block +/// where the execution actually has not been observed onchain yet. 
+pub async fn fetch_in_flight_orders( + ex: &mut PgConnection, + current_block: i64, +) -> Result, sqlx::Error> { + const QUERY: &str = r#" + SELECT DISTINCT order_uid + FROM competition_auctions ca + JOIN proposed_solutions ps ON ps.auction_id = ca.id + JOIN proposed_trade_executions pte ON pte.auction_id = ca.id AND pte.solution_uid = ps.uid + WHERE ca.deadline > $1 + AND ps.is_winner = true + AND NOT EXISTS ( + SELECT 1 FROM settlements s + WHERE s.auction_id = ca.id AND s.solution_uid = ps.uid + ); + "#; + + sqlx::query_as(QUERY) + .bind(current_block) + .fetch_all(ex) + .await +} + #[cfg(test)] mod tests { use { @@ -1242,4 +1271,123 @@ mod tests { assert_eq!(auction_participants.len(), 1); assert_eq!(auction_participants[0].participant, solutions[0].solver); } + + #[tokio::test] + #[ignore] + async fn postgres_fetch_inflight_orders() { + let mut db = PgConnection::connect("postgresql://").await.unwrap(); + let mut db = db.begin().await.unwrap(); + crate::clear_DANGER_(&mut db).await.unwrap(); + + let order_uid = |i| ByteArray([i; 56]); + let order = |i| Order { + uid: order_uid(i), + ..Default::default() + }; + let solutions = vec![ + Solution { + uid: 0, + id: 0.into(), + orders: vec![order(0)], + is_winner: true, + ..Default::default() + }, + Solution { + uid: 1, + id: 0.into(), + orders: vec![order(1)], + is_winner: true, + ..Default::default() + }, + ]; + crate::auction::save( + &mut db, + crate::auction::Auction { + id: 0, + block: 0, + deadline: 5, + order_uids: Default::default(), + price_tokens: Default::default(), + price_values: Default::default(), + surplus_capturing_jit_order_owners: Default::default(), + }, + ) + .await + .unwrap(); + save(&mut db, 0, &solutions).await.unwrap(); + + let solutions = vec![ + Solution { + uid: 2, + id: 1.into(), + orders: vec![order(2)], + is_winner: true, + ..Default::default() + }, + Solution { + uid: 3, + id: 1.into(), + orders: vec![order(3)], + is_winner: true, + ..Default::default() + }, + ]; + crate::auction::save( + &mut db, + crate::auction::Auction { + id: 1, + block: 5, + deadline: 10, + order_uids: Default::default(), + price_tokens: Default::default(), + price_values: Default::default(), + surplus_capturing_jit_order_owners: Default::default(), + }, + ) + .await + .unwrap(); + save(&mut db, 1, &solutions).await.unwrap(); + + // all orders in flight at block 4 + let early_block = fetch_in_flight_orders(&mut db, 4).await.unwrap(); + assert_eq!(early_block.len(), 4); + assert!( + [0, 1, 2, 3] + .into_iter() + .all(|id| early_block.contains(&order_uid(id))) + ); + + // only orders from the later auction in flight at block 5 + let later_block = fetch_in_flight_orders(&mut db, 5).await.unwrap(); + assert_eq!(later_block.len(), 2); + assert!( + [2, 3] + .into_iter() + .all(|id| later_block.contains(&order_uid(id))) + ); + + // observe settlement event + crate::events::insert_settlement( + &mut db, + &EventIndex { + block_number: 5, + log_index: 0, + }, + &Default::default(), + ) + .await + .unwrap(); + // associate with auction 1 + settlements::update_settlement_auction(&mut db, 5, 0, 1) + .await + .unwrap(); + // associate with solution 3 + settlements::update_settlement_solver(&mut db, 5, 0, Default::default(), 3) + .await + .unwrap(); + + // when an order gets marked as settled we dont consider it inflight anymore + let later_block_with_settlement = fetch_in_flight_orders(&mut db, 5).await.unwrap(); + assert_eq!(later_block_with_settlement, vec![order_uid(2)]); + } } diff --git a/database/README.md b/database/README.md index 
04d5407171..8176269204 100644 --- a/database/README.md +++ b/database/README.md @@ -64,6 +64,7 @@ Contains all auctions for which a valid solver competition exists. Indexes: - PRIMARY KEY: btree(`id`) +- competition_auction_deadline: btree(`deadline`) ### ethflow\_orders diff --git a/database/sql/V100__add_auction_deadline.sql b/database/sql/V100__add_auction_deadline.sql new file mode 100644 index 0000000000..46e29b8ae3 --- /dev/null +++ b/database/sql/V100__add_auction_deadline.sql @@ -0,0 +1,2 @@ +-- adds index on the deadline of an auction to quickly look up inflight orders from the db +CREATE INDEX CONCURRENTLY competition_auction_deadline ON competition_auctions USING BTREE(deadline); From 03cad15cfc31cedfde79ba5751473deeaf31880a Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Tue, 27 Jan 2026 10:23:15 +0100 Subject: [PATCH 009/219] Stop enforcing body size limit (#4092) # Description The shadow competition broke because the driver is now rejecting `/solve` requests that are too large due to this new [code](https://github.com/cowprotocol/services/pull/4082/changes#diff-b997d6f696c5591860aef8658bb56d2a03fc4fa6b37b5e0432ce8e5e4e356aa9R61-R64). This was surprising to me because that code was added specifically because the new handler is bypassing the original content length limiting layer so I would have expected huge requests to already cause issues. During the investigation I confirmed using the `/solve` requests stored on S3 that recent auctions are indeed larger than. 10MB. Afterwards I spun up a driver locally and sent that solve request to the original code to confirm that it's indeed not throwing any errors. I further investigated and concluded that the issue is how we build the driver's http router. The size limiting layer is the first thing that gets added to the router but it should actually be the last. This caused the size limit to never go into effect. # Changes To resolve the issue quickly and remove this breaking change ASAP I simply removed the new size limiting logic from the `/solve` request. In a follow up PR I'll make the size limit configurable and fix the router. ## How to test manual test --- crates/driver/src/infra/api/mod.rs | 2 +- crates/driver/src/infra/api/routes/solve/mod.rs | 15 +++++++-------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/crates/driver/src/infra/api/mod.rs b/crates/driver/src/infra/api/mod.rs index 5b42fc88e5..afd3f27578 100644 --- a/crates/driver/src/infra/api/mod.rs +++ b/crates/driver/src/infra/api/mod.rs @@ -31,7 +31,7 @@ use { mod error; pub mod routes; -pub const REQUEST_BODY_LIMIT: usize = 10 * 1024 * 1024; +const REQUEST_BODY_LIMIT: usize = 10 * 1024 * 1024; pub struct Api { pub solvers: Vec, diff --git a/crates/driver/src/infra/api/routes/solve/mod.rs b/crates/driver/src/infra/api/routes/solve/mod.rs index 6ed72ffef5..b378dae435 100644 --- a/crates/driver/src/infra/api/routes/solve/mod.rs +++ b/crates/driver/src/infra/api/routes/solve/mod.rs @@ -5,7 +5,7 @@ use { crate::{ domain::competition, infra::{ - api::{Error, REQUEST_BODY_LIMIT, State}, + api::{Error, State}, observe, }, }, @@ -58,13 +58,12 @@ async fn collect_request_body( tracing::trace!("start streaming request body"); let start = Instant::now(); - // accepting the raw request bypasses axum's request body limiting layer - // so we have to manually ensure the body has a reasonable size. 
- let limited_body = http_body::Limited::new(request.into_body(), REQUEST_BODY_LIMIT); - let body_bytes = hyper::body::to_bytes(limited_body).await.map_err(|err| { - tracing::warn!(?err, "failed to stream request body"); - competition::Error::MalformedRequest - })?; + let body_bytes = hyper::body::to_bytes(request.into_body()) + .await + .map_err(|err| { + tracing::warn!(?err, "failed to stream request body"); + competition::Error::MalformedRequest + })?; let duration = start.elapsed(); Metrics::measure_solve_transfer_time(solver, duration); From de28ec157233dee6e96dec1d7ac55ac18de0b6e0 Mon Sep 17 00:00:00 2001 From: Marcin Szymczak Date: Tue, 27 Jan 2026 16:25:01 +0100 Subject: [PATCH 010/219] =?UTF-8?q?Revert=20"Optimize=20live=20orders=20qu?= =?UTF-8?q?eries=20based=20on=20confirmed=5Fvalid=5Fto=20colu=E2=80=A6=20(?= =?UTF-8?q?#4094)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description This reverts commit 7081b03f66c5154cc84ae93f79542425803b4639. (PR https://github.com/cowprotocol/services/pull/4055) The migrations will be revisited as they could not be applied to prod due to lockup and long duration. --- crates/database/src/orders.rs | 96 +++++++++++-------- database/README.md | 2 - .../sql/V098__set_true_valid_to_non_null.sql | 8 -- .../V099__create_true_valid_to_indexes.sql | 4 - 4 files changed, 54 insertions(+), 56 deletions(-) delete mode 100644 database/sql/V098__set_true_valid_to_non_null.sql delete mode 100644 database/sql/V099__create_true_valid_to_indexes.sql diff --git a/crates/database/src/orders.rs b/crates/database/src/orders.rs index 78e2d73b86..d1f77cb052 100644 --- a/crates/database/src/orders.rs +++ b/crates/database/src/orders.rs @@ -721,23 +721,26 @@ pub fn solvable_orders( /// - pending pre-signature /// - ethflow specific invalidation conditions const OPEN_ORDERS: &str = r#" - WITH live_orders AS MATERIALIZED ( + WITH live_orders AS ( SELECT o.* FROM orders o + LEFT JOIN ethflow_orders e ON e.uid = o.uid WHERE o.cancellation_timestamp IS NULL - AND o.true_valid_to >= $1 - AND NOT EXISTS (SELECT 1 FROM invalidations i WHERE i.order_uid = o.uid) - AND NOT EXISTS (SELECT 1 FROM onchain_order_invalidations oi WHERE oi.uid = o.uid) - AND NOT EXISTS (SELECT 1 FROM onchain_placed_orders op WHERE op.uid = o.uid AND op.placement_error IS NOT NULL) + AND o.valid_to >= $1 + AND (e.valid_to IS NULL OR e.valid_to >= $1) + AND NOT EXISTS (SELECT 1 FROM invalidations i WHERE i.order_uid = o.uid) + AND NOT EXISTS (SELECT 1 FROM onchain_order_invalidations oi WHERE oi.uid = o.uid) + AND NOT EXISTS (SELECT 1 FROM onchain_placed_orders op WHERE op.uid = o.uid + AND op.placement_error IS NOT NULL) ), trades_agg AS ( - SELECT t.order_uid, - SUM(t.buy_amount) AS sum_buy, - SUM(t.sell_amount) AS sum_sell, - SUM(t.fee_amount) AS sum_fee - FROM trades t - JOIN live_orders lo ON lo.uid = t.order_uid - GROUP BY t.order_uid + SELECT t.order_uid, + SUM(t.buy_amount) AS sum_buy, + SUM(t.sell_amount) AS sum_sell, + SUM(t.fee_amount) AS sum_fee + FROM trades t + JOIN live_orders lo ON lo.uid = t.order_uid + GROUP BY t.order_uid ) SELECT lo.uid, @@ -957,37 +960,46 @@ pub async fn user_orders_with_quote( owner: &Address, ) -> Result, sqlx::Error> { // Optimized version following the same pattern as OPEN_ORDERS + #[rustfmt::skip] const QUERY: &str = r#" - WITH live_orders AS MATERIALIZED ( - SELECT o.* - FROM orders o - WHERE o.cancellation_timestamp IS NULL - AND o.true_valid_to >= $1 - AND NOT EXISTS (SELECT 1 FROM invalidations i WHERE 
i.order_uid = o.uid) - AND NOT EXISTS (SELECT 1 FROM onchain_order_invalidations oi WHERE oi.uid = o.uid) - AND NOT EXISTS (SELECT 1 FROM onchain_placed_orders op WHERE op.uid = o.uid AND op.placement_error IS NOT NULL) - AND o.owner = $2 - AND o.class = 'limit' - ) - SELECT - o_quotes.sell_amount AS quote_sell_amount, - lo.sell_amount AS order_sell_amount, - o_quotes.buy_amount AS quote_buy_amount, - lo.buy_amount AS order_buy_amount, - lo.kind AS order_kind, - o_quotes.gas_amount AS quote_gas_amount, - o_quotes.gas_price AS quote_gas_price, - o_quotes.sell_token_price AS quote_sell_token_price - FROM live_orders lo - INNER JOIN order_quotes o_quotes ON lo.uid = o_quotes.order_uid - WHERE ( - lo.kind = 'sell' - AND COALESCE((SELECT SUM(sell_amount) FROM trades WHERE order_uid = lo.uid), 0) < lo.sell_amount - ) OR ( - lo.kind = 'buy' - AND COALESCE((SELECT SUM(buy_amount) FROM trades WHERE order_uid = lo.uid), 0) < lo.buy_amount - ); - "#; +WITH live_orders AS ( + SELECT o.* + FROM orders o + LEFT JOIN ethflow_orders e ON e.uid = o.uid + WHERE o.cancellation_timestamp IS NULL + AND o.valid_to >= $1 + AND (e.valid_to IS NULL OR e.valid_to >= $1) + AND NOT EXISTS (SELECT 1 FROM invalidations i WHERE i.order_uid = o.uid) + AND NOT EXISTS (SELECT 1 FROM onchain_order_invalidations oi WHERE oi.uid = o.uid) + AND NOT EXISTS (SELECT 1 FROM onchain_placed_orders op WHERE op.uid = o.uid + AND op.placement_error IS NOT NULL) + AND o.owner = $2 + AND o.class = 'limit' +), +trades_agg AS ( + SELECT t.order_uid, + SUM(t.buy_amount) AS sum_buy, + SUM(t.sell_amount) AS sum_sell, + SUM(t.fee_amount) AS sum_fee + FROM trades t + JOIN live_orders lo ON lo.uid = t.order_uid + GROUP BY t.order_uid +) +SELECT + o_quotes.sell_amount as quote_sell_amount, + lo.sell_amount as order_sell_amount, + o_quotes.buy_amount as quote_buy_amount, + lo.buy_amount as order_buy_amount, + lo.kind as order_kind, + o_quotes.gas_amount as quote_gas_amount, + o_quotes.gas_price as quote_gas_price, + o_quotes.sell_token_price as quote_sell_token_price +FROM live_orders lo +LEFT JOIN trades_agg ta ON ta.order_uid = lo.uid +INNER JOIN order_quotes o_quotes ON lo.uid = o_quotes.order_uid +WHERE ((lo.kind = 'sell' AND COALESCE(ta.sum_sell,0) < lo.sell_amount) OR + (lo.kind = 'buy' AND COALESCE(ta.sum_buy ,0) < lo.buy_amount)) +"#; sqlx::query_as::<_, OrderWithQuote>(QUERY) .bind(min_valid_to) .bind(owner) diff --git a/database/README.md b/database/README.md index 8176269204..32819cf1d5 100644 --- a/database/README.md +++ b/database/README.md @@ -273,8 +273,6 @@ Indexes: - order_sell_buy_tokens: btree(`sell_token`, `buy_token`) - user_order_creation_timestamp: btree(`owner`, `creation_timestamp` DESC) - version_idx: btree(`settlement_contract`) -- orders\_true\_valid\_to: btree(`true_valid_to`) -- okay\_onchain\_orders: btree(`uid`) WHERE placement\_error IS NOT NULL ### fee_policies diff --git a/database/sql/V098__set_true_valid_to_non_null.sql b/database/sql/V098__set_true_valid_to_non_null.sql deleted file mode 100644 index 76a0928527..0000000000 --- a/database/sql/V098__set_true_valid_to_non_null.sql +++ /dev/null @@ -1,8 +0,0 @@ --- migrate any remaining orders -UPDATE orders -SET true_valid_to = COALESCE( - (SELECT ethflow_orders.valid_to FROM ethflow_orders WHERE ethflow_orders.uid = orders.uid), - orders.valid_to -); --- at this point every order has the true_valid_to filled in -ALTER TABLE orders ALTER COLUMN true_valid_to SET NOT NULL; diff --git a/database/sql/V099__create_true_valid_to_indexes.sql 
b/database/sql/V099__create_true_valid_to_indexes.sql deleted file mode 100644 index 1b4754f29b..0000000000 --- a/database/sql/V099__create_true_valid_to_indexes.sql +++ /dev/null @@ -1,4 +0,0 @@ ---index on `true_valid_to` for quickly discarding expired orders -CREATE INDEX CONCURRENTLY orders_true_valid_to ON orders USING btree (true_valid_to); --- further drops the query from 100ms to 80ms (warmed cache) -CREATE INDEX CONCURRENTLY okay_onchain_orders ON onchain_placed_orders USING btree (uid) WHERE placement_error IS NOT NULL; From 8d6851c992879731ea2ed4c3c29bf19fc62d9e84 Mon Sep 17 00:00:00 2001 From: Marcin Szymczak Date: Wed, 28 Jan 2026 10:48:51 +0100 Subject: [PATCH 011/219] Add no-op placeholder migrations for the numbering to be continuous (#4096) # Description We needed to revert migrations V098, V099, and V097 was spelled wrongly (lowercase v). Since then V100 has been added and it makes flyway complain about missing interim migrations. Adding no-op migrations is enough to keep the continuity. # Changes Adds no-op migrations V097, V098 and V099 --- database/sql/V097__no_op_placeholder.sql | 1 + database/sql/V098__no_op_placeholder.sql | 1 + database/sql/V099__no_op_placeholder.sql | 1 + database/sql/v097__drop_old_cancellation_index.sql | 6 ------ 4 files changed, 3 insertions(+), 6 deletions(-) create mode 100644 database/sql/V097__no_op_placeholder.sql create mode 100644 database/sql/V098__no_op_placeholder.sql create mode 100644 database/sql/V099__no_op_placeholder.sql delete mode 100644 database/sql/v097__drop_old_cancellation_index.sql diff --git a/database/sql/V097__no_op_placeholder.sql b/database/sql/V097__no_op_placeholder.sql new file mode 100644 index 0000000000..e670655000 --- /dev/null +++ b/database/sql/V097__no_op_placeholder.sql @@ -0,0 +1 @@ +-- Migration was omitted to keep numbering continuity, for more information see PR #4096 \ No newline at end of file diff --git a/database/sql/V098__no_op_placeholder.sql b/database/sql/V098__no_op_placeholder.sql new file mode 100644 index 0000000000..e670655000 --- /dev/null +++ b/database/sql/V098__no_op_placeholder.sql @@ -0,0 +1 @@ +-- Migration was omitted to keep numbering continuity, for more information see PR #4096 \ No newline at end of file diff --git a/database/sql/V099__no_op_placeholder.sql b/database/sql/V099__no_op_placeholder.sql new file mode 100644 index 0000000000..e670655000 --- /dev/null +++ b/database/sql/V099__no_op_placeholder.sql @@ -0,0 +1 @@ +-- Migration was omitted to keep numbering continuity, for more information see PR #4096 \ No newline at end of file diff --git a/database/sql/v097__drop_old_cancellation_index.sql b/database/sql/v097__drop_old_cancellation_index.sql deleted file mode 100644 index 4615206ee2..0000000000 --- a/database/sql/v097__drop_old_cancellation_index.sql +++ /dev/null @@ -1,6 +0,0 @@ --- drop index over (creation_timestamp, cancellation timestamp) --- since it can only be used optimally for queries on the creation_timestamp --- and a new index over the cancellation timestamp was created in the previous --- migration -DROP INDEX IF EXISTS order_creation_cancellation; - From d2e2e2d5b6eaab7c9bd869b1e64bab361e59eb82 Mon Sep 17 00:00:00 2001 From: Marcin Szymczak Date: Wed, 28 Jan 2026 15:50:38 +0100 Subject: [PATCH 012/219] Rewrite migration V100 to be optional (#4098) # Description The migration V100 creates index on competition_auction_deadline on competition_auctions. 
To make the prod deployment viable it needs to be optional (IF NOT EXISTS) which will enable to apply it manually beforehand. # Changes Update the V100 migration to specify IF NOT EXISTS. ## How to test Will apply the migration manually and deploy on staging to verify. --- database/sql/V100__add_auction_deadline.sql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/sql/V100__add_auction_deadline.sql b/database/sql/V100__add_auction_deadline.sql index 46e29b8ae3..629aa3cfc8 100644 --- a/database/sql/V100__add_auction_deadline.sql +++ b/database/sql/V100__add_auction_deadline.sql @@ -1,2 +1,2 @@ -- adds index on the deadline of an auction to quickly look up inflight orders from the db -CREATE INDEX CONCURRENTLY competition_auction_deadline ON competition_auctions USING BTREE(deadline); +CREATE INDEX CONCURRENTLY IF NOT EXISTS competition_auction_deadline ON competition_auctions USING BTREE(deadline); From 3fefcc03489535a3b2e1efef8d7979f9163a7571 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Duarte?= Date: Wed, 28 Jan 2026 14:52:55 +0000 Subject: [PATCH 013/219] Move order outside market log to callsites (#4090) # Description The log inside the unwrap does not provide an actionable info, the lack or order ID, from address, quote ID, make it extremely hard to follow up on. More context in https://cowservices.slack.com/archives/C0375NV72SC/p1769440848303459 # Changes - [ ] Remove the log from the unwrap - [ ] Place it in the (seemingly) more relevant callsites ## How to test NA --- crates/shared/src/order_validation.rs | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/crates/shared/src/order_validation.rs b/crates/shared/src/order_validation.rs index 70eb6897d3..9db1e46594 100644 --- a/crates/shared/src/order_validation.rs +++ b/crates/shared/src/order_validation.rs @@ -755,6 +755,7 @@ impl OrderValidating for OrderValidator { }, data.kind, ) { + tracing::debug!(%uid, ?owner, ?class, "order being flagged as outside market price"); self.check_max_limit_orders(owner).await?; } (class, Some(quote)) @@ -786,6 +787,7 @@ impl OrderValidating for OrderValidator { }, data.kind, ) { + tracing::debug!(%uid, ?owner, ?class, "order being flagged as outside market price"); self.check_max_limit_orders(owner).await?; } (OrderClass::Limit, None) @@ -1002,14 +1004,7 @@ pub fn is_order_outside_market_price( } }; - check().unwrap_or_else(|| { - tracing::warn!( - ?order, - ?quote, - "failed to check if order is outside market price" - ); - true - }) + check().unwrap_or(true) } pub struct InvalidSigningScheme; From ff38a3eb1801c52df891c5daeba31c327eccfaef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Duarte?= Date: Wed, 28 Jan 2026 15:14:14 +0000 Subject: [PATCH 014/219] Add configurable database connection pool size (#4097) # Description We had an incident where latency increase due to queries waiting for available connections. This PR provides a configuration for that. Adds `--db-max-connections` (env: DB_MAX_CONNECTIONS) flag to configure the maximum database pool size. Default is 10. # Changes - [ ] New config for DB connection pool size - [ ] Add it to autopilot, orderbook, refunder ## How to test E2E + staging (?) 
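For reference, a condensed sketch of the new plumbing: a clap-derived config struct feeding `sqlx`'s `PgPoolOptions`. The flag and default mirror the diff below; the surrounding code is simplified.

```rust
use {sqlx::postgres::PgPoolOptions, std::num::NonZeroU32};

// Default pool size when the flag/env var is not set.
const DB_MAX_CONNECTIONS_DEFAULT: NonZeroU32 = NonZeroU32::new(10).unwrap();

#[derive(Debug, Clone, clap::Parser)]
struct DatabasePoolConfig {
    /// Maximum number of connections in the database connection pool.
    #[clap(long, env, default_value_t = DB_MAX_CONNECTIONS_DEFAULT)]
    db_max_connections: NonZeroU32,
}

// Simplified connection helper: the configured limit caps the pool size.
async fn connect(url: &str, config: &DatabasePoolConfig) -> sqlx::Result<sqlx::PgPool> {
    PgPoolOptions::new()
        .max_connections(config.db_max_connections.get())
        .connect(url)
        .await
}
```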
--------- Co-authored-by: Claude --- crates/autopilot/src/arguments.rs | 9 +++-- crates/autopilot/src/database/mod.rs | 34 ++++++++++++++----- .../src/database/onchain_order_events/mod.rs | 6 +--- crates/autopilot/src/run.rs | 12 +++++-- crates/orderbook/src/arguments.rs | 5 +++ crates/orderbook/src/database/mod.rs | 25 ++++++++++++-- crates/orderbook/src/database/orders.rs | 12 +++---- .../src/database/solver_competition.rs | 2 +- crates/orderbook/src/orderbook.rs | 3 +- crates/orderbook/src/run.rs | 10 ++++-- crates/refunder/src/arguments.rs | 5 +++ crates/refunder/src/lib.rs | 7 ++-- crates/shared/src/arguments.rs | 19 ++++++++++- 13 files changed, 113 insertions(+), 36 deletions(-) diff --git a/crates/autopilot/src/arguments.rs b/crates/autopilot/src/arguments.rs index 193be1a5a9..1d0b89897d 100644 --- a/crates/autopilot/src/arguments.rs +++ b/crates/autopilot/src/arguments.rs @@ -1,5 +1,5 @@ use { - crate::infra, + crate::{database::INSERT_BATCH_SIZE_DEFAULT, infra}, alloy::primitives::{Address, U256}, anyhow::{Context, anyhow, ensure}, chrono::{DateTime, Utc}, @@ -37,6 +37,9 @@ pub struct Arguments { #[clap(flatten)] pub price_estimation: price_estimation::Arguments, + #[clap(flatten)] + pub database_pool: shared::arguments::DatabasePoolConfig, + /// Address of the ethflow contracts. If not specified, eth-flow orders are /// disabled. /// In general, one contract is sufficient for the service to function. @@ -69,7 +72,7 @@ pub struct Arguments { pub db_write_url: Url, /// The number of order events to insert in a single batch. - #[clap(long, env, default_value = "500")] + #[clap(long, env, default_value_t = INSERT_BATCH_SIZE_DEFAULT)] pub insert_batch_size: NonZeroUsize, /// Skip syncing past events (useful for local deployments) @@ -370,6 +373,7 @@ impl std::fmt::Display for Arguments { http_client, token_owner_finder, price_estimation, + database_pool, tracing_node_url, ethflow_contracts, ethflow_indexing_start, @@ -418,6 +422,7 @@ impl std::fmt::Display for Arguments { write!(f, "{http_client}")?; write!(f, "{token_owner_finder}")?; write!(f, "{price_estimation}")?; + write!(f, "{database_pool}")?; display_option(f, "tracing_node_url", tracing_node_url)?; writeln!(f, "ethflow_contracts: {ethflow_contracts:?}")?; writeln!(f, "ethflow_indexing_start: {ethflow_indexing_start:?}")?; diff --git a/crates/autopilot/src/database/mod.rs b/crates/autopilot/src/database/mod.rs index f6a981c5ee..98f7c4bbb6 100644 --- a/crates/autopilot/src/database/mod.rs +++ b/crates/autopilot/src/database/mod.rs @@ -1,7 +1,11 @@ use { num::ToPrimitive, - sqlx::{Executor, PgConnection, PgPool}, - std::{num::NonZeroUsize, time::Duration}, + shared::arguments::DB_MAX_CONNECTIONS_DEFAULT, + sqlx::{Executor, PgConnection, PgPool, postgres::PgPoolOptions}, + std::{ + num::{NonZeroU32, NonZeroUsize}, + time::Duration, + }, tracing::Instrument, }; @@ -15,9 +19,21 @@ pub mod onchain_order_events; pub mod order_events; mod quotes; +pub const INSERT_BATCH_SIZE_DEFAULT: NonZeroUsize = NonZeroUsize::new(500).unwrap(); + #[derive(Debug, Clone)] pub struct Config { pub insert_batch_size: NonZeroUsize, + pub max_pool_size: NonZeroU32, +} + +impl Default for Config { + fn default() -> Self { + Self { + insert_batch_size: INSERT_BATCH_SIZE_DEFAULT, + max_pool_size: DB_MAX_CONNECTIONS_DEFAULT, + } + } } #[derive(Debug, Clone)] @@ -27,15 +43,15 @@ pub struct Postgres { } impl Postgres { - pub async fn new(url: &str, insert_batch_size: NonZeroUsize) -> sqlx::Result { - let pool = PgPool::connect(url).await?; + pub async fn 
new(url: &str, config: Config) -> sqlx::Result { + let pool = PgPoolOptions::new() + .max_connections(config.max_pool_size.get()) + .connect(url) + .await?; Self::start_db_metrics_job(pool.clone()); - Ok(Self { - pool, - config: Config { insert_batch_size }, - }) + Ok(Self { pool, config }) } fn start_db_metrics_job(pool: PgPool) { @@ -57,7 +73,7 @@ impl Postgres { } pub async fn with_defaults() -> sqlx::Result { - Self::new("postgresql://", NonZeroUsize::new(500).unwrap()).await + Self::new("postgresql://", Default::default()).await } pub async fn update_database_metrics(&self) -> sqlx::Result<()> { diff --git a/crates/autopilot/src/database/onchain_order_events/mod.rs b/crates/autopilot/src/database/onchain_order_events/mod.rs index d4af7be9b1..bf4d352aff 100644 --- a/crates/autopilot/src/database/onchain_order_events/mod.rs +++ b/crates/autopilot/src/database/onchain_order_events/mod.rs @@ -780,7 +780,6 @@ mod test { use { super::*, - crate::database::Config, alloy::primitives::U256, contracts::alloy::CoWSwapOnchainOrders, database::{byte_array::ByteArray, onchain_broadcasted_orders::OnchainOrderPlacement}, @@ -802,7 +801,6 @@ mod test { order_quoting::{MockOrderQuoting, Quote, QuoteData}, }, sqlx::PgPool, - std::num::NonZeroUsize, }; #[test] @@ -1250,9 +1248,7 @@ mod test { let onchain_order_parser = OnchainOrderParser { db: Postgres { pool: PgPool::connect_lazy("postgresql://").unwrap(), - config: Config { - insert_batch_size: NonZeroUsize::new(500).unwrap(), - }, + config: Default::default(), }, trampoline: HooksTrampoline::Instance::deployed(&web3.alloy) .await diff --git a/crates/autopilot/src/run.rs b/crates/autopilot/src/run.rs index 14e6ac8731..38ba0bd965 100644 --- a/crates/autopilot/src/run.rs +++ b/crates/autopilot/src/run.rs @@ -163,9 +163,15 @@ pub async fn start(args: impl Iterator) { /// Assumes tracing and metrics registry have already been set up. pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { assert!(args.shadow.is_none(), "cannot run in shadow mode"); - let db_write = Postgres::new(args.db_write_url.as_str(), args.insert_batch_size) - .await - .unwrap(); + let db_write = Postgres::new( + args.db_write_url.as_str(), + crate::database::Config { + insert_batch_size: args.insert_batch_size, + max_pool_size: args.database_pool.db_max_connections, + }, + ) + .await + .unwrap(); // If the DB is in read-only mode, running ANALYZE is not possible and will // trigger and error https://www.postgresql.org/docs/current/hot-standby.html diff --git a/crates/orderbook/src/arguments.rs b/crates/orderbook/src/arguments.rs index f788d3541a..a5dcad6de6 100644 --- a/crates/orderbook/src/arguments.rs +++ b/crates/orderbook/src/arguments.rs @@ -28,6 +28,9 @@ pub struct Arguments { #[clap(flatten)] pub price_estimation: price_estimation::Arguments, + #[clap(flatten)] + pub database_pool: shared::arguments::DatabasePoolConfig, + /// A tracing Ethereum node URL to connect to, allowing a separate node URL /// to be used exclusively for tracing calls. 
#[clap(long, env)] @@ -181,6 +184,7 @@ impl std::fmt::Display for Arguments { http_client, token_owner_finder, price_estimation, + database_pool, tracing_node_url, bind_address, min_order_validity_period, @@ -211,6 +215,7 @@ impl std::fmt::Display for Arguments { write!(f, "{http_client}")?; write!(f, "{token_owner_finder}")?; write!(f, "{price_estimation}")?; + write!(f, "{database_pool}")?; display_option(f, "tracing_node_url", tracing_node_url)?; writeln!(f, "bind_address: {bind_address}")?; let _intentionally_ignored = db_url; diff --git a/crates/orderbook/src/database/mod.rs b/crates/orderbook/src/database/mod.rs index fa6f1092df..76a44ad7cc 100644 --- a/crates/orderbook/src/database/mod.rs +++ b/crates/orderbook/src/database/mod.rs @@ -14,26 +14,45 @@ use { anyhow::Result, database::byte_array::ByteArray, model::order::Order, - sqlx::{PgConnection, PgPool}, + shared::arguments::DB_MAX_CONNECTIONS_DEFAULT, + sqlx::{PgConnection, PgPool, postgres::PgPoolOptions}, }; // TODO: There is remaining optimization potential by implementing sqlx encoding // and decoding for U256 directly instead of going through BigDecimal. This is // not very important as this is fast enough anyway. +#[derive(Debug, Clone)] +pub struct Config { + pub max_pool_size: u32, +} + +impl Default for Config { + fn default() -> Self { + Self { + // Match SQLx default pool size + max_pool_size: DB_MAX_CONNECTIONS_DEFAULT.get(), + } + } +} + // The pool uses an Arc internally. #[derive(Clone)] pub struct Postgres { pub pool: PgPool, + pub config: Config, } // The implementation is split up into several modules which contain more public // methods. impl Postgres { - pub fn try_new(uri: &str) -> Result { + pub fn try_new(uri: &str, config: Config) -> Result { Ok(Self { - pool: PgPool::connect_lazy(uri)?, + pool: PgPoolOptions::new() + .max_connections(config.max_pool_size) + .connect_lazy(uri)?, + config, }) } diff --git a/crates/orderbook/src/database/orders.rs b/crates/orderbook/src/database/orders.rs index aa115e33dc..067f444a66 100644 --- a/crates/orderbook/src/database/orders.rs +++ b/crates/orderbook/src/database/orders.rs @@ -867,7 +867,7 @@ mod tests { async fn postgres_replace_order() { let owner = Address::repeat_byte(0x77); - let db = Postgres::try_new("postgresql://").unwrap(); + let db = Postgres::try_new("postgresql://", Default::default()).unwrap(); database::clear_DANGER(&db.pool).await.unwrap(); let old_order = Order { @@ -933,7 +933,7 @@ mod tests { async fn postgres_replace_order_no_cancellation_on_error() { let owner = Address::repeat_byte(0x77); - let db = Postgres::try_new("postgresql://").unwrap(); + let db = Postgres::try_new("postgresql://", Default::default()).unwrap(); database::clear_DANGER(&db.pool).await.unwrap(); let old_order = Order { @@ -977,7 +977,7 @@ mod tests { #[tokio::test] #[ignore] async fn postgres_presignature_status() { - let db = Postgres::try_new("postgresql://").unwrap(); + let db = Postgres::try_new("postgresql://", Default::default()).unwrap(); database::clear_DANGER(&db.pool).await.unwrap(); let uid = OrderUid([0u8; 56]); let order = Order { @@ -1050,7 +1050,7 @@ mod tests { #[tokio::test] #[ignore] async fn postgres_cancel_orders() { - let db = Postgres::try_new("postgresql://").unwrap(); + let db = Postgres::try_new("postgresql://", Default::default()).unwrap(); database::clear_DANGER(&db.pool).await.unwrap(); // Define some helper closures to make the test easier to read. 
@@ -1099,7 +1099,7 @@ mod tests { #[tokio::test] #[ignore] async fn postgres_insert_orders_with_interactions() { - let db = Postgres::try_new("postgresql://").unwrap(); + let db = Postgres::try_new("postgresql://", Default::default()).unwrap(); database::clear_DANGER(&db.pool).await.unwrap(); let interaction = |byte: u8| InteractionData { @@ -1156,7 +1156,7 @@ mod tests { #[tokio::test] #[ignore] async fn postgres_insert_orders_with_interactions_and_verified() { - let db = Postgres::try_new("postgresql://").unwrap(); + let db = Postgres::try_new("postgresql://", Default::default()).unwrap(); database::clear_DANGER(&db.pool).await.unwrap(); let quote = Quote { diff --git a/crates/orderbook/src/database/solver_competition.rs b/crates/orderbook/src/database/solver_competition.rs index 8aba3a057d..a04ece3e1d 100644 --- a/crates/orderbook/src/database/solver_competition.rs +++ b/crates/orderbook/src/database/solver_competition.rs @@ -124,7 +124,7 @@ mod tests { #[tokio::test] #[ignore] async fn not_found_error() { - let db = Postgres::try_new("postgresql://").unwrap(); + let db = Postgres::try_new("postgresql://", Default::default()).unwrap(); database::clear_DANGER(&db.pool).await.unwrap(); let result = db diff --git a/crates/orderbook/src/orderbook.rs b/crates/orderbook/src/orderbook.rs index 3c77c6515a..2b91f941b1 100644 --- a/crates/orderbook/src/orderbook.rs +++ b/crates/orderbook/src/orderbook.rs @@ -684,7 +684,8 @@ mod tests { )) }); - let database = crate::database::Postgres::try_new("postgresql://").unwrap(); + let database = + crate::database::Postgres::try_new("postgresql://", Default::default()).unwrap(); database::clear_DANGER(&database.pool).await.unwrap(); database.insert_order(&old_order).await.unwrap(); diff --git a/crates/orderbook/src/run.rs b/crates/orderbook/src/run.rs index 9229cd8b20..04e65e5fd3 100644 --- a/crates/orderbook/src/run.rs +++ b/crates/orderbook/src/run.rs @@ -172,13 +172,17 @@ pub async fn run(args: Arguments) { .await .expect("Deployed contract constants don't match the ones in this binary"); let domain_separator = DomainSeparator::new(chain_id, *settlement_contract.address()); - let postgres_write = - Postgres::try_new(args.db_write_url.as_str()).expect("failed to create database"); + let db_config = crate::database::Config { + max_pool_size: args.database_pool.db_max_connections.get(), + }; + let postgres_write = Postgres::try_new(args.db_write_url.as_str(), db_config.clone()) + .expect("failed to create database"); let postgres_read = if let Some(db_read_url) = args.db_read_url && args.db_write_url != db_read_url { - Postgres::try_new(db_read_url.as_str()).expect("failed to create read replica databaseR") + Postgres::try_new(db_read_url.as_str(), db_config) + .expect("failed to create read replica database") } else { postgres_write.clone() }; diff --git a/crates/refunder/src/arguments.rs b/crates/refunder/src/arguments.rs index 2dccf273cb..5e3c09d9bc 100644 --- a/crates/refunder/src/arguments.rs +++ b/crates/refunder/src/arguments.rs @@ -19,6 +19,9 @@ pub struct Arguments { #[clap(flatten)] pub logging: LoggingArguments, + #[clap(flatten)] + pub database_pool: shared::arguments::DatabasePoolConfig, + /// Minimum time in seconds an order must have been valid for /// to be eligible for refunding #[clap( @@ -94,6 +97,7 @@ impl std::fmt::Display for Arguments { ethflow_contracts, metrics_port, logging, + database_pool, db_url, refunder_pk, max_gas_price, @@ -104,6 +108,7 @@ impl std::fmt::Display for Arguments { write!(f, "{http_client}")?; write!(f, 
"{ethrpc}")?; write!(f, "{logging}")?; + write!(f, "{database_pool}")?; writeln!(f, "min_validity_duration: {min_validity_duration:?}")?; writeln!(f, "min_price_deviation_bps: {min_price_deviation_bps}")?; let _intentionally_ignored = db_url; diff --git a/crates/refunder/src/lib.rs b/crates/refunder/src/lib.rs index a3132063db..4fc9a59675 100644 --- a/crates/refunder/src/lib.rs +++ b/crates/refunder/src/lib.rs @@ -12,7 +12,7 @@ use { observe::metrics::LivenessChecking, refund_service::RefundService, shared::http_client::HttpClientFactory, - sqlx::PgPool, + sqlx::postgres::PgPoolOptions, std::{ sync::{Arc, RwLock}, time::{Duration, Instant}, @@ -54,7 +54,10 @@ pub async fn run(args: arguments::Arguments) { ); } - let pg_pool = PgPool::connect_lazy(args.db_url.as_str()).expect("failed to create database"); + let pg_pool = PgPoolOptions::new() + .max_connections(args.database_pool.db_max_connections.get()) + .connect_lazy(args.db_url.as_str()) + .expect("failed to create database"); let liveness = Arc::new(Liveness { // Program will be healthy at the start even if no loop was ran yet. diff --git a/crates/shared/src/arguments.rs b/crates/shared/src/arguments.rs index 59e72a6d69..46f542e195 100644 --- a/crates/shared/src/arguments.rs +++ b/crates/shared/src/arguments.rs @@ -13,7 +13,7 @@ use { std::{ collections::HashSet, fmt::{self, Display, Formatter}, - num::NonZeroU64, + num::{NonZeroU32, NonZeroU64}, str::FromStr, time::Duration, }, @@ -126,6 +126,23 @@ pub fn tracing_config(args: &TracingArguments, service_name: String) -> Option 0 +pub const DB_MAX_CONNECTIONS_DEFAULT: NonZeroU32 = NonZeroU32::new(10).unwrap(); + +#[derive(Debug, Clone, clap::Parser)] +pub struct DatabasePoolConfig { + /// Maximum number of connections in the database connection pool. + #[clap(long, env, default_value_t = DB_MAX_CONNECTIONS_DEFAULT)] + pub db_max_connections: NonZeroU32, +} + +impl Display for DatabasePoolConfig { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + writeln!(f, "db_max_connections: {}", self.db_max_connections) + } +} + #[derive(clap::Parser)] #[group(skip)] pub struct Arguments { From 2507e915d509c08b58e21394b9062a6cfa68476f Mon Sep 17 00:00:00 2001 From: Marcin Szymczak Date: Wed, 28 Jan 2026 16:53:42 +0100 Subject: [PATCH 015/219] Fix migrations introducing indexes on true_valid_to (#4095) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description Reinstates https://github.com/cowprotocol/services/pull/4055 with improved migrations that should successfully apply to prod. The previous migrations tended to lock-up indefinitely on prod database when running UPDATE on all rows in the `orders` table to ensure `true_valid_to` is not null. This was done as an additional safety layer as these rows have been manually backfilled previously, so it is no longer needed and turned out to be problematic. Additionally, the index creation can take more time than is allowed for release deployment which causes them to be aborted. It is easier to create them manually and have the migration CREATE INDEX IF NOT EXISTS. # Changes - Had to move migrations from 098, 099 to 101, 102 as there was a migration 100 merged in the meantime. - Moves migration 098 to 101. Removes conservative backfill of empty `true_valid_to` which caused a lock-up on the prod database. - Moves migration 099 to 102 Makes index creation optional (CREATE INDEX IF NOT EXISTS) as they will be created manually, to ensure smooth deployment. 
--------- Co-authored-by: ilya Co-authored-by: José Duarte Co-authored-by: Claude Co-authored-by: Martin Magnus --- database/sql/V101__set_true_valid_to_non_null.sql | 1 + database/sql/V102__create_true_valid_to_indexes.sql | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 database/sql/V101__set_true_valid_to_non_null.sql create mode 100644 database/sql/V102__create_true_valid_to_indexes.sql diff --git a/database/sql/V101__set_true_valid_to_non_null.sql b/database/sql/V101__set_true_valid_to_non_null.sql new file mode 100644 index 0000000000..a75a1afb7a --- /dev/null +++ b/database/sql/V101__set_true_valid_to_non_null.sql @@ -0,0 +1 @@ +ALTER TABLE orders ALTER COLUMN true_valid_to SET NOT NULL; diff --git a/database/sql/V102__create_true_valid_to_indexes.sql b/database/sql/V102__create_true_valid_to_indexes.sql new file mode 100644 index 0000000000..66baebc5be --- /dev/null +++ b/database/sql/V102__create_true_valid_to_indexes.sql @@ -0,0 +1,4 @@ +--index on `true_valid_to` for quickly discarding expired orders +CREATE INDEX CONCURRENTLY IF NOT EXISTS orders_true_valid_to ON orders USING btree (true_valid_to); +-- further drops the query from 100ms to 80ms (warmed cache) +CREATE INDEX CONCURRENTLY IF NOT EXISTS okay_onchain_orders ON onchain_placed_orders USING btree (uid) WHERE placement_error IS NOT NULL; From ff190ad02d91a87da0cf30167b030bd4f6f0cdfc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Duarte?= Date: Thu, 29 Jan 2026 09:39:44 +0000 Subject: [PATCH 016/219] Migrate gas estimation to match alloy's type (#4054) > [!CAUTION] > Review with care! The changes are non-trivial and there is one breaking change: the gas price returned by the driver no longer includes the base fee! # Description Refactors gas price handling to use integer arithmetic and alloy's native types instead of floating-point calculations. This eliminates precision loss in gas price calculations and better aligns with alloy's conventions. The removal of the base fee is not a true removal: before this change, the base fee was either 0 or the max value available, both leading to inaccurate results. The new code removes the base fee from the type that was being used to describe the estimations (because the base_fee isn't an estimate; it comes from the previous block), but starts querying the chain for the latest block so it's able to get the proper base_fee (if available). The gas estimates themselves should not suffer a big change (since the estimate code is the same), but the effective gas price should become more accurate due to the inclusion of the base fee in the calculations. # Changes - Replace custom GasPrice1559 with alloy's Eip1559Estimation throughout the codebase - Change GasPrice::base from FeePerGas to Option to match alloy's base fee representation - Migrate gas calculations from f64 to u128/U256 integer arithmetic - Implement calc_effective_gas_price from alloy for effective gas price calculations - Add base_fee: Option to BlockInfo for proper EIP-1559 support - Update API responses to return u128 directly instead of wrapped U256 - Add scaling helper methods (scaled_by_pct, scaled_by_pml) for clearer gas price adjustments # How to test > [!NOTE] > Tested on staging, starting at Mon, 19 Jan 2026 12:10:18 +0000. > Performed a successful trade: https://staging.explorer.cow.fi/lens/orders/0x06677572a2715cc28241a34f5d669247fba167c8d9adc3fcd338e40a3c52ea4109fbad1ea29c36dfe4f8f7baa87c5edf85e0d9f3696e28f5 1. Run existing test suite: cargo test 2.
Verify gas price estimation endpoints return expected values 3. Test refunder gas price calculations with various scenarios (new tx, replacement tx, max gas price limits) 4. Verify settlement submissions use correct gas parameters --------- Co-authored-by: Claude Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- .../driver/src/domain/competition/auction.rs | 13 +- .../domain/competition/solution/settlement.rs | 9 +- crates/driver/src/domain/eth/gas.rs | 30 +-- crates/driver/src/domain/mempools.rs | 37 ++-- .../driver/src/infra/api/routes/gasprice.rs | 31 +-- crates/driver/src/infra/blockchain/gas.rs | 35 +++- crates/driver/src/infra/blockchain/mod.rs | 15 +- crates/driver/src/infra/mempool/mod.rs | 20 +- crates/driver/src/tests/setup/solver.rs | 14 +- crates/ethrpc/src/block_stream/mod.rs | 19 +- crates/refunder/src/submitter.rs | 84 ++++---- crates/shared/src/gas_price.rs | 37 +++- .../configurable_alloy.rs | 24 ++- .../shared/src/gas_price_estimation/driver.rs | 54 +++-- .../src/gas_price_estimation/eth_node.rs | 27 ++- .../shared/src/gas_price_estimation/fake.rs | 23 ++- crates/shared/src/gas_price_estimation/mod.rs | 53 ++++- .../shared/src/gas_price_estimation/price.rs | 192 ------------------ .../src/gas_price_estimation/priority.rs | 47 ++--- crates/shared/src/order_quoting.rs | 44 ++-- .../src/price_estimation/competition/quote.rs | 16 +- 21 files changed, 334 insertions(+), 490 deletions(-) delete mode 100644 crates/shared/src/gas_price_estimation/price.rs diff --git a/crates/driver/src/domain/competition/auction.rs b/crates/driver/src/domain/competition/auction.rs index c91779d029..10bc830db7 100644 --- a/crates/driver/src/domain/competition/auction.rs +++ b/crates/driver/src/domain/competition/auction.rs @@ -2,12 +2,13 @@ use { crate::{ domain::{ competition::{self}, - eth, + eth::{self, GasPrice}, liquidity, time, }, infra::{Ethereum, blockchain, solver::Timeouts}, }, + alloy::primitives::U256, std::collections::{HashMap, HashSet}, thiserror::Error, }; @@ -56,11 +57,19 @@ impl Auction { true }); + let gas_est = eth.gas_price().await?; + let base_fee = eth.current_block().borrow().base_fee; + let gas_price = GasPrice::new( + U256::from(gas_est.max_fee_per_gas).into(), + U256::from(gas_est.max_priority_fee_per_gas).into(), + base_fee, + ); + Ok(Self { id, orders, tokens, - gas_price: eth.gas_price().await?, + gas_price, deadline, surplus_capturing_jit_order_owners, }) diff --git a/crates/driver/src/domain/competition/solution/settlement.rs b/crates/driver/src/domain/competition/solution/settlement.rs index a53cb14a0b..63bf90be3c 100644 --- a/crates/driver/src/domain/competition/solution/settlement.rs +++ b/crates/driver/src/domain/competition/solution/settlement.rs @@ -12,6 +12,7 @@ use { }, infra::{Simulator, blockchain::Ethereum, observe, solver::ManageNativeToken}, }, + alloy::primitives::U256, futures::future::try_join_all, std::collections::{BTreeSet, HashMap, HashSet}, tracing::instrument, @@ -176,7 +177,9 @@ impl Settlement { // Ensure that the solver has sufficient balance for the settlement to be mined // even if the gas price keeps climbing during the tx submission. - let required_eth_balance = gas.required_balance(price * 2.); + let required_eth_balance = + // Converting to U256 first avoids possible overflow + gas.required_balance(U256::from(price.max_fee_per_gas).saturating_mul(U256::from(2))); if eth.balance(solution.solver().address()).await? 
< required_eth_balance { return Err(Error::SolverAccountInsufficientBalance( required_eth_balance, @@ -400,7 +403,7 @@ impl Gas { /// The balance required to ensure settlement execution with the given gas /// parameters. - pub fn required_balance(&self, price: eth::GasPrice) -> eth::Ether { - self.limit * price.max() + pub fn required_balance(&self, max_fee_per_gas: U256) -> eth::Ether { + self.limit * max_fee_per_gas.into() } } diff --git a/crates/driver/src/domain/eth/gas.rs b/crates/driver/src/domain/eth/gas.rs index 1f93bf672e..efb72ef7ae 100644 --- a/crates/driver/src/domain/eth/gas.rs +++ b/crates/driver/src/domain/eth/gas.rs @@ -1,7 +1,8 @@ use { super::{Ether, U256}, + alloy::eips::eip1559::calc_effective_gas_price, derive_more::{Display, From, Into}, - std::{ops, ops::Add}, + std::ops::{self, Add}, }; /// Gas amount in gas units. @@ -37,16 +38,18 @@ pub struct GasPrice { tip: FeePerGas, /// The current base gas price that will be charged to all accounts on the /// next block. - base: FeePerGas, + base: u64, } impl GasPrice { /// Returns the estimated [`EffectiveGasPrice`] for the gas price estimate. pub fn effective(&self) -> EffectiveGasPrice { - let max = self.max.0.0; - let base = self.base.0.0; - let tip = self.tip.0.0; - max.min(base.saturating_add(tip)).into() + U256::from(calc_effective_gas_price( + u128::try_from(self.max.0.0).expect("max fee per gas should fit in a u128"), + u128::try_from(self.tip.0.0).expect("max priority fee per gas should fit in a u128"), + Some(self.base), + )) + .into() } pub fn max(&self) -> FeePerGas { @@ -57,11 +60,11 @@ impl GasPrice { self.tip } - pub fn base(&self) -> FeePerGas { + pub fn base(&self) -> u64 { self.base } - pub fn new(max: FeePerGas, tip: FeePerGas, base: FeePerGas) -> Self { + pub fn new(max: FeePerGas, tip: FeePerGas, base: u64) -> Self { Self { max, tip, base } } } @@ -80,17 +83,6 @@ impl std::ops::Mul for GasPrice { } } -impl From for GasPrice { - fn from(value: EffectiveGasPrice) -> Self { - let value = value.0.0; - Self { - max: value.into(), - tip: value.into(), - base: value.into(), - } - } -} - /// The amount of ETH to pay as fees for a single unit of gas. This is /// `{max,max_priority,base}_fee_per_gas` as defined by EIP-1559. /// diff --git a/crates/driver/src/domain/mempools.rs b/crates/driver/src/domain/mempools.rs index afd6fec97a..b5928a4f5e 100644 --- a/crates/driver/src/domain/mempools.rs +++ b/crates/driver/src/domain/mempools.rs @@ -8,7 +8,7 @@ use { }, infra::{self, Ethereum, observe, solver::Solver}, }, - alloy::consensus::Transaction, + alloy::{consensus::Transaction, eips::eip1559::Eip1559Estimation}, anyhow::Context, ethrpc::block_stream::into_stream, futures::{FutureExt, StreamExt, future::select_ok}, @@ -18,9 +18,9 @@ use { /// Factor by how much a transaction fee needs to be increased to override a /// pending transaction at the same nonce. The correct factor is actually -/// 1.125 but to avoid rounding issues on chains with very low gas prices +/// 12.5% but to avoid rounding issues on chains with very low gas prices /// we increase slightly more. -const GAS_PRICE_BUMP: f64 = 1.13; +const GAS_PRICE_BUMP_PCT: u64 = 13; /// The gas amount required to cancel a transaction. 
const CANCELLATION_GAS_AMOUNT: u64 = 21000; @@ -146,7 +146,7 @@ impl Mempools { .await; let final_gas_price = match &replacement_gas_price { Some(replacement_gas_price) - if replacement_gas_price.max() > current_gas_price.max() => + if replacement_gas_price.max_fee_per_gas > current_gas_price.max_fee_per_gas => { *replacement_gas_price } @@ -268,11 +268,11 @@ impl Mempools { async fn cancel( &self, mempool: &infra::mempool::Mempool, - original_tx_gas_price: eth::GasPrice, + original_tx_gas_price: Eip1559Estimation, solver: &Solver, nonce: u64, ) -> Result { - let fallback_gas_price = original_tx_gas_price * GAS_PRICE_BUMP; + let fallback_gas_price = original_tx_gas_price.scaled_by_pct(GAS_PRICE_BUMP_PCT); let replacement_gas_price = self .minimum_replacement_gas_price(mempool, solver, nonce) .await; @@ -314,15 +314,19 @@ impl Mempools { /// Computes minimum price to replace the last tx that was submitted /// with the given nonce. Returns `None` if no tx was submitted with /// that nonce yet. + #[tracing::instrument(skip_all)] async fn minimum_replacement_gas_price( &self, mempool: &infra::Mempool, solver: &Solver, next_nonce: u64, - ) -> Option { + ) -> Option { if let Some(last_submission) = mempool.last_submission(solver.address()) { - (last_submission.nonce == next_nonce) - .then_some(last_submission.gas_price * GAS_PRICE_BUMP) + if last_submission.nonce == next_nonce { + Some(last_submission.gas_price.scaled_by_pct(GAS_PRICE_BUMP_PCT)) + } else { + None + } } else { // If we don't have the last submission in-memory (i.e. first submission // attempt after a restart) we try to inspect the nodes transaction mempool. @@ -334,16 +338,15 @@ impl Mempools { .inspect_err(|err| tracing::debug!(?err, "could not inspect tx mempool")) .ok()??; - let pending_tx_gas_price = eth::GasPrice::new( - eth::U256::from(pending_tx.max_fee_per_gas()).into(), - eth::U256::from(pending_tx.max_priority_fee_per_gas().or_else(|| { + let pending_tx_gas_price = Eip1559Estimation { + max_fee_per_gas: pending_tx.max_fee_per_gas(), + max_priority_fee_per_gas: pending_tx.max_priority_fee_per_gas().or_else(|| { tracing::error!(tx = ?pending_tx.inner.tx_hash(), "pending tx is not EIP 1559"); None - })?) - .into(), - eth::U256::from(pending_tx.max_fee_per_gas()).into(), - ); - Some(pending_tx_gas_price * GAS_PRICE_BUMP) + })?, + }; + + Some(pending_tx_gas_price.scaled_by_pct(GAS_PRICE_BUMP_PCT)) } } } diff --git a/crates/driver/src/infra/api/routes/gasprice.rs b/crates/driver/src/infra/api/routes/gasprice.rs index 2c66f40d7a..175d13dec6 100644 --- a/crates/driver/src/infra/api/routes/gasprice.rs +++ b/crates/driver/src/infra/api/routes/gasprice.rs @@ -1,12 +1,7 @@ use { - crate::{ - domain::eth, - infra::{Ethereum, api::error::Error}, - util::serialize, - }, + crate::infra::{Ethereum, api::error::Error}, + alloy::eips::eip1559::Eip1559Estimation, axum::Json, - serde::{Deserialize, Serialize}, - serde_with::serde_as, tracing::instrument, }; @@ -14,29 +9,15 @@ pub(in crate::infra::api) fn gasprice(app: axum::Router) -> axum::Rout app.route("/gasprice", axum::routing::get(route)) } -/// Gas price components in EIP-1559 format. 
-#[serde_as] -#[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct GasPriceResponse { - #[serde_as(as = "serialize::U256")] - pub max_fee_per_gas: eth::U256, - #[serde_as(as = "serialize::U256")] - pub max_priority_fee_per_gas: eth::U256, - #[serde_as(as = "serialize::U256")] - pub base_fee_per_gas: eth::U256, -} - #[instrument(skip(eth))] async fn route( eth: axum::extract::State, -) -> Result, (hyper::StatusCode, axum::Json)> { +) -> Result, (hyper::StatusCode, axum::Json)> { // For simplicity we use the default time limit (None) let gas_price = eth.gas_price().await?; - Ok(Json(GasPriceResponse { - max_fee_per_gas: gas_price.max().0.0, - max_priority_fee_per_gas: gas_price.tip().0.0, - base_fee_per_gas: gas_price.base().0.0, + Ok(Json(Eip1559Estimation { + max_fee_per_gas: gas_price.max_fee_per_gas, + max_priority_fee_per_gas: gas_price.max_priority_fee_per_gas, })) } diff --git a/crates/driver/src/infra/blockchain/gas.rs b/crates/driver/src/infra/blockchain/gas.rs index 510eb1ace0..8c8741fdd5 100644 --- a/crates/driver/src/infra/blockchain/gas.rs +++ b/crates/driver/src/infra/blockchain/gas.rs @@ -8,6 +8,8 @@ use { domain::eth, infra::{config::file::GasEstimatorType, mempool}, }, + alloy::eips::eip1559::Eip1559Estimation, + anyhow::anyhow, ethrpc::Web3, shared::gas_price_estimation::{ GasPriceEstimating, @@ -84,20 +86,34 @@ impl GasPriceEstimator { /// If additional tip is configured, it will be added to the gas price. This /// is to increase the chance of a transaction being included in a block, in /// case private submission networks are used. - pub async fn estimate(&self) -> Result { + pub async fn estimate(&self) -> Result { let estimate = self.gas.estimate().await.map_err(Error::GasPrice)?; let max_priority_fee_per_gas = { // the driver supports tweaking the tx gas price tip in case the gas // price estimator is systematically too low => compute configured tip bump let (max_additional_tip, tip_percentage_increase) = self.additional_tip; - let additional_tip = f64::from(max_additional_tip) - .min(estimate.max_priority_fee_per_gas * tip_percentage_increase); + + // Calculate additional tip in integer space to avoid precision loss + // Convert percentage to basis points (multiply by 10000) to maintain precision + // e.g., tip_percentage_increase = 0.125 (12.5%) becomes 1250 + let overflow_err = || { + Error::GasPrice(anyhow!( + "overflow on multiplication (max_priority_fee_per_gas * tip_percentage_as_bps)" + )) + }; + let tip_percentage_as_bps = tip_percentage_increase * 10000.0; + let calculated_tip = eth::U256::from(estimate.max_priority_fee_per_gas) + .checked_mul(eth::U256::from(tip_percentage_as_bps)) + .ok_or_else(overflow_err)? 
+ / eth::U256::from(10000u128); + + let additional_tip = max_additional_tip.min(calculated_tip); // make sure we tip at least some configurable minimum amount std::cmp::max( self.min_priority_fee, - eth::U256::from(estimate.max_priority_fee_per_gas + additional_tip), + eth::U256::from(estimate.max_priority_fee_per_gas) + additional_tip, ) }; @@ -113,10 +129,11 @@ impl GasPriceEstimator { ))); } - Ok(eth::GasPrice::new( - suggested_max_fee_per_gas.into(), - max_priority_fee_per_gas.into(), - eth::U256::from(estimate.base_fee_per_gas).into(), - )) + Ok(Eip1559Estimation { + max_fee_per_gas: u128::try_from(suggested_max_fee_per_gas) + .map_err(|err| Error::GasPrice(err.into()))?, + max_priority_fee_per_gas: u128::try_from(max_priority_fee_per_gas) + .map_err(|err| Error::GasPrice(err.into()))?, + }) } } diff --git a/crates/driver/src/infra/blockchain/mod.rs b/crates/driver/src/infra/blockchain/mod.rs index 0a0595ae4e..d3d58dcfb8 100644 --- a/crates/driver/src/infra/blockchain/mod.rs +++ b/crates/driver/src/infra/blockchain/mod.rs @@ -4,6 +4,7 @@ use { domain::{eth, eth::U256}, }, alloy::{ + eips::eip1559::Eip1559Estimation, network::TransactionBuilder, providers::Provider, rpc::types::{TransactionReceipt, TransactionRequest}, @@ -15,6 +16,7 @@ use { ethrpc::{Web3, block_stream::CurrentBlockWatcher}, shared::{ account_balances::{BalanceSimulator, SimulationError}, + gas_price_estimation::Eip1559EstimationExt, price_estimation::trade_verifier::balance_overrides::{ BalanceOverrides, BalanceOverriding, @@ -236,7 +238,7 @@ impl Ethereum { /// The gas price is determined based on the deadline by which the /// transaction must be included on-chain. A shorter deadline requires a /// higher gas price to increase the likelihood of timely inclusion. - pub async fn gas_price(&self) -> Result { + pub async fn gas_price(&self) -> Result { self.inner.gas.estimate().await } @@ -291,6 +293,7 @@ impl Ethereum { #[instrument(skip(self), ret(level = Level::DEBUG))] pub(super) async fn simulation_gas_price(&self) -> Option { + let base_fee = self.current_block().borrow().base_fee; // Some nodes don't pick a reasonable default value when you don't specify a gas // price and default to 0. Additionally some sneaky tokens have special code // paths that detect that case to try to behave differently during simulations @@ -298,15 +301,7 @@ impl Ethereum { // default value we estimate the current gas price upfront. But because it's // extremely rare that tokens behave that way we are fine with falling back to // the node specific fallback value instead of failing the whole call. 
- let gas_price = self.inner.gas.estimate().await.ok()?.effective().0.0; - u128::try_from(gas_price) - .inspect_err(|err| { - tracing::debug!( - ?err, - "failed to convert gas estimate to u128, returning None" - ); - }) - .ok() + Some(self.inner.gas.estimate().await.ok()?.effective(base_fee)) } pub fn web3(&self) -> &Web3 { diff --git a/crates/driver/src/infra/mempool/mod.rs b/crates/driver/src/infra/mempool/mod.rs index 00d017908a..1b120a7e5c 100644 --- a/crates/driver/src/infra/mempool/mod.rs +++ b/crates/driver/src/infra/mempool/mod.rs @@ -6,7 +6,7 @@ use { }, alloy::{ consensus::Transaction, - eips::BlockNumberOrTag, + eips::{BlockNumberOrTag, eip1559::Eip1559Estimation}, primitives::Address, providers::{Provider, ext::TxPoolApi}, rpc::types::TransactionRequest, @@ -73,7 +73,7 @@ pub struct Mempool { #[derive(Debug, Clone)] pub struct Submission { pub nonce: u64, - pub gas_price: eth::GasPrice, + pub gas_price: Eip1559Estimation, } impl std::fmt::Display for Mempool { @@ -121,23 +121,13 @@ impl Mempool { pub async fn submit( &self, tx: eth::Tx, - gas_price: eth::GasPrice, + gas_price: Eip1559Estimation, gas_limit: eth::Gas, solver: &infra::Solver, nonce: u64, ) -> Result { - let max_fee_per_gas = gas_price - .max() - .0 - .0 - .try_into() - .map_err(anyhow::Error::from)?; - let max_priority_fee_per_gas = gas_price - .tip() - .0 - .0 - .try_into() - .map_err(anyhow::Error::from)?; + let max_fee_per_gas = gas_price.max_fee_per_gas; + let max_priority_fee_per_gas = gas_price.max_priority_fee_per_gas; let gas_limit = gas_limit.0.try_into().map_err(anyhow::Error::from)?; let tx_request = TransactionRequest::default() diff --git a/crates/driver/src/tests/setup/solver.rs b/crates/driver/src/tests/setup/solver.rs index 15e89414dc..79f147657b 100644 --- a/crates/driver/src/tests/setup/solver.rs +++ b/crates/driver/src/tests/setup/solver.rs @@ -20,6 +20,7 @@ use { number::testing::ApproxEq, serde_json::{Value, json}, serde_with::{DisplayFromStr, serde_as}, + shared::gas_price_estimation::Eip1559EstimationExt, solvers_dto::auction::FlashloanHint, std::{ collections::{HashMap, HashSet}, @@ -488,14 +489,8 @@ impl Solver { axum::routing::post( move |axum::extract::State(state): axum::extract::State, axum::extract::Json(req): axum::extract::Json| async move { - let effective_gas_price = eth - .gas_price() - .await - .unwrap() - .effective() - .0 - .0 - .to_string(); + let base_fee = eth.current_block().borrow().base_fee; + let effective_gas_price = eth.gas_price().await.unwrap().effective(base_fee).to_string(); let expected = json!({ "id": (!config.quote).then_some("1"), "tokens": tokens_json, @@ -558,11 +553,10 @@ fn check_solve_request(request: Value, expected: Value) { request.rest, expected.rest, "/solve request body does not match expectation" ); - assert!( request .effective_gas_price - .is_approx_eq(&expected.effective_gas_price, Some(15.0)), + .is_approx_eq(&expected.effective_gas_price, Some(1.0)), // 1.0% ); } diff --git a/crates/ethrpc/src/block_stream/mod.rs b/crates/ethrpc/src/block_stream/mod.rs index f746ad7fe4..c1538574fa 100644 --- a/crates/ethrpc/src/block_stream/mod.rs +++ b/crates/ethrpc/src/block_stream/mod.rs @@ -56,6 +56,7 @@ pub struct BlockInfo { pub timestamp: u64, pub gas_limit: U256, pub gas_price: U256, + pub base_fee: u64, /// When the system noticed the new block. 
pub observed_at: Instant, } @@ -69,6 +70,7 @@ impl Default for BlockInfo { timestamp: Default::default(), gas_limit: Default::default(), gas_price: Default::default(), + base_fee: Default::default(), observed_at: Instant::now(), } } @@ -89,19 +91,7 @@ impl TryFrom for BlockInfo { type Error = anyhow::Error; fn try_from(value: Block) -> std::result::Result { - Ok(Self { - number: value.header.number, - hash: value.header.hash, - parent_hash: value.header.parent_hash, - timestamp: value.header.timestamp, - gas_limit: U256::from(value.header.gas_limit), - gas_price: value - .header - .base_fee_per_gas - .map(U256::from) - .context("no gas price")?, - observed_at: Instant::now(), - }) + value.header.try_into() } } @@ -119,6 +109,9 @@ impl TryFrom for BlockInfo { .base_fee_per_gas .map(U256::from) .context("no gas price")?, + base_fee: value + .base_fee_per_gas + .ok_or_else(|| anyhow!("no base fee available"))?, observed_at: Instant::now(), }) } diff --git a/crates/refunder/src/submitter.rs b/crates/refunder/src/submitter.rs index 4872f1ab55..73d8a074af 100644 --- a/crates/refunder/src/submitter.rs +++ b/crates/refunder/src/submitter.rs @@ -9,13 +9,13 @@ // In the re-newed attempt for submission the same nonce is used as before. use { - alloy::{primitives::Address, providers::Provider}, + alloy::{eips::eip1559::Eip1559Estimation, primitives::Address, providers::Provider}, anyhow::{Context, Result}, contracts::alloy::CoWSwapEthFlow::{self, EthFlowOrder}, database::OrderUid, shared::{ ethrpc::Web3, - gas_price_estimation::{GasPriceEstimating, price::GasPrice1559}, + gas_price_estimation::{Eip1559EstimationExt, GasPriceEstimating}, }, std::time::Duration, }; @@ -24,22 +24,17 @@ use { // send out EIP1559 txs. // Example: If the prevailing gas is 10Gwei and the buffer factor is 1.20 // then the gas_price used will be 12. -const GAS_PRICE_BUFFER_FACTOR: f64 = 1.3; +const GAS_PRICE_BUFFER_PCT: u64 = 30; // In order to resubmit a new tx with the same nonce, the gas tip and // max_fee_per_gas needs to be increased by at least 10 percent. -const GAS_PRICE_BUMP: f64 = 1.125; - -/// Type safe cast to avoid unexpected issues due to type changes. -const fn f64_to_u128(n: f64) -> u128 { - n as u128 -} +const GAS_PRICE_BUMP_PERMIL: u64 = 125; pub struct Submitter { pub web3: Web3, pub signer_address: Address, pub gas_estimator: Box, - pub gas_parameters_of_last_tx: Option, + pub gas_parameters_of_last_tx: Option, pub nonce_of_last_submission: Option, pub max_gas_price: u64, pub start_priority_fee_tip: u64, @@ -88,8 +83,8 @@ impl Submitter { let tx_result = ethflow_contract .invalidateOrdersIgnoringNotAllowed(encoded_ethflow_orders) // Gas conversions are lossy but technically the should not have decimal points even though they're floats - .max_priority_fee_per_gas(f64_to_u128(gas_price.max_priority_fee_per_gas)) - .max_fee_per_gas(f64_to_u128(gas_price.max_fee_per_gas)) + .max_priority_fee_per_gas(gas_price.max_priority_fee_per_gas) + .max_fee_per_gas(gas_price.max_fee_per_gas) .from(self.signer_address) .nonce(nonce) .send() @@ -110,19 +105,19 @@ impl Submitter { } fn calculate_submission_gas_price( - gas_price_of_last_submission: Option, - web3_gas_estimation: GasPrice1559, + gas_price_of_last_submission: Option, + web3_gas_estimation: Eip1559Estimation, newest_nonce: u64, nonce_of_last_submission: Option, max_gas_price: u64, start_priority_fee_tip: u64, -) -> Result { +) -> Result { // The gas price of the refund tx is the current prevailing gas price // of the web3 gas estimation plus a buffer. 
- let mut new_gas_price = web3_gas_estimation.bump(GAS_PRICE_BUFFER_FACTOR); + let mut new_gas_price = web3_gas_estimation.scaled_by_pct(GAS_PRICE_BUFFER_PCT); // limit the prio_fee to max_fee_per_gas as otherwise tx is invalid new_gas_price.max_priority_fee_per_gas = - (start_priority_fee_tip as f64).min(new_gas_price.max_fee_per_gas); + (start_priority_fee_tip as u128).min(new_gas_price.max_fee_per_gas); // If tx from the previous submission was not mined, // we incease the tip and max_gas_fee for miners @@ -130,7 +125,8 @@ fn calculate_submission_gas_price( if Some(newest_nonce) == nonce_of_last_submission && let Some(gas_price_of_last_submission) = gas_price_of_last_submission { - let gas_price_of_last_submission = gas_price_of_last_submission.bump(GAS_PRICE_BUMP); + let gas_price_of_last_submission = + gas_price_of_last_submission.scaled_by_pml(GAS_PRICE_BUMP_PERMIL); new_gas_price.max_fee_per_gas = new_gas_price .max_fee_per_gas .max(gas_price_of_last_submission.max_fee_per_gas); @@ -139,7 +135,7 @@ fn calculate_submission_gas_price( .max(gas_price_of_last_submission.max_priority_fee_per_gas); } - if new_gas_price.max_fee_per_gas > max_gas_price as f64 { + if new_gas_price.max_fee_per_gas > max_gas_price as u128 { tracing::warn!( "Refunding txs are likely not mined in time, as the current gas price {:?} is higher \ than MAX_GAS_PRICE specified {:?}", @@ -147,9 +143,9 @@ fn calculate_submission_gas_price( max_gas_price ); new_gas_price.max_fee_per_gas = - f64::min(max_gas_price as f64, new_gas_price.max_fee_per_gas); + u128::min(max_gas_price as u128, new_gas_price.max_fee_per_gas); } - new_gas_price.max_priority_fee_per_gas = f64::min( + new_gas_price.max_priority_fee_per_gas = u128::min( new_gas_price.max_priority_fee_per_gas, new_gas_price.max_fee_per_gas, ); @@ -166,11 +162,10 @@ mod tests { const TEST_START_PRIORITY_FEE_TIP: u64 = 2_000_000_000; // First case: previous tx was successful - let max_fee_per_gas = 4_000_000_000f64; - let web3_gas_estimation = GasPrice1559 { - base_fee_per_gas: 2_000_000_000f64, + let max_fee_per_gas = 4_000_000_000_u128; + let web3_gas_estimation = Eip1559Estimation { max_fee_per_gas, - max_priority_fee_per_gas: 3_000_000_000f64, + max_priority_fee_per_gas: 3_000_000_000_u128, }; let newest_nonce = 1; let nonce_of_last_submission = None; @@ -184,19 +179,18 @@ mod tests { TEST_START_PRIORITY_FEE_TIP, ) .unwrap(); - let expected_result = GasPrice1559 { - max_fee_per_gas: max_fee_per_gas * GAS_PRICE_BUFFER_FACTOR, - max_priority_fee_per_gas: TEST_START_PRIORITY_FEE_TIP as f64, - base_fee_per_gas: 2_000_000_000f64, + + let expected_result = Eip1559Estimation { + max_fee_per_gas: max_fee_per_gas * (100 + GAS_PRICE_BUFFER_PCT as u128) / 100, + max_priority_fee_per_gas: TEST_START_PRIORITY_FEE_TIP as u128, }; assert_eq!(result, expected_result); // Second case: Previous tx was not successful let nonce_of_last_submission = Some(newest_nonce); - let max_fee_per_gas_of_last_tx = max_fee_per_gas * 2f64; - let gas_price_of_last_submission = GasPrice1559 { + let max_fee_per_gas_of_last_tx = max_fee_per_gas * 2; + let gas_price_of_last_submission = Eip1559Estimation { max_fee_per_gas: max_fee_per_gas_of_last_tx, - max_priority_fee_per_gas: TEST_START_PRIORITY_FEE_TIP as f64, - base_fee_per_gas: 2_000_000_000f64, + max_priority_fee_per_gas: TEST_START_PRIORITY_FEE_TIP as u128, }; let result = calculate_submission_gas_price( Some(gas_price_of_last_submission), @@ -207,18 +201,19 @@ mod tests { TEST_START_PRIORITY_FEE_TIP, ) .unwrap(); - let expected_result = 
GasPrice1559 { - max_fee_per_gas: max_fee_per_gas_of_last_tx * GAS_PRICE_BUMP, - max_priority_fee_per_gas: TEST_START_PRIORITY_FEE_TIP as f64 * GAS_PRICE_BUMP, - base_fee_per_gas: 2_000_000_000f64, + let expected_result = Eip1559Estimation { + max_fee_per_gas: max_fee_per_gas_of_last_tx * (1000 + GAS_PRICE_BUMP_PERMIL as u128) + / 1000, + max_priority_fee_per_gas: (TEST_START_PRIORITY_FEE_TIP as u128) + * (1000 + GAS_PRICE_BUMP_PERMIL as u128) + / 1000, }; assert_eq!(result, expected_result); // Thrid case: MAX_GAS_PRICE is not exceeded - let max_fee_per_gas = TEST_MAX_GAS_PRICE as f64 + 1000f64; - let web3_gas_estimation = GasPrice1559 { - base_fee_per_gas: 2_000_000_000f64, + let max_fee_per_gas = TEST_MAX_GAS_PRICE as u128 + 1000_u128; + let web3_gas_estimation = Eip1559Estimation { max_fee_per_gas, - max_priority_fee_per_gas: 3_000_000_000f64, + max_priority_fee_per_gas: 3_000_000_000_u128, }; let nonce_of_last_submission = None; let gas_price_of_last_submission = None; @@ -231,10 +226,9 @@ mod tests { TEST_START_PRIORITY_FEE_TIP, ) .unwrap(); - let expected_result = GasPrice1559 { - base_fee_per_gas: 2_000_000_000f64, - max_fee_per_gas: TEST_MAX_GAS_PRICE as f64, - max_priority_fee_per_gas: TEST_START_PRIORITY_FEE_TIP as f64, + let expected_result = Eip1559Estimation { + max_fee_per_gas: TEST_MAX_GAS_PRICE as u128, + max_priority_fee_per_gas: TEST_START_PRIORITY_FEE_TIP as u128, }; assert_eq!(result, expected_result); } diff --git a/crates/shared/src/gas_price.rs b/crates/shared/src/gas_price.rs index a3c8fa1d9b..69ec87eaa2 100644 --- a/crates/shared/src/gas_price.rs +++ b/crates/shared/src/gas_price.rs @@ -5,9 +5,9 @@ //! anomalies. use { - crate::gas_price_estimation::{GasPriceEstimating, price::GasPrice1559}, + crate::gas_price_estimation::GasPriceEstimating, + alloy::eips::eip1559::{Eip1559Estimation, calc_effective_gas_price}, anyhow::Result, - tracing::instrument, }; /// An instrumented gas price estimator that wraps an inner one. @@ -33,18 +33,37 @@ impl GasPriceEstimating for InstrumentedGasEstimator where T: GasPriceEstimating, { - #[instrument(skip_all)] - async fn estimate(&self) -> Result { - let estimate = self.inner.estimate().await?; + async fn estimate(&self) -> Result { + self.inner.estimate().await + } + + async fn base_fee(&self) -> Result> { + self.inner.base_fee().await + } + + #[tracing::instrument(skip_all)] + async fn effective_gas_price(&self) -> Result { + let estimate = self.estimate().await?; + let base_fee = self.inner.base_fee().await?; self.metrics - .gas_price - .set(estimate.effective_gas_price() / 1e9); - Ok(estimate) + .base_fee + .set(base_fee.unwrap_or(0).cast_signed()); + + let effective_gas_price = calc_effective_gas_price( + estimate.max_fee_per_gas, + estimate.max_priority_fee_per_gas, + base_fee, + ); + + self.metrics.gas_price.set(effective_gas_price as f64 / 1e9); + Ok(effective_gas_price) } } #[derive(prometheus_metric_storage::MetricStorage)] struct Metrics { - /// Last measured gas price in gwei + /// Last measured effective gas price in gwei gas_price: prometheus::Gauge, + /// Last measured base fee + base_fee: prometheus::IntGauge, } diff --git a/crates/shared/src/gas_price_estimation/configurable_alloy.rs b/crates/shared/src/gas_price_estimation/configurable_alloy.rs index 671497d7cf..5a0840b257 100644 --- a/crates/shared/src/gas_price_estimation/configurable_alloy.rs +++ b/crates/shared/src/gas_price_estimation/configurable_alloy.rs @@ -6,9 +6,9 @@ //! 
- Reward percentile to use use { - crate::gas_price_estimation::{GasPriceEstimating, price::GasPrice1559, u128_to_f64}, + crate::gas_price_estimation::GasPriceEstimating, alloy::{ - eips::BlockNumberOrTag, + eips::{BlockId, BlockNumberOrTag, eip1559::Eip1559Estimation}, providers::{Provider, utils::eip1559_default_estimator}, }, anyhow::{Context, Result}, @@ -49,11 +49,19 @@ impl ConfigurableGasPriceEstimator { #[async_trait::async_trait] impl GasPriceEstimating for ConfigurableGasPriceEstimator { + async fn base_fee(&self) -> Result> { + Ok(self + .provider + .get_block(BlockId::latest()) + .await? + .and_then(|block| block.header.base_fee_per_gas)) + } + #[instrument(skip(self), fields( past_blocks = %self.config.past_blocks, reward_percentile = %self.config.reward_percentile ))] - async fn estimate(&self) -> Result { + async fn estimate(&self) -> Result { // Fetch fee history with our configured parameters let fee_history = self .provider @@ -90,13 +98,9 @@ impl GasPriceEstimating for ConfigurableGasPriceEstimator { let estimation = eip1559_default_estimator(base_fee_per_gas, &fee_history.reward.unwrap_or_default()); - Ok(GasPrice1559 { - base_fee_per_gas: u128_to_f64(base_fee_per_gas) - .context("could not convert base_fee_per_gas to f64")?, - max_fee_per_gas: u128_to_f64(estimation.max_fee_per_gas) - .context("could not convert max_fee_per_gas to f64")?, - max_priority_fee_per_gas: u128_to_f64(estimation.max_priority_fee_per_gas) - .context("could not convert max_priority_fee_per_gas to f64")?, + Ok(Eip1559Estimation { + max_fee_per_gas: estimation.max_fee_per_gas, + max_priority_fee_per_gas: estimation.max_priority_fee_per_gas, }) } } diff --git a/crates/shared/src/gas_price_estimation/driver.rs b/crates/shared/src/gas_price_estimation/driver.rs index 8c3b98f85b..5e1d7b1c16 100644 --- a/crates/shared/src/gas_price_estimation/driver.rs +++ b/crates/shared/src/gas_price_estimation/driver.rs @@ -1,11 +1,11 @@ use { - crate::gas_price_estimation::{GasPriceEstimating, price::GasPrice1559}, - alloy::primitives::U256, - anyhow::{Context, Result}, - number::serialization::HexOrDecimalU256, + crate::gas_price_estimation::GasPriceEstimating, + alloy::{ + eips::{BlockId, eip1559::Eip1559Estimation}, + providers::{DynProvider, Provider}, + }, + anyhow::{Context, Result, anyhow}, reqwest::Url, - serde::Deserialize, - serde_with::serde_as, std::{ sync::Arc, time::{Duration, Instant}, @@ -21,40 +21,29 @@ pub struct DriverGasEstimator { client: reqwest::Client, url: Url, cache: Arc>>, + provider: DynProvider, } #[derive(Debug, Clone)] struct CachedGasPrice { - price: GasPrice1559, + price: Eip1559Estimation, timestamp: Instant, } -#[serde_as] -#[derive(Debug, Deserialize)] -#[serde(rename_all = "camelCase")] -/// Gas price components in EIP-1559 format. 
-struct GasPriceResponse { - #[serde_as(as = "HexOrDecimalU256")] - max_fee_per_gas: U256, - #[serde_as(as = "HexOrDecimalU256")] - max_priority_fee_per_gas: U256, - #[serde_as(as = "HexOrDecimalU256")] - base_fee_per_gas: U256, -} - const CACHE_DURATION: Duration = Duration::from_secs(5); impl DriverGasEstimator { - pub fn new(client: reqwest::Client, driver_url: Url) -> Self { + pub fn new(client: reqwest::Client, driver_url: Url, provider: DynProvider) -> Self { Self { client, url: driver_url, cache: Arc::new(Mutex::new(None)), + provider, } } #[instrument(skip(self))] - async fn fetch_gas_price(&self) -> Result { + async fn fetch_gas_price(&self) -> Result { let response = self .client .get(self.url.clone()) @@ -63,14 +52,13 @@ impl DriverGasEstimator { .context("failed to send request to driver")? .error_for_status() .context("driver returned error status")? - .json::() + .json::() .await .context("failed to parse driver response")?; - Ok(GasPrice1559 { - base_fee_per_gas: f64::from(response.base_fee_per_gas), - max_fee_per_gas: f64::from(response.max_fee_per_gas), - max_priority_fee_per_gas: f64::from(response.max_priority_fee_per_gas), + Ok(Eip1559Estimation { + max_fee_per_gas: response.max_fee_per_gas, + max_priority_fee_per_gas: response.max_priority_fee_per_gas, }) } } @@ -78,7 +66,7 @@ impl DriverGasEstimator { #[async_trait::async_trait] impl GasPriceEstimating for DriverGasEstimator { #[instrument(skip(self))] - async fn estimate(&self) -> Result { + async fn estimate(&self) -> Result { // Lock cache for entire duration of this method to prevent concurrent network // requests let mut cache = self.cache.lock().await; @@ -98,4 +86,14 @@ impl GasPriceEstimating for DriverGasEstimator { Ok(price) } + + async fn base_fee(&self) -> Result> { + Ok(self + .provider + .get_block(BlockId::latest()) + .await? + .ok_or_else(|| anyhow!("fecthed block does not have header"))? + .header + .base_fee_per_gas) + } } diff --git a/crates/shared/src/gas_price_estimation/eth_node.rs b/crates/shared/src/gas_price_estimation/eth_node.rs index 6082c45db2..fa7fad9508 100644 --- a/crates/shared/src/gas_price_estimation/eth_node.rs +++ b/crates/shared/src/gas_price_estimation/eth_node.rs @@ -4,9 +4,12 @@ //! This approach is ported from the [`cowprotocol/gas-estimation`](https://github.com/cowprotocol/gas-estimation/tree/v0.7.3) crate's legacy estimation. use { - crate::gas_price_estimation::{GasPriceEstimating, price::GasPrice1559, u128_to_f64}, - alloy::providers::Provider, - anyhow::{Context, Result}, + crate::gas_price_estimation::GasPriceEstimating, + alloy::{ + eips::{BlockId, eip1559::Eip1559Estimation}, + providers::Provider, + }, + anyhow::{Context, Result, anyhow}, ethrpc::AlloyProvider, }; @@ -23,18 +26,26 @@ impl NodeGasPriceEstimator { impl GasPriceEstimating for NodeGasPriceEstimator { /// Returns the result of calling the `eth_gasPrice` endpoint as the gas /// estimation. - async fn estimate(&self) -> Result { + async fn estimate(&self) -> Result { let legacy = self .0 .get_gas_price() .await - .context("failed to get web3 gas price") - .map(u128_to_f64)??; + .context("failed to get web3 gas price")?; - Ok(GasPrice1559 { - base_fee_per_gas: 0.0, + Ok(Eip1559Estimation { max_fee_per_gas: legacy, max_priority_fee_per_gas: legacy, }) } + + async fn base_fee(&self) -> Result> { + Ok(self + .0 + .get_block(BlockId::latest()) + .await? + .ok_or_else(|| anyhow!("fecthed block does not have header"))? 
+ .header + .base_fee_per_gas) + } } diff --git a/crates/shared/src/gas_price_estimation/fake.rs b/crates/shared/src/gas_price_estimation/fake.rs index 19f8e56de9..d19facce0f 100644 --- a/crates/shared/src/gas_price_estimation/fake.rs +++ b/crates/shared/src/gas_price_estimation/fake.rs @@ -1,20 +1,33 @@ use { - crate::gas_price_estimation::{GasPriceEstimating, price::GasPrice1559}, + crate::gas_price_estimation::GasPriceEstimating, + alloy::eips::eip1559::Eip1559Estimation, anyhow::Result, }; -#[derive(Default)] -pub struct FakeGasPriceEstimator(pub GasPrice1559); +pub struct FakeGasPriceEstimator(pub Eip1559Estimation); + +impl Default for FakeGasPriceEstimator { + fn default() -> Self { + Self(Eip1559Estimation { + max_fee_per_gas: Default::default(), + max_priority_fee_per_gas: Default::default(), + }) + } +} impl FakeGasPriceEstimator { - pub fn new(gas_price: GasPrice1559) -> Self { + pub fn new(gas_price: Eip1559Estimation) -> Self { Self(gas_price) } } #[async_trait::async_trait] impl GasPriceEstimating for FakeGasPriceEstimator { - async fn estimate(&self) -> Result { + async fn estimate(&self) -> Result { Ok(self.0) } + + async fn base_fee(&self) -> Result> { + Ok(Default::default()) + } } diff --git a/crates/shared/src/gas_price_estimation/mod.rs b/crates/shared/src/gas_price_estimation/mod.rs index 2aad505340..b7755cc1fd 100644 --- a/crates/shared/src/gas_price_estimation/mod.rs +++ b/crates/shared/src/gas_price_estimation/mod.rs @@ -2,7 +2,6 @@ pub mod configurable_alloy; pub mod driver; pub mod eth_node; pub mod fake; -pub mod price; pub mod priority; use { @@ -20,7 +19,10 @@ use { }, http_client::HttpClientFactory, }, - ::alloy::providers::Provider, + ::alloy::{ + eips::eip1559::{Eip1559Estimation, calc_effective_gas_price}, + providers::Provider, + }, anyhow::Result, std::str::FromStr, tracing::instrument, @@ -32,7 +34,19 @@ pub use {driver::DriverGasEstimator, fake::FakeGasPriceEstimator}; #[async_trait::async_trait] pub trait GasPriceEstimating: Send + Sync { /// Estimate the gas price for a transaction to be mined "quickly". - async fn estimate(&self) -> Result; + async fn estimate(&self) -> Result; + + async fn base_fee(&self) -> Result>; + + async fn effective_gas_price(&self) -> Result { + let estimate = self.estimate().await?; + let base_fee = self.base_fee().await?; + Ok(calc_effective_gas_price( + estimate.max_fee_per_gas, + estimate.max_priority_fee_per_gas, + base_fee, + )) + } } #[derive(Clone, Debug)] @@ -72,6 +86,7 @@ pub async fn create_priority_estimator( estimators.push(Box::new(DriverGasEstimator::new( http_factory.create(), url.clone(), + web3.alloy.clone(), ))); } GasEstimatorType::Web3 => { @@ -96,9 +111,33 @@ pub async fn create_priority_estimator( Ok(PriorityGasPriceEstimating::new(estimators)) } -fn u128_to_f64(val: u128) -> Result { - if val > 2u128.pow(f64::MANTISSA_DIGITS) { - anyhow::bail!(format!("could not convert u128 to f64: {val}")); +/// Extension trait for EIP-1559 gas price estimations. +pub trait Eip1559EstimationExt { + /// Calculates the effective gas price that will be paid given the base fee. + fn effective(self, base_fee: u64) -> u128; + + /// Scales fees by a multiplier in parts per thousand (e.g., 100 = +10%). 
+ fn scaled_by_pml(self, pml: u64) -> Self; +} + +impl Eip1559EstimationExt for Eip1559Estimation { + fn effective(self, base_fee: u64) -> u128 { + calc_effective_gas_price( + self.max_fee_per_gas, + self.max_priority_fee_per_gas, + Some(base_fee), + ) + } + + fn scaled_by_pml(mut self, pml: u64) -> Self { + self.max_fee_per_gas = { + let n = self.max_fee_per_gas; + n * (1000 + pml as u128) / 1000 + }; + self.max_priority_fee_per_gas = { + let n = self.max_priority_fee_per_gas; + n * (1000 + pml as u128) / 1000 + }; + self } - Ok(val as f64) } diff --git a/crates/shared/src/gas_price_estimation/price.rs b/crates/shared/src/gas_price_estimation/price.rs deleted file mode 100644 index 8d1e3ec04f..0000000000 --- a/crates/shared/src/gas_price_estimation/price.rs +++ /dev/null @@ -1,192 +0,0 @@ -// Vendored implementation of GasPrice1559 to start removing the dependency on -// the gas_estimation crate -use serde::Serialize; - -/// EIP1559 gas price -#[derive(Debug, Default, Clone, Copy, PartialEq, PartialOrd, Serialize)] -pub struct GasPrice1559 { - // Estimated base fee for the pending block (block currently being mined) - pub base_fee_per_gas: f64, - // Maximum gas price willing to pay for the transaction. - pub max_fee_per_gas: f64, - // Priority fee used to incentivize miners to include the tx in case of network congestion. - pub max_priority_fee_per_gas: f64, -} - -impl GasPrice1559 { - // Estimate the effective gas price based on the current network conditions - // (base_fee_per_gas) Beware that gas price for mined transaction could be - // different from estimated value in case of 1559 tx - // (because base_fee_per_gas can change between estimation and mining the tx). - pub fn effective_gas_price(&self) -> f64 { - std::cmp::min_by( - self.max_fee_per_gas, - self.max_priority_fee_per_gas + self.base_fee_per_gas, - |a, b| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal), - ) - } - - // Bump gas price by factor. - pub fn bump(self, factor: f64) -> Self { - Self { - max_fee_per_gas: self.max_fee_per_gas * factor, - max_priority_fee_per_gas: self.max_priority_fee_per_gas * factor, - ..self - } - } - - // Ceil gas price (since its defined as float). - pub fn ceil(self) -> Self { - Self { - max_fee_per_gas: self.max_fee_per_gas.ceil(), - max_priority_fee_per_gas: self.max_priority_fee_per_gas.ceil(), - ..self - } - } - - // If current cap if higher then the input, set to input. - pub fn limit_cap(self, cap: f64) -> Self { - Self { - max_fee_per_gas: self.max_fee_per_gas.min(cap), - max_priority_fee_per_gas: self - .max_priority_fee_per_gas - .min(self.max_fee_per_gas.min(cap)), /* enforce max_priority_fee_per_gas <= - * max_fee_per_gas */ - ..self - } - } -} - -impl std::fmt::Display for GasPrice1559 { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let format_unit = |wei| { - let gwei: f64 = wei / 1e9; - if gwei >= 1.0 { - format!("{:.2} Gwei", gwei) - } else { - format!("{wei} wei") - } - }; - write!( - f, - "{{ max_fee: {}, max_priority_fee: {}, base_fee: {} }}", - format_unit(self.max_fee_per_gas), - format_unit(self.max_priority_fee_per_gas), - format_unit(self.base_fee_per_gas), - ) - } -} - -#[cfg(test)] -mod tests { - use crate::gas_price_estimation::price::GasPrice1559; - - // Copied from the source: https://github.com/ashleygwilliams/assert_approx_eq/blob/master/src/lib.rs - // should be removed as we move away from expressing gas in f64 - macro_rules! 
assert_approx_eq { - ($a:expr, $b:expr) => {{ - let eps = 1.0e-6; - let (a, b) = (&$a, &$b); - assert!( - (*a - *b).abs() < eps, - "assertion failed: `(left !== right)` (left: `{:?}`, right: `{:?}`, expect diff: \ - `{:?}`, real diff: `{:?}`)", - *a, - *b, - eps, - (*a - *b).abs() - ); - }}; - } - - #[test] - fn bump_and_ceil() { - let gas_price = GasPrice1559 { - max_fee_per_gas: 2.0, - max_priority_fee_per_gas: 3.0, - ..Default::default() - }; - - let gas_price_bumped = GasPrice1559 { - max_fee_per_gas: 2.25, - max_priority_fee_per_gas: 3.375, - ..Default::default() - }; - - let gas_price_bumped_and_ceiled = GasPrice1559 { - max_fee_per_gas: 3.0, - max_priority_fee_per_gas: 4.0, - ..Default::default() - }; - - assert_eq!(gas_price.bump(1.125), gas_price_bumped); - assert_eq!(gas_price.bump(1.125).ceil(), gas_price_bumped_and_ceiled); - } - - #[test] - fn limit_cap_only_max_fee_capped() { - let gas_price = GasPrice1559 { - max_fee_per_gas: 5.0, - max_priority_fee_per_gas: 3.0, - ..Default::default() - }; - - let gas_price_capped = GasPrice1559 { - max_fee_per_gas: 4.0, - max_priority_fee_per_gas: 3.0, - ..Default::default() - }; - - assert_eq!(gas_price.limit_cap(4.0), gas_price_capped); - } - - #[test] - fn limit_cap_max_fee_and_max_priority_capped() { - let gas_price = GasPrice1559 { - max_fee_per_gas: 5.0, - max_priority_fee_per_gas: 3.0, - ..Default::default() - }; - - let gas_price_capped = GasPrice1559 { - max_fee_per_gas: 2.0, - max_priority_fee_per_gas: 2.0, - ..Default::default() - }; - - assert_eq!(gas_price.limit_cap(2.0), gas_price_capped); - } - - #[test] - fn estimate_eip1559() { - assert_approx_eq!( - GasPrice1559 { - max_fee_per_gas: 10.0, - max_priority_fee_per_gas: 5.0, - base_fee_per_gas: 2.0 - } - .effective_gas_price(), - 7.0 - ); - - assert_approx_eq!( - GasPrice1559 { - max_fee_per_gas: 10.0, - max_priority_fee_per_gas: 8.0, - base_fee_per_gas: 2.0 - } - .effective_gas_price(), - 10.0 - ); - - assert_approx_eq!( - GasPrice1559 { - max_fee_per_gas: 10.0, - max_priority_fee_per_gas: 10.0, - base_fee_per_gas: 2.0 - } - .effective_gas_price(), - 10.0 - ); - } -} diff --git a/crates/shared/src/gas_price_estimation/priority.rs b/crates/shared/src/gas_price_estimation/priority.rs index ba04669d69..c6ad5fca19 100644 --- a/crates/shared/src/gas_price_estimation/priority.rs +++ b/crates/shared/src/gas_price_estimation/priority.rs @@ -1,5 +1,6 @@ use { - crate::gas_price_estimation::{GasPriceEstimating, price::GasPrice1559}, + crate::gas_price_estimation::GasPriceEstimating, + alloy::eips::eip1559::Eip1559Estimation, anyhow::{Result, anyhow}, std::{ future::Future, @@ -35,10 +36,10 @@ impl PriorityGasPriceEstimating { Self { estimators } } - async fn prioritize<'a, T, F>(&'a self, operation: T) -> Result + async fn prioritize<'a, T, F, O>(&'a self, operation: T) -> Result where T: Fn(&'a dyn GasPriceEstimating) -> F, - F: Future>, + F: Future>, { for (i, estimator) in self.estimators.iter().enumerate() { match operation(estimator.estimator.as_ref()).await { @@ -62,9 +63,13 @@ impl PriorityGasPriceEstimating { #[async_trait::async_trait] impl GasPriceEstimating for PriorityGasPriceEstimating { - async fn estimate(&self) -> Result { + async fn estimate(&self) -> Result { self.prioritize(|estimator| estimator.estimate()).await } + + async fn base_fee(&self) -> Result> { + self.prioritize(|estimator| estimator.base_fee()).await + } } #[cfg(test)] @@ -73,40 +78,22 @@ mod tests { crate::gas_price_estimation::{ GasPriceEstimating, MockGasPriceEstimating, - price::GasPrice1559, 
priority::PriorityGasPriceEstimating, }, + alloy::eips::eip1559::Eip1559Estimation, anyhow::anyhow, futures::future::FutureExt, }; - // Copied from the source: https://github.com/ashleygwilliams/assert_approx_eq/blob/master/src/lib.rs - // should be removed as we move away from expressing gas in f64 - macro_rules! assert_approx_eq { - ($a:expr, $b:expr) => {{ - let eps = 1.0e-6; - let (a, b) = (&$a, &$b); - assert!( - (*a - *b).abs() < eps, - "assertion failed: `(left !== right)` (left: `{:?}`, right: `{:?}`, expect diff: \ - `{:?}`, real diff: `{:?}`)", - *a, - *b, - eps, - (*a - *b).abs() - ); - }}; - } - #[test] fn prioritize_picks_first_if_first_succeeds() { let mut estimator_0 = MockGasPriceEstimating::new(); let estimator_1 = MockGasPriceEstimating::new(); estimator_0.expect_estimate().times(1).returning(|| { - Ok(GasPrice1559 { - base_fee_per_gas: 1.0, - ..Default::default() + Ok(Eip1559Estimation { + max_fee_per_gas: 10, + max_priority_fee_per_gas: 0, }) }); @@ -126,16 +113,16 @@ mod tests { .times(1) .returning(|| Err(anyhow!(""))); estimator_1.expect_estimate().times(1).returning(|| { - Ok(GasPrice1559 { - base_fee_per_gas: 2.0, - ..Default::default() + Ok(Eip1559Estimation { + max_fee_per_gas: 10, + max_priority_fee_per_gas: 0, }) }); let priority = PriorityGasPriceEstimating::new(vec![Box::new(estimator_0), Box::new(estimator_1)]); let result = priority.estimate().now_or_never().unwrap().unwrap(); - assert_approx_eq!(result.base_fee_per_gas, 2.0); + assert_eq!(result.max_fee_per_gas, 10); } #[test] diff --git a/crates/shared/src/order_quoting.rs b/crates/shared/src/order_quoting.rs index 7f844f4e6c..dd47e3f9d0 100644 --- a/crates/shared/src/order_quoting.rs +++ b/crates/shared/src/order_quoting.rs @@ -462,9 +462,9 @@ impl OrderQuoter { }; let trade_query = Arc::new(parameters.to_price_query(self.default_quote_timeout)); - let (gas_estimate, trade_estimate, sell_token_price, _) = futures::try_join!( + let (effective_gas_price, trade_estimate, sell_token_price, _) = futures::try_join!( self.gas_estimator - .estimate() + .effective_gas_price() .map_err(|err| CalculateQuoteError::from(( EstimatorKind::Gas, PriceEstimationError::ProtocolInternal(err) @@ -496,7 +496,7 @@ impl OrderQuoter { }; let fee_parameters = FeeParameters { gas_amount: trade_estimate.gas as _, - gas_price: gas_estimate.effective_gas_price(), + gas_price: effective_gas_price as f64, sell_token_price, }; @@ -786,7 +786,7 @@ mod tests { super::*, crate::{ account_balances::MockBalanceFetching, - gas_price_estimation::{FakeGasPriceEstimator, price::GasPrice1559}, + gas_price_estimation::FakeGasPriceEstimator, price_estimation::{ HEALTHY_PRICE_ESTIMATION_TIME, MockPriceEstimating, @@ -795,6 +795,7 @@ mod tests { }, Address, U256 as AlloyU256, + alloy::eips::eip1559::Eip1559Estimation, chrono::Utc, futures::FutureExt, mockall::{Sequence, predicate::eq}, @@ -853,10 +854,9 @@ mod tests { additional_gas: 0, timeout: None, }; - let gas_price = GasPrice1559 { - base_fee_per_gas: 1.5, - max_fee_per_gas: 3.0, - max_priority_fee_per_gas: 0.5, + let gas_price = Eip1559Estimation { + max_fee_per_gas: 2, + max_priority_fee_per_gas: 1, }; let mut price_estimator = MockPriceEstimating::new(); @@ -994,10 +994,9 @@ mod tests { additional_gas: 2, timeout: None, }; - let gas_price = GasPrice1559 { - base_fee_per_gas: 1.5, - max_fee_per_gas: 3.0, - max_priority_fee_per_gas: 0.5, + let gas_price = Eip1559Estimation { + max_fee_per_gas: 2, + max_priority_fee_per_gas: 1, }; let mut price_estimator = MockPriceEstimating::new(); @@ -1130,10 
+1129,9 @@ mod tests { additional_gas: 0, timeout: None, }; - let gas_price = GasPrice1559 { - base_fee_per_gas: 1.5, - max_fee_per_gas: 3.0, - max_priority_fee_per_gas: 0.5, + let gas_price = Eip1559Estimation { + max_fee_per_gas: 2, + max_priority_fee_per_gas: 1, }; let mut price_estimator = MockPriceEstimating::new(); @@ -1267,10 +1265,9 @@ mod tests { additional_gas: 0, timeout: None, }; - let gas_price = GasPrice1559 { - base_fee_per_gas: 1., - max_fee_per_gas: 2., - max_priority_fee_per_gas: 0., + let gas_price = Eip1559Estimation { + max_fee_per_gas: 1, + max_priority_fee_per_gas: 0, }; let mut price_estimator = MockPriceEstimating::new(); @@ -1341,10 +1338,9 @@ mod tests { additional_gas: 0, timeout: None, }; - let gas_price = GasPrice1559 { - base_fee_per_gas: 1., - max_fee_per_gas: 2., - max_priority_fee_per_gas: 0., + let gas_price = Eip1559Estimation { + max_fee_per_gas: 2, + max_priority_fee_per_gas: 0, }; let mut price_estimator = MockPriceEstimating::new(); diff --git a/crates/shared/src/price_estimation/competition/quote.rs b/crates/shared/src/price_estimation/competition/quote.rs index b0ef01a748..8c6be7c544 100644 --- a/crates/shared/src/price_estimation/competition/quote.rs +++ b/crates/shared/src/price_estimation/competition/quote.rs @@ -112,8 +112,7 @@ impl PriceRanking { let gas = gas.clone(); let native = native.clone(); let gas = gas - .estimate() - .map_ok(|gas| gas.effective_gas_price()) + .effective_gas_price() .map_err(PriceEstimationError::ProtocolInternal); let (native_price, gas_price) = futures::try_join!( native.estimate_native_price(token.into_alloy(), timeout), @@ -122,7 +121,7 @@ impl PriceRanking { Ok(RankingContext { native_price, - gas_price, + gas_price: gas_price as f64, }) } } @@ -159,14 +158,14 @@ mod tests { use { super::*, crate::{ - gas_price_estimation::{FakeGasPriceEstimator, price::GasPrice1559}, + gas_price_estimation::FakeGasPriceEstimator, price_estimation::{ MockPriceEstimating, QuoteVerificationMode, native::MockNativePriceEstimating, }, }, - alloy::primitives::U256, + alloy::{eips::eip1559::Eip1559Estimation, primitives::U256}, model::order::OrderKind, }; @@ -193,10 +192,9 @@ mod tests { native .expect_estimate_native_price() .returning(move |_, _| async { Ok(0.5) }.boxed()); - let gas = Arc::new(FakeGasPriceEstimator::new(GasPrice1559 { - base_fee_per_gas: 2.0, - max_fee_per_gas: 2.0, - max_priority_fee_per_gas: 2.0, + let gas = Arc::new(FakeGasPriceEstimator::new(Eip1559Estimation { + max_fee_per_gas: 2, + max_priority_fee_per_gas: 2, })); PriceRanking::BestBangForBuck { native: Arc::new(native), From 0519c04f9584e461f059b81eb9709a72e3c4a72a Mon Sep 17 00:00:00 2001 From: ilya Date: Thu, 29 Jan 2026 13:39:44 +0300 Subject: [PATCH 017/219] [TRIVIAL] Add Plasma to OpenApi (#4100) Adds Plasma URLs to the orderbook's OpenAPI. 
--- crates/orderbook/openapi.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/crates/orderbook/openapi.yml b/crates/orderbook/openapi.yml index 3027e8fbd6..79a07a488a 100644 --- a/crates/orderbook/openapi.yml +++ b/crates/orderbook/openapi.yml @@ -39,6 +39,10 @@ servers: url: "https://api.cow.fi/bnb" - description: BNB (Staging) url: "https://barn.api.cow.fi/bnb" + - description: Plasma (Prod) + url: "https://api.cow.fi/plasma" + - description: Plasma (Staging) + url: "https://barn.api.cow.fi/plasma" - description: Sepolia (Prod) url: "https://api.cow.fi/sepolia" - description: Sepolia (Staging) From 0e65c61b39d18b56c46c38c44872f4c5f2f9390d Mon Sep 17 00:00:00 2001 From: Marcin Szymczak Date: Thu, 29 Jan 2026 12:16:26 +0100 Subject: [PATCH 018/219] Optimize order queries by using true_valid_to (#4104) # Description Reinstates queries from https://github.com/cowprotocol/services/pull/4055 to accelerate live order queries based on the newly introduced `true_valid_to` column and its indexes. Tested on a test-db created by @MartinquaXD which contains a snapshot of prod data. The optimized queries run significantly faster due to changes in `orders` table and new indices. # Changes - [x] Adapt user_orders_with_quote query to use new column - [x] Adapt solvable_orders query to use new column ## How to test Tested on the test database created by @MartinquaXD by analyzing query plan (`EXPLAIN (ANALYZE, BUFFERS)`). In the worst case, the latency has improved 40x (from 20s to 0.5s). --- crates/database/src/orders.rs | 96 +++++++++++++++-------------------- database/README.md | 2 + 2 files changed, 44 insertions(+), 54 deletions(-) diff --git a/crates/database/src/orders.rs b/crates/database/src/orders.rs index d1f77cb052..8da392897a 100644 --- a/crates/database/src/orders.rs +++ b/crates/database/src/orders.rs @@ -721,26 +721,23 @@ pub fn solvable_orders( /// - pending pre-signature /// - ethflow specific invalidation conditions const OPEN_ORDERS: &str = r#" - WITH live_orders AS ( + WITH live_orders AS MATERIALIZED ( SELECT o.* FROM orders o - LEFT JOIN ethflow_orders e ON e.uid = o.uid WHERE o.cancellation_timestamp IS NULL - AND o.valid_to >= $1 - AND (e.valid_to IS NULL OR e.valid_to >= $1) - AND NOT EXISTS (SELECT 1 FROM invalidations i WHERE i.order_uid = o.uid) - AND NOT EXISTS (SELECT 1 FROM onchain_order_invalidations oi WHERE oi.uid = o.uid) - AND NOT EXISTS (SELECT 1 FROM onchain_placed_orders op WHERE op.uid = o.uid - AND op.placement_error IS NOT NULL) + AND o.true_valid_to >= $1 + AND NOT EXISTS (SELECT 1 FROM invalidations i WHERE i.order_uid = o.uid) + AND NOT EXISTS (SELECT 1 FROM onchain_order_invalidations oi WHERE oi.uid = o.uid) + AND NOT EXISTS (SELECT 1 FROM onchain_placed_orders op WHERE op.uid = o.uid AND op.placement_error IS NOT NULL) ), trades_agg AS ( - SELECT t.order_uid, - SUM(t.buy_amount) AS sum_buy, - SUM(t.sell_amount) AS sum_sell, - SUM(t.fee_amount) AS sum_fee - FROM trades t - JOIN live_orders lo ON lo.uid = t.order_uid - GROUP BY t.order_uid + SELECT t.order_uid, + SUM(t.buy_amount) AS sum_buy, + SUM(t.sell_amount) AS sum_sell, + SUM(t.fee_amount) AS sum_fee + FROM trades t + JOIN live_orders lo ON lo.uid = t.order_uid + GROUP BY t.order_uid ) SELECT lo.uid, @@ -960,46 +957,37 @@ pub async fn user_orders_with_quote( owner: &Address, ) -> Result, sqlx::Error> { // Optimized version following the same pattern as OPEN_ORDERS - #[rustfmt::skip] const QUERY: &str = r#" -WITH live_orders AS ( - SELECT o.* - FROM orders o - LEFT JOIN ethflow_orders e ON e.uid 
= o.uid - WHERE o.cancellation_timestamp IS NULL - AND o.valid_to >= $1 - AND (e.valid_to IS NULL OR e.valid_to >= $1) - AND NOT EXISTS (SELECT 1 FROM invalidations i WHERE i.order_uid = o.uid) - AND NOT EXISTS (SELECT 1 FROM onchain_order_invalidations oi WHERE oi.uid = o.uid) - AND NOT EXISTS (SELECT 1 FROM onchain_placed_orders op WHERE op.uid = o.uid - AND op.placement_error IS NOT NULL) - AND o.owner = $2 - AND o.class = 'limit' -), -trades_agg AS ( - SELECT t.order_uid, - SUM(t.buy_amount) AS sum_buy, - SUM(t.sell_amount) AS sum_sell, - SUM(t.fee_amount) AS sum_fee - FROM trades t - JOIN live_orders lo ON lo.uid = t.order_uid - GROUP BY t.order_uid -) -SELECT - o_quotes.sell_amount as quote_sell_amount, - lo.sell_amount as order_sell_amount, - o_quotes.buy_amount as quote_buy_amount, - lo.buy_amount as order_buy_amount, - lo.kind as order_kind, - o_quotes.gas_amount as quote_gas_amount, - o_quotes.gas_price as quote_gas_price, - o_quotes.sell_token_price as quote_sell_token_price -FROM live_orders lo -LEFT JOIN trades_agg ta ON ta.order_uid = lo.uid -INNER JOIN order_quotes o_quotes ON lo.uid = o_quotes.order_uid -WHERE ((lo.kind = 'sell' AND COALESCE(ta.sum_sell,0) < lo.sell_amount) OR - (lo.kind = 'buy' AND COALESCE(ta.sum_buy ,0) < lo.buy_amount)) -"#; + WITH live_orders AS MATERIALIZED ( + SELECT o.* + FROM orders o + WHERE o.cancellation_timestamp IS NULL + AND o.true_valid_to >= $1 + AND NOT EXISTS (SELECT 1 FROM invalidations i WHERE i.order_uid = o.uid) + AND NOT EXISTS (SELECT 1 FROM onchain_order_invalidations oi WHERE oi.uid = o.uid) + AND NOT EXISTS (SELECT 1 FROM onchain_placed_orders op WHERE op.uid = o.uid AND op.placement_error IS NOT NULL) + AND o.owner = $2 + AND o.class = 'limit' + ) + SELECT + o_quotes.sell_amount AS quote_sell_amount, + lo.sell_amount AS order_sell_amount, + o_quotes.buy_amount AS quote_buy_amount, + lo.buy_amount AS order_buy_amount, + lo.kind AS order_kind, + o_quotes.gas_amount AS quote_gas_amount, + o_quotes.gas_price AS quote_gas_price, + o_quotes.sell_token_price AS quote_sell_token_price + FROM live_orders lo + INNER JOIN order_quotes o_quotes ON lo.uid = o_quotes.order_uid + WHERE ( + lo.kind = 'sell' + AND COALESCE((SELECT SUM(sell_amount) FROM trades WHERE order_uid = lo.uid), 0) < lo.sell_amount + ) OR ( + lo.kind = 'buy' + AND COALESCE((SELECT SUM(buy_amount) FROM trades WHERE order_uid = lo.uid), 0) < lo.buy_amount + ); + "#; sqlx::query_as::<_, OrderWithQuote>(QUERY) .bind(min_valid_to) .bind(owner) diff --git a/database/README.md b/database/README.md index 32819cf1d5..155d6731f7 100644 --- a/database/README.md +++ b/database/README.md @@ -176,6 +176,7 @@ Indexes: - PRIMARY KEY: btree(`uid`) - event\_index: btree(`block_number`, `index`) - order\_sender: hash(sender) +- okay\_onchain\_orders: btree(`uid`) WHERE placement\_error IS NOT NULL ### order\_events @@ -273,6 +274,7 @@ Indexes: - order_sell_buy_tokens: btree(`sell_token`, `buy_token`) - user_order_creation_timestamp: btree(`owner`, `creation_timestamp` DESC) - version_idx: btree(`settlement_contract`) +- orders\_true\_valid\_to: btree(`true_valid_to`) ### fee_policies From 7fb5cc43cfcfb78709117b69a4f831ea95bf6e48 Mon Sep 17 00:00:00 2001 From: ilya Date: Thu, 29 Jan 2026 18:04:25 +0300 Subject: [PATCH 019/219] Fix haircut logic (#4093) # Description The haircut feature had a critical bug where the driver-reported `sell_amount` would exceed the user's signed one. 
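In other words, for fill-or-kill orders the reporting path broke the invariant `reported_sell <= signed_sell`. A minimal, self-contained sketch of that invariant (simplified numbers and hypothetical variable names, not the actual driver types); the concrete scenario is walked through below:

```rust
fn main() {
    // Fill-or-kill sell order: the user signs an exact sell amount (in wei).
    let signed_sell_amount: u128 = 5_000_000_000_000_000_000; // 5 ETH
    let executed: u128 = signed_sell_amount; // fill-or-kill executes exactly
    let fee: u128 = 0; // fees left out for simplicity
    let haircut: u128 = executed * 5 / 100; // 5% haircut

    // Buggy reporting: the haircut was folded into the reported sell amount.
    let reported_before_fix = executed + fee + haircut; // 5.25 ETH
    // Fixed reporting: only what actually left the user's wallet.
    let reported_after_fix = executed + fee; // 5 ETH

    assert!(reported_before_fix > signed_sell_amount); // what confused the autopilot
    assert!(reported_after_fix <= signed_sell_amount); // invariant restored
}
```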
For example: - User signed: sell_amount = 5 ETH - Solver proposed a solution with the same sell amount - Driver reported: sellAmount = 5.25 ETH (with 5% haircut added) - The settlement executed onchain, but autopilot couldn't make sense of it due to the unexpected sell amount - The circuit breaker also detects this and bans the solver # Changes 1. Removed haircut from `sell_amount()` - Now returns only executed + fee, which is the actual amount that left the user's wallet 2. Added `haircut_in_sell_token()` helper - Computes haircut amount converted to sell token 3. Updated `custom_prices()` - Applies haircut only for quotes/scoring purposes, making bids more conservative without affecting reported amounts 4. Added `Jit::custom_prices()` - JIT orders don't have a haircut (for now), so they use simple sell/buy amount derivation ## How to test Adjusted existing and added new tests that fail on the `main` branch, but work with the fix. --- .../src/domain/competition/solution/trade.rs | 64 +++-- crates/driver/src/tests/cases/haircut.rs | 99 +++++++- crates/driver/src/tests/setup/mod.rs | 2 +- crates/e2e/tests/e2e/limit_orders.rs | 239 +++++++++++++++++- 4 files changed, 370 insertions(+), 34 deletions(-) diff --git a/crates/driver/src/domain/competition/solution/trade.rs b/crates/driver/src/domain/competition/solution/trade.rs index 89495728f4..40a97ebf2c 100644 --- a/crates/driver/src/domain/competition/solution/trade.rs +++ b/crates/driver/src/domain/competition/solution/trade.rs @@ -96,10 +96,10 @@ impl Trade { &self, prices: &ClearingPrices, ) -> Result { - Ok(CustomClearingPrices { - sell: self.buy_amount(prices)?.into(), - buy: self.sell_amount(prices)?.into(), - }) + match self { + Trade::Fulfillment(fulfillment) => fulfillment.custom_prices(prices), + Trade::Jit(jit) => jit.custom_prices(prices), + } } pub fn receiver(&self) -> eth::Address { @@ -224,25 +224,8 @@ impl Fulfillment { .ok_or(Math::DivisionByZero)?, }; - // haircut_fee is denominated in the order's target token (sell token for - // sell orders, buy token for buy orders). Convert to sell token for buy - // orders. - let haircut_in_sell_token = match self.order.side { - order::Side::Sell => self.haircut_fee, - order::Side::Buy => self - .haircut_fee - .checked_mul(prices.buy) - .ok_or(Math::Overflow)? - .checked_div(prices.sell) - .ok_or(Math::DivisionByZero)?, - }; - Ok(eth::TokenAmount( - before_fee - .checked_add(self.fee().0) - .ok_or(Math::Overflow)? - .checked_add(haircut_in_sell_token) - .ok_or(Math::Overflow)?, + before_fee.checked_add(self.fee().0).ok_or(Math::Overflow)?, )) } @@ -263,13 +246,36 @@ impl Fulfillment { Ok(eth::TokenAmount(amount)) } + /// Computes the haircut amount in sell token for use in custom_prices(). + /// This applies haircut to pricing while keeping sell_amount() clean for + /// reporting. + fn haircut_in_sell_token(&self, prices: &ClearingPrices) -> Result { + match self.order.side { + order::Side::Sell => Ok(self.haircut_fee), + order::Side::Buy => self + .haircut_fee + .checked_mul(prices.buy) + .ok_or(Math::Overflow)? + .checked_div(prices.sell) + .ok_or(Math::DivisionByZero), + } + } + pub fn custom_prices( &self, prices: &ClearingPrices, ) -> Result { + // Include haircut in custom prices for quotes/scoring. + // This makes bids more conservative without affecting the actual + // reported sell_amount (which is used for user-facing reporting). 
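+        // Concretely: raising the custom buy-token price by the haircut makes
+        // the derived output amount for the same executed sell slightly smaller,
+        // which lowers the surplus used for scoring, while `sell_amount()`
+        // (executed amount plus fee) stays untouched for reporting.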
+ let haircut = self.haircut_in_sell_token(prices)?; Ok(CustomClearingPrices { sell: self.buy_amount(prices)?.into(), - buy: self.sell_amount(prices)?.into(), + buy: self + .sell_amount(prices)? + .0 + .checked_add(haircut) + .ok_or(Math::Overflow)?, }) } @@ -509,6 +515,18 @@ impl Jit { }; Ok(eth::TokenAmount(amount)) } + + pub fn custom_prices( + &self, + prices: &ClearingPrices, + ) -> Result { + // JIT orders don't have haircut, so custom prices are simply derived + // from sell_amount and buy_amount. + Ok(CustomClearingPrices { + sell: self.buy_amount(prices)?.into(), + buy: self.sell_amount(prices)?.into(), + }) + } } /// The amounts executed by a trade. diff --git a/crates/driver/src/tests/cases/haircut.rs b/crates/driver/src/tests/cases/haircut.rs index 770a39731e..86093659e9 100644 --- a/crates/driver/src/tests/cases/haircut.rs +++ b/crates/driver/src/tests/cases/haircut.rs @@ -13,9 +13,12 @@ use { number::units::EthUnit, }; -/// Test that haircut correctly reduces the solution score for orders in real -/// auctions. The haircut adjusts clearing prices to report lower output -/// amounts, making the bid more conservative. +/// Test that haircut correctly reduces the solution score for sell orders. +/// The haircut adjusts clearing prices to report lower output amounts, making +/// the bid more conservative. +/// +/// Also verifies that the reported sell amount matches the user's signed +/// sell amount exactly (fill-or-kill requires exact execution). #[tokio::test] #[ignore] async fn order_haircut_reduces_score() { @@ -24,6 +27,7 @@ async fn order_haircut_reduces_score() { // We set a generous buy_amount limit (e.g., 2 B) to create slack let side = order::Side::Sell; let kind = order::Kind::Limit; + let signed_sell_amount = ab_order().sell_amount; // First, get baseline without haircut let test_no_haircut = tests::setup() @@ -77,6 +81,37 @@ async fn order_haircut_reduces_score() { score_no_haircut, percentage ); + + // Verify that reported sell amount matches signed amount exactly. + // Fill-or-kill orders require exact execution. + let solution = solve_with_haircut.solution(); + let orders = solution.get("orders").unwrap().as_object().unwrap(); + for (_uid, order_data) in orders { + let executed_sell = order_data + .get("executedSell") + .and_then(|v| v.as_str()) + .and_then(|s| eth::U256::from_str_radix(s, 10).ok()) + .unwrap(); + let limit_sell = order_data + .get("limitSell") + .and_then(|v| v.as_str()) + .and_then(|s| eth::U256::from_str_radix(s, 10).ok()) + .unwrap(); + + assert!( + executed_sell == signed_sell_amount, + "Sell order: executedSell {} does not match signed sell amount {} (fill-or-kill \ + requires exact execution)", + executed_sell, + signed_sell_amount + ); + assert!( + executed_sell <= limit_sell, + "executedSell {} exceeds limitSell {}", + executed_sell, + limit_sell + ); + } } /// Test that haircut is properly applied for buy orders. @@ -85,11 +120,17 @@ async fn order_haircut_reduces_score() { /// score. Note: The percentage reduction for buy orders differs from sell /// orders because the haircut is applied to the executed buy amount, not /// directly to surplus. 
+/// +/// Also verifies that: +/// - `executedBuy == signedBuyAmount` (fill-or-kill must execute exactly) +/// - `executedSell <= sellLimit` (don't take more than user's maximum) #[tokio::test] #[ignore] async fn buy_order_haircut() { let side = order::Side::Buy; let kind = order::Kind::Limit; + let signed_buy_amount = 2u64.eth(); + let sell_limit = 100u64.ether().into_wei(); // For buy orders, we need to set a buy_amount that creates enough surplus. // The pool has 100000:6000 ratio. For a buy order wanting 2.97 B, @@ -101,8 +142,8 @@ async fn buy_order_haircut() { ab_order() .side(side) .kind(kind) - .buy_amount(2u64.eth()) // Target buy amount - .sell_amount(100u64.ether().into_wei()) // Generous sell limit creates surplus + .buy_amount(signed_buy_amount) // Target buy amount (what user signs for) + .sell_amount(sell_limit) // Generous sell limit creates surplus .solver_fee(Some(eth::U256::from(100))), ) .solution(ab_solution()) @@ -120,8 +161,8 @@ async fn buy_order_haircut() { ab_order() .side(side) .kind(kind) - .buy_amount(2u64.eth()) // Same target buy amount - .sell_amount(100u64.ether().into_wei()) // Same generous sell limit + .buy_amount(signed_buy_amount) // Same target buy amount + .sell_amount(sell_limit) // Same generous sell limit .solver_fee(Some(eth::U256::from(100))), ) .solution(ab_solution()) @@ -152,4 +193,48 @@ async fn buy_order_haircut() { score_no_haircut, percentage ); + + // Verify buy order constraints: + // - Fill-or-kill must execute exactly (executedBuy == signedBuyAmount) + // - Don't take more than user's maximum (executedSell <= sellLimit) + let solution = solve_with_haircut.solution(); + let orders = solution.get("orders").unwrap().as_object().unwrap(); + for (_uid, order_data) in orders { + let executed_sell = order_data + .get("executedSell") + .and_then(|v| v.as_str()) + .and_then(|s| eth::U256::from_str_radix(s, 10).ok()) + .unwrap(); + let executed_buy = order_data + .get("executedBuy") + .and_then(|v| v.as_str()) + .and_then(|s| eth::U256::from_str_radix(s, 10).ok()) + .unwrap(); + let limit_sell = order_data + .get("limitSell") + .and_then(|v| v.as_str()) + .and_then(|s| eth::U256::from_str_radix(s, 10).ok()) + .unwrap(); + + assert!( + executed_buy == signed_buy_amount, + "Buy order: executedBuy {} does not match signed buy amount {} (fill-or-kill requires \ + exact execution)", + executed_buy, + signed_buy_amount + ); + assert!( + executed_sell <= sell_limit, + "Buy order: executedSell {} exceeds sell limit {}. Haircut should reduce surplus, not \ + inflate sell amount!", + executed_sell, + sell_limit + ); + assert!( + executed_sell <= limit_sell, + "executedSell {} exceeds limitSell {}", + executed_sell, + limit_sell + ); + } } diff --git a/crates/driver/src/tests/setup/mod.rs b/crates/driver/src/tests/setup/mod.rs index 09f62f2ca0..e336fe7945 100644 --- a/crates/driver/src/tests/setup/mod.rs +++ b/crates/driver/src/tests/setup/mod.rs @@ -1266,7 +1266,7 @@ impl SolveOk<'_> { /// Extracts the first solution from the response. This is expected to be /// always valid if there is a valid solution, as we expect from driver to /// not send multiple solutions (yet). 
- fn solution(&self) -> serde_json::Value { + pub fn solution(&self) -> serde_json::Value { let solutions = self.solutions(); assert_eq!(solutions.len(), 1); let solution = solutions[0].clone(); diff --git a/crates/e2e/tests/e2e/limit_orders.rs b/crates/e2e/tests/e2e/limit_orders.rs index ca278b097d..ed71e6e609 100644 --- a/crates/e2e/tests/e2e/limit_orders.rs +++ b/crates/e2e/tests/e2e/limit_orders.rs @@ -57,13 +57,23 @@ async fn local_node_no_liquidity_limit_order() { run_test(no_liquidity_limit_order).await; } -/// Test that orders with haircut configured still execute on-chain. +/// Test that sell orders with haircut configured still execute on-chain. /// The haircut reduces the reported surplus but the order should still be /// fillable and execute successfully. #[tokio::test] #[ignore] async fn local_node_limit_order_with_haircut() { - run_test(limit_order_with_haircut_test).await; + run_test(sell_order_with_haircut_test).await; +} + +/// Test that buy orders with haircut configured still execute on-chain. +/// For buy orders, verifies that: +/// - executedBuy >= signedBuyAmount (user gets at least what they signed for) +/// - executedSell <= sellLimit (don't take more than user's maximum) +#[tokio::test] +#[ignore] +async fn local_node_buy_order_with_haircut() { + run_test(buy_order_with_haircut_test).await; } /// The block number from which we will fetch state for the forked tests. @@ -1175,7 +1185,7 @@ async fn no_liquidity_limit_order(web3: Web3) { /// Test that a limit order with haircut configured still executes on-chain. /// The haircut adjusts clearing prices to report lower surplus, but the order /// should still be fillable since the limit price allows for enough slack. -async fn limit_order_with_haircut_test(web3: Web3) { +async fn sell_order_with_haircut_test(web3: Web3) { let mut onchain = OnchainComponents::deploy(web3.clone()).await; let [solver] = onchain.make_solvers(1u64.eth()).await; @@ -1322,4 +1332,227 @@ async fn limit_order_with_haircut_test(web3: Web3) { "Trader should have received between 9 and 9.5 ETH (AMM output minus haircut), but got {}", trader_received ); + + // Wait for solver competition data to be indexed + tracing::info!("Waiting for solver competition to be indexed"); + let indexed = || async { + onchain.mint_block().await; + match services.get_trades(&order_id).await.unwrap().first() { + Some(trade) => services + .get_solver_competition(trade.tx_hash.unwrap()) + .await + .is_ok(), + None => false, + } + }; + wait_for_condition(TIMEOUT, indexed).await.unwrap(); + + let trades = services.get_trades(&order_id).await.unwrap(); + let tx_hash = trades[0].tx_hash.unwrap(); + let competition = services.get_solver_competition(tx_hash).await.unwrap(); + + // Find our order in the winning solution + let winner = competition + .solutions + .iter() + .find(|s| s.is_winner) + .expect("Should have winning solution"); + + let reported_order = winner + .orders + .iter() + .find(|o| o.id == order_id) + .expect("Order should be in solution"); + + let signed_sell_amount = U256::from(order.sell_amount); + let reported_sell_amount = reported_order.sell_amount; + + assert!( + reported_sell_amount <= signed_sell_amount, + "Driver reported sell_amount {} exceeds signed sell_amount {}. Haircut should reduce \ + surplus/score, not inflate the reported sell amount!", + reported_sell_amount, + signed_sell_amount + ); +} + +/// Test that a buy order with haircut configured executes correctly. 
+/// For buy orders, the user signs for a specific buy_amount they want to +/// receive, and sell_amount is the maximum they're willing to pay. +/// Verifies that reported amounts respect these constraints. +async fn buy_order_with_haircut_test(web3: Web3) { + let mut onchain = OnchainComponents::deploy(web3.clone()).await; + + let [solver] = onchain.make_solvers(1u64.eth()).await; + let [trader] = onchain.make_accounts(1u64.eth()).await; + let [token_a, token_b] = onchain + .deploy_tokens_with_weth_uni_v2_pools(1_000u64.eth(), 1_000u64.eth()) + .await; + + // Create and fund Uniswap pool (1:1 ratio) + token_a.mint(solver.address(), 1000u64.eth()).await; + token_b.mint(solver.address(), 1000u64.eth()).await; + onchain + .contracts() + .uniswap_v2_factory + .createPair(*token_a.address(), *token_b.address()) + .from(solver.address()) + .send_and_watch() + .await + .unwrap(); + + token_a + .approve( + *onchain.contracts().uniswap_v2_router.address(), + 1000u64.eth(), + ) + .from(solver.address()) + .send_and_watch() + .await + .unwrap(); + + token_b + .approve( + *onchain.contracts().uniswap_v2_router.address(), + 1000u64.eth(), + ) + .from(solver.address()) + .send_and_watch() + .await + .unwrap(); + + onchain + .contracts() + .uniswap_v2_router + .addLiquidity( + *token_a.address(), + *token_b.address(), + 1000u64.eth(), + 1000u64.eth(), + U256::ZERO, + U256::ZERO, + solver.address(), + U256::MAX, + ) + .from(solver.address()) + .send_and_watch() + .await + .unwrap(); + + // Fund trader with token_a for selling + token_a.mint(trader.address(), 100u64.eth()).await; + token_a + .approve(onchain.contracts().allowance, 100u64.eth()) + .from(trader.address()) + .send_and_watch() + .await + .unwrap(); + + // Start protocol with 500 bps (5%) haircut + let services = Services::new(&onchain).await; + services + .start_protocol_with_args_and_haircut(Default::default(), solver, 500) + .await; + + // Create BUY order: want to buy 5 token_b, willing to sell up to 10 token_a. + // At 1:1 ratio (with ~0.3% AMM fee), we'd need ~5.04 token_a. + // We use a generous sell_limit to ensure the order executes, then verify + // that the driver's reported sell_amount doesn't exceed reasonable bounds. 
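+    // Sanity check on the numbers: with a 1000/1000 constant-product pool and a
+    // 0.3% fee, buying 5 token_b costs about 1000 * 5 / (995 * 0.997) ~= 5.04
+    // token_a, which is where the ~5.04 figure used in this test comes from.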
+ let signed_buy_amount = 5u64.eth(); + let sell_limit = 10u64.eth(); // Generous limit to ensure execution + let order = OrderCreation { + sell_token: *token_a.address(), + sell_amount: sell_limit, + buy_token: *token_b.address(), + buy_amount: signed_buy_amount, + valid_to: model::time::now_in_epoch_seconds() + 300, + kind: OrderKind::Buy, + ..Default::default() + } + .sign( + EcdsaSigningScheme::Eip712, + &onchain.contracts().domain_separator, + &trader.signer, + ); + let order_id = services.create_order(&order).await.unwrap(); + + onchain.mint_block().await; + let limit_order = services.get_order(&order_id).await.unwrap(); + assert_eq!(limit_order.metadata.class, OrderClass::Limit); + + // Wait for trade to execute + tracing::info!("Waiting for buy order trade with haircut."); + let trader_b_balance_before = token_b.balanceOf(trader.address()).call().await.unwrap(); + wait_for_condition(TIMEOUT, || async { + let balance_after = token_b.balanceOf(trader.address()).call().await.unwrap(); + balance_after > trader_b_balance_before + }) + .await + .unwrap(); + + // Wait for solver competition data to be indexed + tracing::info!("Waiting for solver competition to be indexed"); + let indexed = || async { + onchain.mint_block().await; + match services.get_trades(&order_id).await.unwrap().first() { + Some(trade) => services + .get_solver_competition(trade.tx_hash.unwrap()) + .await + .is_ok(), + None => false, + } + }; + wait_for_condition(TIMEOUT, indexed).await.unwrap(); + + let trades = services.get_trades(&order_id).await.unwrap(); + let tx_hash = trades[0].tx_hash.unwrap(); + let competition = services.get_solver_competition(tx_hash).await.unwrap(); + + // Find our order in the winning solution + let winner = competition + .solutions + .iter() + .find(|s| s.is_winner) + .expect("Should have winning solution"); + + let reported_order = winner + .orders + .iter() + .find(|o| o.id == order_id) + .expect("Order should be in solution"); + + let signed_buy_amount_u256 = U256::from(signed_buy_amount); + let sell_limit_u256 = U256::from(sell_limit); + let reported_sell_amount = reported_order.sell_amount; + let reported_buy_amount = reported_order.buy_amount; + + // For buy orders: + // 1. User should get at least what they signed for + assert!( + reported_buy_amount >= signed_buy_amount_u256, + "Buy order: reported buy_amount {} is less than signed buy_amount {}", + reported_buy_amount, + signed_buy_amount_u256 + ); + + // 2. Don't take more than user's maximum + assert!( + reported_sell_amount <= sell_limit_u256, + "Driver reported sell_amount {} exceeds sell limit {}", + reported_sell_amount, + sell_limit_u256 + ); + + // 3. Reported sell_amount should be close to what's actually needed (~5.04 ETH + // for buying 5 ETH at 1:1 with 0.3% fee). + // We check that sell_amount is less than 5.2 ETH (5.0 ETH + 5% haircut = 5.25 + // ETH). + let reasonable_max_sell = U256::from(5_200_000_000_000_000_000u128); // 5.2 ETH + assert!( + reported_sell_amount <= reasonable_max_sell, + "Driver reported sell_amount {} exceeds expected max {} (actual needed is ~5.04 ETH). \ + Haircut should reduce surplus/score, not inflate the reported sell amount!", + reported_sell_amount, + reasonable_max_sell + ); } From d63e52f480ed6273234c9b4fbfe13605c71be54f Mon Sep 17 00:00:00 2001 From: ilya Date: Fri, 30 Jan 2026 15:55:55 +0300 Subject: [PATCH 020/219] [TRIVIAL] Drop the db solver participation guard (#4099) # Description Cleans up the codebase by removing the DB solver participation guard. 
It's been used in a log-only mode for a while. Given the lack of demand for this functionality, it doesn't make sense to keep it. Also, even if it were decided to enable it, the logic would need to be reworked to cover some edge cases, which would take some time to implement. --- crates/autopilot/src/arguments.rs | 151 +----- .../autopilot/src/domain/competition/mod.rs | 6 +- .../competition/participation_guard/db.rs | 210 -------- .../competition/participation_guard/mod.rs | 67 --- .../participation_guard/onchain.rs | 20 - .../domain/competition/winner_selection.rs | 1 - crates/autopilot/src/domain/mod.rs | 5 - crates/autopilot/src/infra/mod.rs | 2 +- crates/autopilot/src/infra/persistence/mod.rs | 57 --- crates/autopilot/src/infra/solvers/dto/mod.rs | 1 - .../autopilot/src/infra/solvers/dto/notify.rs | 34 -- crates/autopilot/src/infra/solvers/mod.rs | 27 +- crates/autopilot/src/run.rs | 17 +- crates/autopilot/src/run_loop.rs | 37 +- crates/database/src/solver_competition_v2.rs | 454 ------------------ crates/driver/src/infra/api/mod.rs | 1 - crates/driver/src/infra/api/routes/mod.rs | 2 - .../src/infra/api/routes/notify/dto/mod.rs | 3 - .../api/routes/notify/dto/notify_request.rs | 42 -- .../driver/src/infra/api/routes/notify/mod.rs | 17 - crates/driver/src/infra/notify/mod.rs | 9 +- .../driver/src/infra/notify/notification.rs | 14 - .../src/infra/solver/dto/notification.rs | 11 - crates/e2e/src/setup/services.rs | 2 - crates/e2e/tests/e2e/main.rs | 1 - .../tests/e2e/solver_participation_guard.rs | 359 -------------- crates/solvers-dto/src/notification.rs | 12 - 27 files changed, 15 insertions(+), 1547 deletions(-) delete mode 100644 crates/autopilot/src/domain/competition/participation_guard/db.rs delete mode 100644 crates/autopilot/src/domain/competition/participation_guard/mod.rs delete mode 100644 crates/autopilot/src/domain/competition/participation_guard/onchain.rs delete mode 100644 crates/autopilot/src/infra/solvers/dto/notify.rs delete mode 100644 crates/driver/src/infra/api/routes/notify/dto/mod.rs delete mode 100644 crates/driver/src/infra/api/routes/notify/dto/notify_request.rs delete mode 100644 crates/driver/src/infra/api/routes/notify/mod.rs delete mode 100644 crates/e2e/tests/e2e/solver_participation_guard.rs diff --git a/crates/autopilot/src/arguments.rs b/crates/autopilot/src/arguments.rs index 1d0b89897d..a6cb1cf8ca 100644 --- a/crates/autopilot/src/arguments.rs +++ b/crates/autopilot/src/arguments.rs @@ -248,10 +248,6 @@ pub struct Arguments { #[clap(long, env)] pub archive_node_url: Option, - /// Configuration for the solver participation guard. - #[clap(flatten)] - pub db_based_solver_participation_guard: DbBasedSolverParticipationGuardConfig, - /// Configures whether the autopilot filters out orders with insufficient /// balances. #[clap(long, env, default_value = "false", action = clap::ArgAction::Set)] @@ -282,89 +278,6 @@ pub struct Arguments { pub max_maintenance_timeout: Duration, } -#[derive(Debug, clap::Parser)] -pub struct DbBasedSolverParticipationGuardConfig { - /// Enables or disables the solver participation guard - #[clap( - id = "db_enabled", - long = "db-based-solver-participation-guard-enabled", - env = "DB_BASED_SOLVER_PARTICIPATION_GUARD_ENABLED", - default_value = "true" - )] - pub enabled: bool, - - /// Sets the duration for which the solver remains blacklisted. - /// Technically, the time-to-live for the solver participation blacklist - /// cache. 
- #[clap(long, env, default_value = "5m", value_parser = humantime::parse_duration)] - pub solver_blacklist_cache_ttl: Duration, - - #[clap(flatten)] - pub non_settling_solvers_finder_config: NonSettlingSolversFinderConfig, - - #[clap(flatten)] - pub low_settling_solvers_finder_config: LowSettlingSolversFinderConfig, -} - -#[derive(Debug, clap::Parser)] -pub struct NonSettlingSolversFinderConfig { - /// Enables search of non-settling solvers. - #[clap( - id = "non_settling_solvers_blacklisting_enabled", - long = "non-settling-solvers-blacklisting-enabled", - env = "NON_SETTLING_SOLVERS_BLACKLISTING_ENABLED", - default_value = "true", - action = clap::ArgAction::Set, - )] - pub enabled: bool, - - /// The number of last auctions to check solver participation eligibility. - #[clap( - id = "non_settling_last_auctions_participation_count", - long = "non-settling-last-auctions-participation-count", - env = "NON_SETTLING_LAST_AUCTIONS_PARTICIPATION_COUNT", - default_value = "3" - )] - pub last_auctions_participation_count: u32, -} - -#[derive(Debug, clap::Parser)] -pub struct LowSettlingSolversFinderConfig { - /// Enables search of non-settling solvers. - #[clap( - id = "low_settling_solvers_blacklisting_enabled", - long = "low-settling-solvers-blacklisting-enabled", - env = "LOW_SETTLING_SOLVERS_BLACKLISTING_ENABLED", - default_value = "true", - action = clap::ArgAction::Set, - )] - pub enabled: bool, - - /// The number of last auctions to check solver participation eligibility. - #[clap( - id = "low_settling_last_auctions_participation_count", - long = "low-settling-last-auctions-participation-count", - env = "LOW_SETTLING_LAST_AUCTIONS_PARTICIPATION_COUNT", - default_value = "100" - )] - pub last_auctions_participation_count: u32, - - /// The minimum number of winning solutions to start considering the solver. - #[clap( - id = "low_settling_min_wins_threshold", - long = "low-settling-min-wins-threshold", - env = "LOW_SETTLING_MIN_WINS_THRESHOLD", - default_value = "3" - )] - pub min_wins_threshold: u32, - - /// A max failure rate for a solver to remain eligible for - /// participation in the competition. Otherwise, the solver will be - /// banned. 
- #[clap(long, env, default_value = "0.9")] - pub solver_max_settlement_failure_rate: f64, -} - impl std::fmt::Display for Arguments { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let Self { @@ -409,7 +322,6 @@ impl std::fmt::Display for Arguments { max_winners_per_auction, archive_node_url, max_solutions_per_solver, - db_based_solver_participation_guard, disable_order_balance_filter, disable_1271_order_balance_filter, disable_1271_order_sig_filter, @@ -482,10 +394,6 @@ impl std::fmt::Display for Arguments { writeln!(f, "max_winners_per_auction: {max_winners_per_auction:?}")?; writeln!(f, "archive_node_url: {archive_node_url:?}")?; writeln!(f, "max_solutions_per_solver: {max_solutions_per_solver:?}")?; - writeln!( - f, - "db_based_solver_participation_guard: {db_based_solver_participation_guard:?}" - )?; writeln!( f, "disable_order_balance_filter: {disable_order_balance_filter}" @@ -511,7 +419,6 @@ pub struct Solver { pub url: Url, pub submission_account: Account, pub fairness_threshold: Option, - pub requested_timeout_on_problems: bool, } #[derive(Debug, Clone, PartialEq, Eq, Hash)] @@ -561,31 +468,15 @@ impl FromStr for Solver { } }; - let mut fairness_threshold: Option = Default::default(); - let mut requested_timeout_on_problems = false; - - if let Some(value) = parts.get(3) { - match U256::from_str_radix(value, 10) { - Ok(parsed_fairness_threshold) => { - fairness_threshold = Some(parsed_fairness_threshold); - } - Err(_) => { - requested_timeout_on_problems = - value.to_lowercase() == "requested-timeout-on-problems"; - } - } - }; - - if let Some(value) = parts.get(4) { - requested_timeout_on_problems = value.to_lowercase() == "requested-timeout-on-problems"; - } + let fairness_threshold = parts + .get(3) + .and_then(|value| U256::from_str_radix(value, 10).ok()); Ok(Self { name: name.to_owned(), url, fairness_threshold, submission_account, - requested_timeout_on_problems, }) } } @@ -818,7 +709,6 @@ mod test { name: "name1".into(), url: Url::parse("http://localhost:8080").unwrap(), fairness_threshold: None, - requested_timeout_on_problems: false, submission_account: Account::Address(address!( "C02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2" )), @@ -834,7 +724,6 @@ mod test { name: "name1".into(), url: Url::parse("http://localhost:8080").unwrap(), fairness_threshold: None, - requested_timeout_on_problems: false, submission_account: Account::Kms( Arn::from_str("arn:aws:kms:supersecretstuff").unwrap(), ), @@ -853,40 +742,6 @@ mod test { "C02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2" )), fairness_threshold: Some(U256::from(10).pow(U256::from(18))), - requested_timeout_on_problems: false, - }; - assert_eq!(driver, expected); - } - - #[test] - fn parse_driver_with_accepts_unsettled_blocking_flag() { - let argument = - "name1|http://localhost:8080|0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2|requested-timeout-on-problems"; - let driver = Solver::from_str(argument).unwrap(); - let expected = Solver { - name: "name1".into(), - url: Url::parse("http://localhost:8080").unwrap(), - submission_account: Account::Address(address!( - "C02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2" - )), - fairness_threshold: None, - requested_timeout_on_problems: true, - }; - assert_eq!(driver, expected); - } - - #[test] - fn parse_driver_with_threshold_and_accepts_unsettled_blocking_flag() { - let argument = "name1|http://localhost:8080|0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2|1000000000000000000|requested-timeout-on-problems"; - let driver = Solver::from_str(argument).unwrap(); - let expected = Solver { - 
name: "name1".into(), - url: Url::parse("http://localhost:8080").unwrap(), - submission_account: Account::Address(address!( - "C02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2" - )), - fairness_threshold: Some(U256::from(10).pow(U256::from(18))), - requested_timeout_on_problems: true, }; assert_eq!(driver, expected); } diff --git a/crates/autopilot/src/domain/competition/mod.rs b/crates/autopilot/src/domain/competition/mod.rs index a5ea589a9c..0a3a25e51a 100644 --- a/crates/autopilot/src/domain/competition/mod.rs +++ b/crates/autopilot/src/domain/competition/mod.rs @@ -8,13 +8,9 @@ use { }; mod bid; -mod participation_guard; pub mod winner_selection; -pub use { - bid::{Bid, RankType, Ranked, Scored, Unscored}, - participation_guard::SolverParticipationGuard, -}; +pub use bid::{Bid, RankType, Ranked, Scored, Unscored}; type SolutionId = u64; diff --git a/crates/autopilot/src/domain/competition/participation_guard/db.rs b/crates/autopilot/src/domain/competition/participation_guard/db.rs deleted file mode 100644 index 4c7b0ba53b..0000000000 --- a/crates/autopilot/src/domain/competition/participation_guard/db.rs +++ /dev/null @@ -1,210 +0,0 @@ -use { - crate::{ - arguments::{ - DbBasedSolverParticipationGuardConfig, - LowSettlingSolversFinderConfig, - NonSettlingSolversFinderConfig, - }, - domain::{Metrics, eth}, - infra::{self, solvers::dto}, - }, - chrono::{DateTime, Utc}, - ethrpc::block_stream::CurrentBlockWatcher, - std::{ - collections::{HashMap, HashSet}, - sync::Arc, - time::{Duration, Instant}, - }, - tokio::join, -}; - -/// Checks the DB by searching for solvers that won N last consecutive auctions -/// and either never settled any of them or their settlement success rate is -/// lower than `min_settlement_success_rate`. -#[derive(Clone)] -pub(super) struct SolverValidator(Arc); - -struct Inner { - persistence: infra::Persistence, - banned_solvers: dashmap::DashMap, - ttl: Duration, - non_settling_config: NonSettlingSolversFinderConfig, - low_settling_config: LowSettlingSolversFinderConfig, - drivers_by_address: HashMap>, -} - -impl SolverValidator { - pub fn new( - persistence: infra::Persistence, - current_block: CurrentBlockWatcher, - competition_updates_receiver: tokio::sync::mpsc::UnboundedReceiver<()>, - db_based_validator_config: DbBasedSolverParticipationGuardConfig, - drivers_by_address: HashMap>, - ) -> Self { - let self_ = Self(Arc::new(Inner { - persistence, - banned_solvers: Default::default(), - ttl: db_based_validator_config.solver_blacklist_cache_ttl, - non_settling_config: db_based_validator_config.non_settling_solvers_finder_config, - low_settling_config: db_based_validator_config.low_settling_solvers_finder_config, - drivers_by_address, - })); - - self_.start_maintenance(competition_updates_receiver, current_block); - - self_ - } - - /// Update the internal cache only once the competition auctions table is - /// updated to avoid redundant DB queries on each block or any other - /// timeout. - fn start_maintenance( - &self, - mut competition_updates_receiver: tokio::sync::mpsc::UnboundedReceiver<()>, - current_block: CurrentBlockWatcher, - ) { - let self_ = self.clone(); - tokio::spawn(async move { - while competition_updates_receiver.recv().await.is_some() { - let current_block = current_block.borrow().number; - - let (non_settling_solvers, mut low_settling_solvers) = join!( - self_.find_non_settling_solvers(current_block), - self_.find_low_settling_solvers(current_block) - ); - // Non-settling issue has a higher priority, remove duplicates from low-settling - // solvers. 
- low_settling_solvers.retain(|solver| !non_settling_solvers.contains(solver)); - - let found_at = Instant::now(); - let banned_until = Utc::now() + self_.0.ttl; - - self_.post_process( - &non_settling_solvers, - dto::notify::BanReason::UnsettledConsecutiveAuctions, - found_at, - current_block, - banned_until, - ); - self_.post_process( - &low_settling_solvers, - dto::notify::BanReason::HighSettleFailureRate, - found_at, - current_block, - banned_until, - ); - } - tracing::error!("stream of settlement updates terminated unexpectedly"); - }); - } - - async fn find_non_settling_solvers(&self, current_block: u64) -> HashSet { - if !self.0.non_settling_config.enabled { - return Default::default(); - } - - match self - .0 - .persistence - .find_non_settling_solvers( - self.0.non_settling_config.last_auctions_participation_count, - current_block, - ) - .await - { - Ok(solvers) => solvers.into_iter().collect(), - Err(err) => { - tracing::warn!(?err, "error while searching for non-settling solvers"); - Default::default() - } - } - } - - async fn find_low_settling_solvers(&self, current_block: u64) -> HashSet { - if !self.0.low_settling_config.enabled { - return Default::default(); - } - - match self - .0 - .persistence - .find_low_settling_solvers( - self.0.low_settling_config.last_auctions_participation_count, - current_block, - self.0 - .low_settling_config - .solver_max_settlement_failure_rate, - self.0.low_settling_config.min_wins_threshold, - ) - .await - { - Ok(solvers) => solvers.into_iter().collect(), - Err(err) => { - tracing::warn!(?err, "error while searching for low-settling solvers"); - Default::default() - } - } - } - - /// Updates the cache and notifies the solvers. - fn post_process( - &self, - solvers: &HashSet, - ban_reason: dto::notify::BanReason, - found_at_timestamp: Instant, - found_at_block: u64, - banned_until: DateTime, - ) { - let non_settling_solver_names: Vec<&str> = solvers - .iter() - .filter_map(|solver| self.0.drivers_by_address.get(solver)) - .map(|driver| { - Metrics::get() - .banned_solver - .with_label_values(&[driver.name.as_ref(), ban_reason.as_str()]) - .inc(); - // Check if solver accepted this feature. This should be removed once the - // CIP making this mandatory has been approved. - if driver.requested_timeout_on_problems { - let is_absent_or_expired = self - .0 - .banned_solvers - .get(&driver.submission_address) - .is_none_or(|entry| entry.elapsed() >= self.0.ttl); - // The solver should try again once the cache is expired. 
- if is_absent_or_expired { - tracing::debug!(solver = ?driver.name, "disabling solver temporarily"); - infra::notify_banned_solver(driver.clone(), ban_reason, banned_until); - self.0 - .banned_solvers - .insert(driver.submission_address, found_at_timestamp); - } - } - driver.name.as_ref() - }) - .collect(); - - if non_settling_solver_names.is_empty() { - return; - } - - let log_message = match ban_reason { - dto::notify::BanReason::UnsettledConsecutiveAuctions => "found non-settling solvers", - dto::notify::BanReason::HighSettleFailureRate => { - "found high-failure-settlement solvers" - } - }; - tracing::debug!(solvers = ?non_settling_solver_names, ?found_at_block, log_message); - } -} - -#[async_trait::async_trait] -impl super::SolverValidator for SolverValidator { - async fn is_allowed(&self, solver: ð::Address) -> anyhow::Result { - if let Some(entry) = self.0.banned_solvers.get(solver) { - return Ok(entry.elapsed() >= self.0.ttl); - } - - Ok(true) - } -} diff --git a/crates/autopilot/src/domain/competition/participation_guard/mod.rs b/crates/autopilot/src/domain/competition/participation_guard/mod.rs deleted file mode 100644 index dcff407581..0000000000 --- a/crates/autopilot/src/domain/competition/participation_guard/mod.rs +++ /dev/null @@ -1,67 +0,0 @@ -mod db; -mod onchain; - -use { - crate::{arguments::DbBasedSolverParticipationGuardConfig, domain::eth, infra}, - std::sync::Arc, -}; - -/// This struct checks whether a solver can participate in the competition by -/// using different validators. -#[derive(Clone)] -pub struct SolverParticipationGuard(Arc); - -struct Inner { - /// Stores the validators in order they will be called. - validators: Vec>, -} - -impl SolverParticipationGuard { - pub fn new( - eth: infra::Ethereum, - persistence: infra::Persistence, - competition_updates_receiver: tokio::sync::mpsc::UnboundedReceiver<()>, - db_based_validator_config: DbBasedSolverParticipationGuardConfig, - drivers: impl IntoIterator>, - ) -> Self { - let mut validators: Vec> = Vec::new(); - - let current_block = eth.current_block().clone(); - let database_solver_participation_validator = db::SolverValidator::new( - persistence, - current_block, - competition_updates_receiver, - db_based_validator_config, - drivers - .into_iter() - .map(|driver| (driver.submission_address, driver.clone())) - .collect(), - ); - validators.push(Box::new(database_solver_participation_validator)); - - let onchain_solver_participation_validator = onchain::Validator { eth }; - validators.push(Box::new(onchain_solver_participation_validator)); - - Self(Arc::new(Inner { validators })) - } - - /// Checks if a solver can participate in the competition. - /// Sequentially asks internal validators to avoid redundant RPC calls in - /// the following order: - /// 1. DB-based validator: operates fast since it uses in-memory cache. - /// 2. Onchain-based validator: only then calls the Authenticator contract. - pub async fn can_participate(&self, solver: ð::Address) -> anyhow::Result { - for validator in &self.0.validators { - if !validator.is_allowed(solver).await? 
{ - return Ok(false); - } - } - - Ok(true) - } -} - -#[async_trait::async_trait] -trait SolverValidator: Send + Sync { - async fn is_allowed(&self, solver: ð::Address) -> anyhow::Result; -} diff --git a/crates/autopilot/src/domain/competition/participation_guard/onchain.rs b/crates/autopilot/src/domain/competition/participation_guard/onchain.rs deleted file mode 100644 index 70ef3b16f2..0000000000 --- a/crates/autopilot/src/domain/competition/participation_guard/onchain.rs +++ /dev/null @@ -1,20 +0,0 @@ -use crate::{domain::eth, infra}; - -/// Calls Authenticator contract to check if a solver has a sufficient -/// permission. -pub(super) struct Validator { - pub eth: infra::Ethereum, -} - -#[async_trait::async_trait] -impl super::SolverValidator for Validator { - async fn is_allowed(&self, solver: ð::Address) -> anyhow::Result { - Ok(self - .eth - .contracts() - .authenticator() - .isSolver(*solver) - .call() - .await?) - } -} diff --git a/crates/autopilot/src/domain/competition/winner_selection.rs b/crates/autopilot/src/domain/competition/winner_selection.rs index 976692e06b..2a5b6f12dd 100644 --- a/crates/autopilot/src/domain/competition/winner_selection.rs +++ b/crates/autopilot/src/domain/competition/winner_selection.rs @@ -1219,7 +1219,6 @@ mod tests { solver_address.to_string(), None, crate::arguments::Account::Address(solver_address), - false, ) .await .unwrap(); diff --git a/crates/autopilot/src/domain/mod.rs b/crates/autopilot/src/domain/mod.rs index df25fa082b..deb7d0da9c 100644 --- a/crates/autopilot/src/domain/mod.rs +++ b/crates/autopilot/src/domain/mod.rs @@ -18,11 +18,6 @@ pub use { #[derive(prometheus_metric_storage::MetricStorage)] #[metric(subsystem = "domain")] pub struct Metrics { - /// How many times the solver marked as non-settling based on the database - /// statistics. - #[metric(labels("solver", "reason"))] - pub banned_solver: prometheus::IntCounterVec, - /// Tracks settlements that couldn't be matched to the database solutions. #[metric(labels("solver_address"))] pub inconsistent_settlements: prometheus::IntCounterVec, diff --git a/crates/autopilot/src/infra/mod.rs b/crates/autopilot/src/infra/mod.rs index 11c2d58857..4d34b214b7 100644 --- a/crates/autopilot/src/infra/mod.rs +++ b/crates/autopilot/src/infra/mod.rs @@ -8,5 +8,5 @@ pub use { blockchain::Ethereum, order_validation::banned, persistence::Persistence, - solvers::{Driver, notify_banned_solver}, + solvers::Driver, }; diff --git a/crates/autopilot/src/infra/persistence/mod.rs b/crates/autopilot/src/infra/persistence/mod.rs index b816358fda..c6fcf7e4d7 100644 --- a/crates/autopilot/src/infra/persistence/mod.rs +++ b/crates/autopilot/src/infra/persistence/mod.rs @@ -946,63 +946,6 @@ impl Persistence { Ok(()) } - /// Finds solvers that won `last_auctions_count` consecutive auctions but - /// never settled any of them. The current block is used to prevent - /// selecting auctions with deadline after the current block since they - /// still can be settled. - pub async fn find_non_settling_solvers( - &self, - last_auctions_count: u32, - current_block: u64, - ) -> anyhow::Result> { - let mut ex = self.postgres.pool.acquire().await.context("acquire")?; - let _timer = Metrics::get() - .database_queries - .with_label_values(&["find_non_settling_solvers"]) - .start_timer(); - - Ok(database::solver_competition_v2::find_non_settling_solvers( - &mut ex, - last_auctions_count, - current_block, - ) - .await - .context("failed to fetch non-settling solvers")? 
- .into_iter() - .map(|solver| eth::Address(solver.0.into())) - .collect()) - } - - /// Finds solvers that have a failure settling rate above the given - /// ratio. The current block is used to prevent selecting auctions with - /// deadline after the current block since they still can be settled. - pub async fn find_low_settling_solvers( - &self, - last_auctions_count: u32, - current_block: u64, - max_failure_rate: f64, - min_wins_threshold: u32, - ) -> anyhow::Result> { - let mut ex = self.postgres.pool.acquire().await.context("acquire")?; - let _timer = Metrics::get() - .database_queries - .with_label_values(&["find_low_settling_solvers"]) - .start_timer(); - - Ok(database::solver_competition_v2::find_low_settling_solvers( - &mut ex, - last_auctions_count, - current_block, - max_failure_rate, - min_wins_threshold, - ) - .await - .context("solver_competition::find_low_settling_solvers")? - .into_iter() - .map(|solver| eth::Address(solver.0.into())) - .collect()) - } - pub async fn get_solver_winning_solutions( &self, auction_id: domain::auction::Id, diff --git a/crates/autopilot/src/infra/solvers/dto/mod.rs b/crates/autopilot/src/infra/solvers/dto/mod.rs index 5365700ce8..d3a156294c 100644 --- a/crates/autopilot/src/infra/solvers/dto/mod.rs +++ b/crates/autopilot/src/infra/solvers/dto/mod.rs @@ -1,7 +1,6 @@ //! Types for communicating with drivers as defined in //! `crates/driver/openapi.yml`. -pub mod notify; pub mod reveal; pub mod settle; pub mod solve; diff --git a/crates/autopilot/src/infra/solvers/dto/notify.rs b/crates/autopilot/src/infra/solvers/dto/notify.rs deleted file mode 100644 index 7a63a0bc4a..0000000000 --- a/crates/autopilot/src/infra/solvers/dto/notify.rs +++ /dev/null @@ -1,34 +0,0 @@ -use { - chrono::{DateTime, Utc}, - serde::Serialize, - serde_with::serde_as, -}; - -#[serde_as] -#[derive(Clone, Debug, Serialize)] -#[serde(rename_all = "camelCase")] -pub enum Request { - Banned { - reason: BanReason, - until: DateTime, - }, -} - -#[serde_as] -#[derive(Clone, Copy, Debug, Serialize)] -#[serde(rename_all = "camelCase")] -pub enum BanReason { - /// The driver won multiple consecutive auctions but never settled them. - UnsettledConsecutiveAuctions, - /// Driver's settle failure rate is above the threshold. 
- HighSettleFailureRate, -} - -impl BanReason { - pub fn as_str(&self) -> &'static str { - match self { - BanReason::UnsettledConsecutiveAuctions => "non_settling", - BanReason::HighSettleFailureRate => "high_settle_failure_rate", - } - } -} diff --git a/crates/autopilot/src/infra/solvers/mod.rs b/crates/autopilot/src/infra/solvers/mod.rs index 430ed70de3..38334631b8 100644 --- a/crates/autopilot/src/infra/solvers/mod.rs +++ b/crates/autopilot/src/infra/solvers/mod.rs @@ -1,12 +1,11 @@ use { self::dto::{reveal, settle, solve}, - crate::{arguments::Account, domain::eth, infra::solvers::dto::notify, util}, + crate::{arguments::Account, domain::eth, util}, alloy::signers::{Signer, aws::AwsSigner}, anyhow::{Context, Result, anyhow}, - chrono::{DateTime, Utc}, observe::tracing::tracing_headers, reqwest::{Client, StatusCode}, - std::{sync::Arc, time::Duration}, + std::time::Duration, thiserror::Error, tracing::instrument, url::Url, @@ -25,7 +24,6 @@ pub struct Driver { // another driver solved with surplus exceeding this driver's surplus by `threshold` pub fairness_threshold: Option, pub submission_address: eth::Address, - pub requested_timeout_on_problems: bool, client: Client, } @@ -44,7 +42,6 @@ impl Driver { name: String, fairness_threshold: Option, submission_account: Account, - requested_timeout_on_problems: bool, ) -> Result { let submission_address = match submission_account { Account::Kms(key_id) => { @@ -78,7 +75,6 @@ impl Driver { .build() .map_err(Error::FailedToBuildClient)?, submission_address, - requested_timeout_on_problems, }) } @@ -123,10 +119,6 @@ impl Driver { Ok(()) } - pub async fn notify(&self, request: notify::Request) -> Result<()> { - self.request_response("notify", request).await - } - async fn request_response( &self, path: &str, @@ -189,18 +181,3 @@ pub async fn response_body_with_size_limit( } Ok(bytes) } - -/// Notifies the non-settling driver in a fire-and-forget manner. 
-pub fn notify_banned_solver( - non_settling_driver: Arc, - reason: notify::BanReason, - banned_until: DateTime, -) { - let request = notify::Request::Banned { - reason, - until: banned_until, - }; - tokio::spawn(async move { - let _ = non_settling_driver.notify(request).await; - }); -} diff --git a/crates/autopilot/src/run.rs b/crates/autopilot/src/run.rs index 38ba0bd965..9784938a5f 100644 --- a/crates/autopilot/src/run.rs +++ b/crates/autopilot/src/run.rs @@ -15,7 +15,7 @@ use { event_retriever::CoWSwapOnchainOrdersContract, }, }, - domain::{self, competition::SolverParticipationGuard}, + domain, event_updater::EventUpdater, infra, maintenance::Maintenance, @@ -430,9 +430,6 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { None }; - let (competition_updates_sender, competition_updates_receiver) = - tokio::sync::mpsc::unbounded_channel(); - let persistence = infra::persistence::Persistence::new(args.s3.into().unwrap(), Arc::new(db_write.clone())) .instrument(info_span!("persistence_init")) @@ -669,7 +666,6 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { driver.name.clone(), driver.fairness_threshold.map(Into::into), driver.submission_account, - driver.requested_timeout_on_problems, ) .await .map(Arc::new) @@ -683,20 +679,11 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { .into_iter() .collect(); - let solver_participation_guard = SolverParticipationGuard::new( - eth.clone(), - persistence.clone(), - competition_updates_receiver, - args.db_based_solver_participation_guard, - drivers.iter().cloned(), - ); - let run = RunLoop::new( run_loop_config, eth, persistence.clone(), drivers, - solver_participation_guard, solvable_orders_cache, trusted_tokens, run_loop::Probes { @@ -704,7 +691,6 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { startup, }, Arc::new(maintenance), - competition_updates_sender, ); run.run_forever(shutdown_controller).await; @@ -737,7 +723,6 @@ async fn shadow_mode(args: Arguments) -> ! { // this address for anything important so we // can simply generate random addresses here. Account::Address(Address::random()), - driver.requested_timeout_on_problems, ) .await .map(Arc::new) diff --git a/crates/autopilot/src/run_loop.rs b/crates/autopilot/src/run_loop.rs index 937f50000f..0b2ef60f33 100644 --- a/crates/autopilot/src/run_loop.rs +++ b/crates/autopilot/src/run_loop.rs @@ -8,7 +8,6 @@ use { self, Solution, SolutionError, - SolverParticipationGuard, Unscored, winner_selection::{self, Ranking}, }, @@ -78,14 +77,12 @@ pub struct RunLoop { eth: infra::Ethereum, persistence: infra::Persistence, drivers: Vec>, - solver_participation_guard: SolverParticipationGuard, solvable_orders_cache: Arc, trusted_tokens: AutoUpdatingTokenList, probes: Probes, /// Maintenance tasks that should run before every runloop to have /// the most recent data available. 
maintenance: Arc, - competition_updates_sender: tokio::sync::mpsc::UnboundedSender<()>, winner_selection: winner_selection::Arbitrator, /// Notifier that wakes the main loop on new blocks or orders wake_notify: Arc, @@ -98,12 +95,10 @@ impl RunLoop { eth: infra::Ethereum, persistence: infra::Persistence, drivers: Vec>, - solver_participation_guard: SolverParticipationGuard, solvable_orders_cache: Arc, trusted_tokens: AutoUpdatingTokenList, probes: Probes, maintenance: Arc, - competition_updates_sender: tokio::sync::mpsc::UnboundedSender<()>, ) -> Self { let max_winners = config.max_winners_per_auction.get(); let weth = eth.contracts().wrapped_native_token(); @@ -120,12 +115,10 @@ impl RunLoop { eth, persistence, drivers, - solver_participation_guard, solvable_orders_cache, trusted_tokens, probes, maintenance, - competition_updates_sender, winner_selection: winner_selection::Arbitrator::new(max_winners, weth), wake_notify, } @@ -538,26 +531,6 @@ impl RunLoop { competition_table, }; - let save_solutions = self - .persistence - .save_solutions(auction.id, ranking.all()) - .map(|res| match res { - Ok(_) => { - // Notify the solver participation guard that the proposed solutions have been - // saved. - if let Err(err) = self.competition_updates_sender.send(()) { - tracing::error!(?err, "failed to notify solver participation guard"); - } - Ok(()) - } - Err(err) => { - // Don't error if saving of auction and solution fails, until stable. - // Various edge cases with JIT orders verifiable only in production. - tracing::warn!(?err, "failed to save new competition data"); - Err(err.0.context("failed to save solutions")) - } - }); - tracing::trace!(?competition, "saving competition"); futures::try_join!( @@ -579,7 +552,6 @@ impl RunLoop { self.persistence .store_fee_policies(auction.id, fee_policies) .map_err(|e| e.context("failed to fee_policies")), - save_solutions ) .inspect_err(|err| tracing::warn!(?err, "failed to write post processed data to DB"))?; @@ -684,11 +656,14 @@ impl RunLoop { { let (can_participate, response) = { let driver = driver.clone(); - let guard = self.solver_participation_guard.clone(); + let eth = self.eth.clone(); let mut handle = tokio::task::spawn(async move { let fetch_response = driver.solve(request); - let check_allowed = guard.can_participate(&driver.submission_address); - tokio::join!(check_allowed, fetch_response) + let check_allowed = eth + .contracts() + .authenticator() + .isSolver(driver.submission_address); + tokio::join!(check_allowed.call(), fetch_response) }); tokio::time::timeout(self.config.solve_deadline, &mut handle) .await diff --git a/crates/database/src/solver_competition_v2.rs b/crates/database/src/solver_competition_v2.rs index 11aefbcdc9..046090b1e9 100644 --- a/crates/database/src/solver_competition_v2.rs +++ b/crates/database/src/solver_competition_v2.rs @@ -209,115 +209,6 @@ pub async fn fetch_auction_participants( sqlx::query_as(QUERY).bind(auction_id).fetch_all(ex).await } -/// Identifies solvers that have consistently failed to settle solutions in -/// recent N auctions. -/// -/// 1. Retrieves `last_auctions_count` most recent auctions already ended -/// auctions by filtering them by their deadlines. -/// 2. Identifies solvers who won these auctions but did not submit a successful -/// settlement. -/// 3. Counts how often each solver appears in these unsuccessful cases. -/// 4. Determines the total number of auctions considered. -/// 5. Flags solvers who failed to settle in all of these auctions. -/// 6. 
Returns a list of solvers that have consistently failed to settle -/// solutions. -#[instrument(skip_all)] -pub async fn find_non_settling_solvers( - ex: &mut PgConnection, - last_auctions_count: u32, - current_block: u64, -) -> Result, sqlx::Error> { - const QUERY: &str = r#" -WITH - last_auctions AS ( - SELECT ps.auction_id, ps.solver - FROM ( - SELECT DISTINCT ca.id AS auction_id - FROM competition_auctions ca - WHERE ca.deadline <= $1 - ORDER BY ca.id DESC - LIMIT $2 - ) latest_auctions - JOIN proposed_solutions ps ON ps.auction_id = latest_auctions.auction_id - WHERE ps.is_winner = true - ), - unsuccessful_solvers AS ( - SELECT la.auction_id, la.solver - FROM last_auctions la - LEFT JOIN settlements s - ON la.auction_id = s.auction_id AND la.solver = s.solver - WHERE s.auction_id IS NULL - ), - solver_appearance_count AS ( - SELECT solver, COUNT(DISTINCT auction_id) AS appearance_count - FROM unsuccessful_solvers - GROUP BY solver - ), - auction_count AS ( - SELECT COUNT(DISTINCT auction_id) AS total_auctions - FROM last_auctions - ), - consistent_solvers AS ( - SELECT sa.solver - FROM solver_appearance_count sa, auction_count ac - WHERE sa.appearance_count = ac.total_auctions - ) -SELECT DISTINCT solver -FROM consistent_solvers; - "#; - - sqlx::query_scalar(QUERY) - .bind(sqlx::types::BigDecimal::from(current_block)) - .bind(i64::from(last_auctions_count)) - .fetch_all(ex) - .await -} - -#[instrument(skip_all)] -pub async fn find_low_settling_solvers( - ex: &mut PgConnection, - last_auctions_count: u32, - current_block: u64, - max_failure_rate: f64, - min_wins_threshold: u32, -) -> Result, sqlx::Error> { - const QUERY: &str = r#" -WITH - last_auctions AS ( - SELECT ps.auction_id, ps.solver - FROM ( - SELECT DISTINCT ca.id AS auction_id - FROM competition_auctions ca - WHERE ca.deadline <= $1 - ORDER BY ca.id DESC - LIMIT $2 - ) latest_auctions - JOIN proposed_solutions ps ON ps.auction_id = latest_auctions.auction_id - WHERE ps.is_winner = true - ), - solver_settlement_counts AS ( - SELECT la.solver, - COUNT(DISTINCT la.auction_id) AS total_wins, - COUNT(DISTINCT s.auction_id) AS total_settlements - FROM last_auctions la - LEFT JOIN settlements s - ON la.auction_id = s.auction_id AND la.solver = s.solver - GROUP BY la.solver - ) -SELECT solver -FROM solver_settlement_counts -WHERE total_wins >= $3 AND (1 - (total_settlements::decimal / NULLIF(total_wins, 0))) > $4; - "#; - - sqlx::query_scalar(QUERY) - .bind(sqlx::types::BigDecimal::from(current_block)) - .bind(i64::from(last_auctions_count)) - .bind(i64::from(min_wins_threshold)) - .bind(max_failure_rate) - .fetch_all(ex) - .await -} - #[derive(Clone, Debug, PartialEq, Default)] pub struct Solution { // Unique Id generated by the autopilot to uniquely identify the solution within Auction @@ -702,351 +593,6 @@ mod tests { assert!(fetched_solutions[2].orders.len() == 3); } - #[tokio::test] - #[ignore] - async fn postgres_non_settling_solvers_roundtrip() { - let mut db = PgConnection::connect("postgresql://").await.unwrap(); - let mut db = db.begin().await.unwrap(); - crate::clear_DANGER_(&mut db).await.unwrap(); - - let non_settling_solver = ByteArray([1u8; 20]); - - let mut solution_uid = 0; - let deadline_block = 100u64; - let last_auctions_count = 3i64; - // competition_auctions - // Insert auctions within the deadline - for auction_id in 1..=4 { - let auction = auction::Auction { - id: auction_id, - block: auction_id, - deadline: i64::try_from(deadline_block).unwrap(), - order_uids: Default::default(), - price_tokens: 
Default::default(), - price_values: Default::default(), - surplus_capturing_jit_order_owners: Default::default(), - }; - auction::save(&mut db, auction).await.unwrap(); - } - - // Insert auctions outside the deadline - for auction_id in 5..=6 { - let auction = auction::Auction { - id: auction_id, - block: auction_id, - deadline: i64::try_from(deadline_block).unwrap() + auction_id, - order_uids: Default::default(), - price_tokens: Default::default(), - price_values: Default::default(), - surplus_capturing_jit_order_owners: Default::default(), - }; - auction::save(&mut db, auction).await.unwrap(); - } - - // proposed_solutions - // Non-settling solver wins `last_auctions_count` auctions within the deadline - for auction_id in 2..=4 { - solution_uid += 1; - let solutions = vec![Solution { - uid: auction_id, - id: solution_uid.into(), - solver: non_settling_solver, - is_winner: true, - filtered_out: false, - score: Default::default(), - orders: Default::default(), - price_tokens: Default::default(), - price_values: Default::default(), - }]; - save_solutions(&mut db, auction_id, &solutions) - .await - .unwrap(); - } - - // Another non-settling solver wins not all the auctions within the deadline - for auction_id in 2..=4 { - solution_uid += 1; - let solutions = vec![Solution { - uid: auction_id, - id: solution_uid.into(), - solver: ByteArray([2u8; 20]), - is_winner: auction_id != 2, - filtered_out: false, - score: Default::default(), - orders: Default::default(), - price_tokens: Default::default(), - price_values: Default::default(), - }]; - save_solutions(&mut db, auction_id, &solutions) - .await - .unwrap(); - } - - // One more non-settling solver has `last_auctions_count` winning auctions but - // not consecutive - for auction_id in 1..=4 { - // Break the sequence - if auction_id == 2 { - continue; - } - solution_uid += 1; - let solutions = vec![Solution { - uid: auction_id, - id: solution_uid.into(), - solver: ByteArray([3u8; 20]), - is_winner: true, - filtered_out: false, - score: Default::default(), - orders: Default::default(), - price_tokens: Default::default(), - price_values: Default::default(), - }]; - save_solutions(&mut db, auction_id, &solutions) - .await - .unwrap(); - } - - // One more non-settling solver has `last_auctions_count` winning auctions but - // some of them are outside the deadline - for auction_id in 3..=5 { - solution_uid += 1; - let solutions = vec![Solution { - uid: auction_id, - id: solution_uid.into(), - solver: ByteArray([4u8; 20]), - is_winner: true, - filtered_out: false, - score: Default::default(), - orders: Default::default(), - price_tokens: Default::default(), - price_values: Default::default(), - }]; - save_solutions(&mut db, auction_id, &solutions) - .await - .unwrap(); - } - - // Verify only the non-settling solver is returned - let result = find_non_settling_solvers( - &mut db, - u32::try_from(last_auctions_count).unwrap(), - deadline_block, - ) - .await - .unwrap(); - assert_eq!(result, vec![non_settling_solver]); - - // Non-settling solver settles one of the auctions - let event = EventIndex { - block_number: 4, - log_index: 0, - }; - let settlement = Settlement { - solver: non_settling_solver, - transaction_hash: ByteArray([0u8; 32]), - }; - events::insert_settlement(&mut db, &event, &settlement) - .await - .unwrap(); - - // The same result until the auction_id is updated in the settlements table - let result = find_non_settling_solvers( - &mut db, - u32::try_from(last_auctions_count).unwrap(), - deadline_block, - ) - .await - .unwrap(); - 
assert_eq!(result, vec![non_settling_solver]); - - settlements::update_settlement_auction(&mut db, 4, 0, 4) - .await - .unwrap(); - - let result = find_non_settling_solvers( - &mut db, - u32::try_from(last_auctions_count).unwrap(), - deadline_block, - ) - .await - .unwrap(); - assert!(result.is_empty()); - } - - #[tokio::test] - #[ignore] - async fn postgres_low_settling_solvers_roundtrip() { - let mut db = PgConnection::connect("postgresql://").await.unwrap(); - let mut db = db.begin().await.unwrap(); - crate::clear_DANGER_(&mut db).await.unwrap(); - - let deadline_block = 2u64; - let last_auctions_count = 100i64; - let max_failure_ratio = 0.6; - let min_wins_threshold = 2; - let mut solution_uid = 0; - - for auction_id in 1..=10 { - let auction = auction::Auction { - id: auction_id, - block: auction_id, - deadline: i64::try_from(deadline_block).unwrap(), - order_uids: Default::default(), - price_tokens: Default::default(), - price_values: Default::default(), - surplus_capturing_jit_order_owners: Default::default(), - }; - auction::save(&mut db, auction).await.unwrap(); - } - - // Settles only 20% of won auctions - let low_settling_solver = ByteArray([1u8; 20]); - for auction_id in 1..=5 { - solution_uid += 1; - let solutions = vec![Solution { - uid: solution_uid, - id: auction_id.into(), - solver: low_settling_solver, - is_winner: true, - filtered_out: false, - score: Default::default(), - orders: Default::default(), - price_tokens: Default::default(), - price_values: Default::default(), - }]; - save_solutions(&mut db, auction_id, &solutions) - .await - .unwrap(); - } - let event = EventIndex { - block_number: 1, - log_index: 0, - }; - let settlement = Settlement { - solver: low_settling_solver, - transaction_hash: ByteArray([0u8; 32]), - }; - events::insert_settlement(&mut db, &event, &settlement) - .await - .unwrap(); - settlements::update_settlement_auction(&mut db, 1, 0, 1) - .await - .unwrap(); - - // Settles 0% of won auctions - let non_settling_solver = ByteArray([2u8; 20]); - for auction_id in 1..=5 { - solution_uid += 1; - let solutions = vec![Solution { - uid: solution_uid, - id: auction_id.into(), - solver: non_settling_solver, - is_winner: true, - filtered_out: false, - score: Default::default(), - orders: Default::default(), - price_tokens: Default::default(), - price_values: Default::default(), - }]; - save_solutions(&mut db, auction_id, &solutions) - .await - .unwrap(); - } - - // Settled 40% of won auctions - let settling_solver = ByteArray([3u8; 20]); - for auction_id in 1..=5 { - solution_uid += 1; - let solutions = vec![Solution { - uid: solution_uid, - id: auction_id.into(), - solver: settling_solver, - is_winner: true, - filtered_out: false, - score: Default::default(), - orders: Default::default(), - price_tokens: Default::default(), - price_values: Default::default(), - }]; - save_solutions(&mut db, auction_id, &solutions) - .await - .unwrap(); - } - for auction_id in 2..=3 { - let event = EventIndex { - block_number: auction_id, - log_index: 0, - }; - let settlement = Settlement { - solver: settling_solver, - transaction_hash: ByteArray([u8::try_from(auction_id).unwrap(); 32]), - }; - events::insert_settlement(&mut db, &event, &settlement) - .await - .unwrap(); - settlements::update_settlement_auction(&mut db, auction_id, 0, auction_id) - .await - .unwrap(); - } - - let result = find_low_settling_solvers( - &mut db, - u32::try_from(last_auctions_count).unwrap(), - deadline_block, - max_failure_ratio, - min_wins_threshold, - ) - .await - .unwrap(); - - 
assert_eq!(result.len(), 2); - assert!(result.contains(&low_settling_solver)); - assert!(result.contains(&non_settling_solver)); - - // Both won only 5 auctions. With threshold 6, no solver should be returned. - assert!( - find_low_settling_solvers( - &mut db, - u32::try_from(last_auctions_count).unwrap(), - deadline_block, - max_failure_ratio, - 6, - ) - .await - .unwrap() - .is_empty() - ); - - // Low settling solver settles another auction - let event = EventIndex { - block_number: 2, - log_index: 1, - }; - let settlement = Settlement { - solver: low_settling_solver, - transaction_hash: ByteArray([2u8; 32]), - }; - events::insert_settlement(&mut db, &event, &settlement) - .await - .unwrap(); - settlements::update_settlement_auction(&mut db, 2, 1, 2) - .await - .unwrap(); - - let result = find_low_settling_solvers( - &mut db, - u32::try_from(last_auctions_count).unwrap(), - deadline_block, - max_failure_ratio, - min_wins_threshold, - ) - .await - .unwrap(); - - // Now, it is not a low-settling solver anymore - assert_eq!(result, vec![non_settling_solver]); - } - #[tokio::test] #[ignore] async fn postgres_load_by_tx_hash() { diff --git a/crates/driver/src/infra/api/mod.rs b/crates/driver/src/infra/api/mod.rs index afd3f27578..38d106a258 100644 --- a/crates/driver/src/infra/api/mod.rs +++ b/crates/driver/src/infra/api/mod.rs @@ -96,7 +96,6 @@ impl Api { let router = routes::solve(router); let router = routes::reveal(router); let router = routes::settle(router); - let router = routes::notify(router); let bad_order_config = solver.bad_order_detection(); let mut bad_tokens = diff --git a/crates/driver/src/infra/api/routes/mod.rs b/crates/driver/src/infra/api/routes/mod.rs index dcd4afffc8..9ea3bdf93f 100644 --- a/crates/driver/src/infra/api/routes/mod.rs +++ b/crates/driver/src/infra/api/routes/mod.rs @@ -2,7 +2,6 @@ mod gasprice; mod healthz; mod info; mod metrics; -mod notify; mod quote; mod reveal; mod settle; @@ -13,7 +12,6 @@ pub(super) use { healthz::healthz, info::info, metrics::metrics, - notify::notify, quote::{OrderError, quote}, reveal::reveal, settle::settle, diff --git a/crates/driver/src/infra/api/routes/notify/dto/mod.rs b/crates/driver/src/infra/api/routes/notify/dto/mod.rs deleted file mode 100644 index 9a24eedbc1..0000000000 --- a/crates/driver/src/infra/api/routes/notify/dto/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -mod notify_request; - -pub use notify_request::NotifyRequest; diff --git a/crates/driver/src/infra/api/routes/notify/dto/notify_request.rs b/crates/driver/src/infra/api/routes/notify/dto/notify_request.rs deleted file mode 100644 index c812453e32..0000000000 --- a/crates/driver/src/infra/api/routes/notify/dto/notify_request.rs +++ /dev/null @@ -1,42 +0,0 @@ -use { - crate::infra::notify, - chrono::{DateTime, Utc}, - serde::Deserialize, - serde_with::serde_as, -}; - -#[serde_as] -#[derive(Debug, Deserialize)] -#[serde(rename_all = "camelCase")] -pub enum NotifyRequest { - Banned { - reason: BanReason, - until: DateTime, - }, -} - -#[serde_as] -#[derive(Debug, Deserialize)] -#[serde(rename_all = "camelCase")] -pub enum BanReason { - /// The driver won multiple consecutive auctions but never settled them. - UnsettledConsecutiveAuctions, - /// Driver's settle failure rate is above the threshold. 
- HighSettleFailureRate, -} - -impl From for notify::Kind { - fn from(value: NotifyRequest) -> Self { - match value { - NotifyRequest::Banned { reason, until } => notify::Kind::Banned { - reason: match reason { - BanReason::UnsettledConsecutiveAuctions => { - notify::BanReason::UnsettledConsecutiveAuctions - } - BanReason::HighSettleFailureRate => notify::BanReason::HighSettleFailureRate, - }, - until, - }, - } - } -} diff --git a/crates/driver/src/infra/api/routes/notify/mod.rs b/crates/driver/src/infra/api/routes/notify/mod.rs deleted file mode 100644 index 3e952a6c6c..0000000000 --- a/crates/driver/src/infra/api/routes/notify/mod.rs +++ /dev/null @@ -1,17 +0,0 @@ -mod dto; - -use crate::infra::api::{Error, State}; - -pub(in crate::infra::api) fn notify(router: axum::Router) -> axum::Router { - router.route("/notify", axum::routing::post(route)) -} - -async fn route( - state: axum::extract::State, - req: axum::Json, -) -> Result)> { - let solver = &state.solver().name().0; - tracing::debug!(?req, ?solver, "received a notification"); - state.solver().notify(None, None, req.0.into()); - Ok(hyper::StatusCode::OK) -} diff --git a/crates/driver/src/infra/notify/mod.rs b/crates/driver/src/infra/notify/mod.rs index 14187231b8..321335d8c8 100644 --- a/crates/driver/src/infra/notify/mod.rs +++ b/crates/driver/src/infra/notify/mod.rs @@ -6,14 +6,7 @@ use { pub mod liquidity_sources; mod notification; -pub use notification::{ - BanReason, - Kind, - Notification, - ScoreKind, - Settlement, - SimulationSucceededAtLeastOnce, -}; +pub use notification::{Kind, Notification, ScoreKind, Settlement, SimulationSucceededAtLeastOnce}; use { super::simulator, crate::domain::{eth, mempools::Error}, diff --git a/crates/driver/src/infra/notify/notification.rs b/crates/driver/src/infra/notify/notification.rs index 265bcab5b2..c8d4e48f21 100644 --- a/crates/driver/src/infra/notify/notification.rs +++ b/crates/driver/src/infra/notify/notification.rs @@ -3,7 +3,6 @@ use { competition::{auction, solution}, eth::{self, Ether, TokenAddress}, }, - chrono::{DateTime, Utc}, std::collections::BTreeSet, }; @@ -46,11 +45,6 @@ pub enum Kind { DriverError(String), /// On-chain solution postprocessing timed out. PostprocessingTimedOut, - /// The solver has been banned for a specific reason. - Banned { - reason: BanReason, - until: DateTime, - }, /// The solver sent an invalid request format DeserializationError(String), } @@ -66,14 +60,6 @@ pub enum ScoreKind { MissingPrice(TokenAddress), } -#[derive(Debug)] -pub enum BanReason { - /// The driver won multiple consecutive auctions but never settled them. - UnsettledConsecutiveAuctions, - /// Driver's settle failure rate is above the threshold. - HighSettleFailureRate, -} - #[derive(Debug)] pub enum Settlement { /// Winning solver settled successfully transaction onchain. 
diff --git a/crates/driver/src/infra/solver/dto/notification.rs b/crates/driver/src/infra/solver/dto/notification.rs index 40f1be47a4..e06a344024 100644 --- a/crates/driver/src/infra/solver/dto/notification.rs +++ b/crates/driver/src/infra/solver/dto/notification.rs @@ -58,17 +58,6 @@ pub fn new( notify::Kind::PostprocessingTimedOut => { solvers_dto::notification::Kind::PostprocessingTimedOut } - notify::Kind::Banned { reason, until } => solvers_dto::notification::Kind::Banned { - reason: match reason { - notify::BanReason::UnsettledConsecutiveAuctions => { - solvers_dto::notification::BanReason::UnsettledConsecutiveAuctions - } - notify::BanReason::HighSettleFailureRate => { - solvers_dto::notification::BanReason::HighSettleFailureRate - } - }, - until, - }, notify::Kind::DeserializationError(reason) => { solvers_dto::notification::Kind::DeserializationError { reason } } diff --git a/crates/e2e/src/setup/services.rs b/crates/e2e/src/setup/services.rs index a56ff2120a..023e36fe40 100644 --- a/crates/e2e/src/setup/services.rs +++ b/crates/e2e/src/setup/services.rs @@ -204,8 +204,6 @@ impl<'a> Services<'a> { let args = [ "autopilot".to_string(), - "--non-settling-solvers-blacklisting-enabled=false".to_string(), - "--low-settling-solvers-blacklisting-enabled=false".to_string(), "--max-run-loop-delay=100ms".to_string(), "--run-loop-native-price-timeout=500ms".to_string(), format!("--ethflow-contracts={ethflow_contracts}"), diff --git a/crates/e2e/tests/e2e/main.rs b/crates/e2e/tests/e2e/main.rs index d2b5f840a4..738e30317b 100644 --- a/crates/e2e/tests/e2e/main.rs +++ b/crates/e2e/tests/e2e/main.rs @@ -35,7 +35,6 @@ mod refunder; mod replace_order; mod smart_contract_orders; mod solver_competition; -mod solver_participation_guard; mod submission; mod token_metadata; mod tracking_insufficient_funds; diff --git a/crates/e2e/tests/e2e/solver_participation_guard.rs b/crates/e2e/tests/e2e/solver_participation_guard.rs deleted file mode 100644 index b5a16e2ec9..0000000000 --- a/crates/e2e/tests/e2e/solver_participation_guard.rs +++ /dev/null @@ -1,359 +0,0 @@ -use { - alloy::primitives::{Address, U256}, - e2e::setup::{ - Db, - ExtraServiceArgs, - MintableToken, - OnchainComponents, - Services, - TIMEOUT, - TestAccount, - run_test, - wait_for_condition, - }, - ethrpc::{Web3, alloy::CallBuilderExt}, - model::{ - order::{OrderClass, OrderCreation, OrderKind}, - signature::EcdsaSigningScheme, - }, - number::units::EthUnit, - sqlx::Row, - std::time::Instant, -}; - -#[tokio::test] -#[ignore] -async fn local_node_non_settling_solver() { - run_test(non_settling_solver).await; -} - -#[tokio::test] -#[ignore] -async fn local_node_low_settling_solver() { - run_test(low_settling_solver).await; -} - -#[tokio::test] -#[ignore] -async fn local_node_not_allowed_solver() { - run_test(not_allowed_solver).await; -} - -async fn non_settling_solver(web3: Web3) { - let mut onchain = OnchainComponents::deploy(web3.clone()).await; - - let [solver, solver_b] = onchain.make_solvers(1u64.eth()).await; - let (trader_a, token_a, token_b) = setup(&mut onchain, &solver).await; - - let services = Services::new(&onchain).await; - let args = ExtraServiceArgs { - autopilot: vec![ - "--non-settling-solvers-blacklisting-enabled=true".to_string(), - "--low-settling-solvers-blacklisting-enabled=true".to_string(), - // The solver gets banned for 40s. 
- "--solver-blacklist-cache-ttl=40s".to_string(), - ], - ..Default::default() - }; - services.start_protocol_with_args(args, solver).await; - - // Amount of order should be more or equal the non-settling threshold, which is - // 3. - for _ in 0..4 { - execute_order(&onchain, &trader_a, &token_a, &token_b, &services) - .await - .unwrap(); - } - - let pool = services.db(); - let settled_auction_ids = fetch_last_settled_auction_ids(pool).await; - assert_eq!(settled_auction_ids.len(), 4); - // Build 5 blocks to make sure the submission deadline is passed, which is 5 by - // default. - for _ in 0..5 { - onchain.mint_block().await; - } - - // Simulate failed settlements by replacing the solver for the last 3 - // settlements. - let last_auctions = settled_auction_ids - .iter() - .take(3) - .cloned() - .collect::>(); - replace_solver_for_auction_ids(pool, &last_auctions, &solver_b.address()).await; - // The competition still passes since the stats are updated only after a new - // solution from anyone is received and stored. - let now = Instant::now(); - assert!( - execute_order(&onchain, &trader_a, &token_a, &token_b, &services) - .await - .is_ok() - ); - // Now, the stat is updated, and the solver is banned. - assert!( - execute_order(&onchain, &trader_a, &token_a, &token_b, &services) - .await - .is_err() - ); - - // 40 seconds is the cache TTL, and 5 seconds is added to compensate any - // possible delays. - let sleep_timeout_secs = 40 - now.elapsed().as_secs() + 5; - println!( - "Sleeping for {sleep_timeout_secs} seconds to reset the solver participation guard cache" - ); - tokio::time::sleep(tokio::time::Duration::from_secs(sleep_timeout_secs)).await; - // The cache is reset, and the solver is allowed to participate again. - execute_order(&onchain, &trader_a, &token_a, &token_b, &services) - .await - .unwrap(); -} - -async fn low_settling_solver(web3: Web3) { - let mut onchain = OnchainComponents::deploy(web3.clone()).await; - - let [solver, solver_b] = onchain.make_solvers(1u64.eth()).await; - let (trader_a, token_a, token_b) = setup(&mut onchain, &solver).await; - - let services = Services::new(&onchain).await; - let args = ExtraServiceArgs { - autopilot: vec![ - "--non-settling-solvers-blacklisting-enabled=true".to_string(), - "--low-settling-solvers-blacklisting-enabled=true".to_string(), - // The solver gets banned for 40s. - "--solver-blacklist-cache-ttl=40s".to_string(), - // The solver is banned if the failure settlement rate is above 55%. - "--solver-max-settlement-failure-rate=0.55".to_string(), - ], - ..Default::default() - }; - services.start_protocol_with_args(args, solver).await; - - // Create 5 orders, to easily test 60% of them failing, which is 3/5. - for _ in 0..5 { - execute_order(&onchain, &trader_a, &token_a, &token_b, &services) - .await - .unwrap(); - } - - let pool = services.db(); - let settled_auction_ids = fetch_last_settled_auction_ids(pool).await; - assert_eq!(settled_auction_ids.len(), 5); - // Build 5 blocks to make sure the submission deadline is passed, which is 5 by - // default. - for _ in 0..5 { - onchain.mint_block().await; - } - - // Simulate low settling rate by replacing the solver for the 60% of the - // settlements. 
- let random_auctions = settled_auction_ids - .iter() - .enumerate() - .filter_map(|(i, id)| (i % 2 == 0).then_some(*id)) - .collect::>(); - replace_solver_for_auction_ids(pool, &random_auctions, &solver_b.address()).await; - // The competition still passes since the stats are updated only after a new - // solution from anyone is received and stored. - let now = Instant::now(); - execute_order(&onchain, &trader_a, &token_a, &token_b, &services) - .await - .unwrap(); - // Now, the stat is updated, and the solver is banned. - execute_order(&onchain, &trader_a, &token_a, &token_b, &services) - .await - .unwrap_err(); - - // 40 seconds is the cache TTL, and 5 seconds is added to compensate any - // possible delays. - let sleep_timeout_secs = 40 - now.elapsed().as_secs() + 5; - println!( - "Sleeping for {sleep_timeout_secs} seconds to reset the solver participation guard cache" - ); - tokio::time::sleep(tokio::time::Duration::from_secs(sleep_timeout_secs)).await; - // The cache is reset, and the solver is allowed to participate again. - execute_order(&onchain, &trader_a, &token_a, &token_b, &services) - .await - .unwrap(); -} - -async fn not_allowed_solver(web3: Web3) { - let mut onchain = OnchainComponents::deploy(web3.clone()).await; - - let [solver] = onchain.make_solvers(1u64.eth()).await; - let (trader_a, token_a, token_b) = setup(&mut onchain, &solver).await; - - let solver_address = solver.address(); - let services = Services::new(&onchain).await; - services.start_protocol(solver).await; - - execute_order(&onchain, &trader_a, &token_a, &token_b, &services) - .await - .unwrap(); - - // Ban the solver - onchain - .contracts() - .gp_authenticator - .removeSolver(solver_address) - .send_and_watch() - .await - .unwrap(); - - assert!( - execute_order(&onchain, &trader_a, &token_a, &token_b, &services) - .await - .is_err() - ); - - // Unban the solver - onchain - .contracts() - .gp_authenticator - .addSolver(solver_address) - .send_and_watch() - .await - .unwrap(); - - execute_order(&onchain, &trader_a, &token_a, &token_b, &services) - .await - .unwrap(); -} - -async fn setup( - onchain: &mut OnchainComponents, - solver: &TestAccount, -) -> (TestAccount, MintableToken, MintableToken) { - let [trader_a] = onchain.make_accounts(1u64.eth()).await; - let [token_a, token_b] = onchain - .deploy_tokens_with_weth_uni_v2_pools(1_000u64.eth(), 1_000u64.eth()) - .await; - - // Fund trader accounts - token_a.mint(trader_a.address(), 1000u64.eth()).await; - - // Create and fund Uniswap pool - token_a.mint(solver.address(), 1000u64.eth()).await; - token_b.mint(solver.address(), 1000u64.eth()).await; - onchain - .contracts() - .uniswap_v2_factory - .createPair(*token_a.address(), *token_b.address()) - .from(solver.address()) - .send_and_watch() - .await - .unwrap(); - - token_a - .approve( - *onchain.contracts().uniswap_v2_router.address(), - 1000u64.eth(), - ) - .from(solver.address()) - .send_and_watch() - .await - .unwrap(); - - token_b - .approve( - *onchain.contracts().uniswap_v2_router.address(), - 1000u64.eth(), - ) - .from(solver.address()) - .send_and_watch() - .await - .unwrap(); - onchain - .contracts() - .uniswap_v2_router - .addLiquidity( - *token_a.address(), - *token_b.address(), - 1000u64.eth(), - 1000u64.eth(), - U256::ZERO, - U256::ZERO, - solver.address(), - U256::MAX, - ) - .from(solver.address()) - .send_and_watch() - .await - .unwrap(); - - // Approve GPv2 for trading - - token_a - .approve(onchain.contracts().allowance, 1000u64.eth()) - .from(trader_a.address()) - .send_and_watch() - 
.await - .unwrap(); - - (trader_a, token_a, token_b) -} - -async fn replace_solver_for_auction_ids(pool: &Db, auction_ids: &[i64], solver: &Address) { - for auction_id in auction_ids { - sqlx::query("UPDATE settlements SET solver = $1 WHERE auction_id = $2") - .bind(solver.as_slice()) - .bind(auction_id) - .execute(pool) - .await - .unwrap(); - } -} - -async fn fetch_last_settled_auction_ids(pool: &Db) -> Vec { - sqlx::query("SELECT auction_id FROM settlements ORDER BY auction_id DESC") - .fetch_all(pool) - .await - .unwrap() - .into_iter() - .filter_map(|row| { - let auction_id: Option = row.try_get(0).unwrap(); - auction_id - }) - .collect() -} - -async fn execute_order( - onchain: &OnchainComponents, - trader_a: &TestAccount, - token_a: &MintableToken, - token_b: &MintableToken, - services: &Services<'_>, -) -> anyhow::Result<()> { - let order = OrderCreation { - sell_token: *token_a.address(), - sell_amount: 10u64.eth(), - buy_token: *token_b.address(), - buy_amount: 5u64.eth(), - valid_to: model::time::now_in_epoch_seconds() + 300, - kind: OrderKind::Sell, - ..Default::default() - } - .sign( - EcdsaSigningScheme::Eip712, - &onchain.contracts().domain_separator, - &trader_a.signer, - ); - let balance_before = token_b.balanceOf(trader_a.address()).call().await.unwrap(); - let order_id = services.create_order(&order).await.unwrap(); - onchain.mint_block().await; - let limit_order = services.get_order(&order_id).await.unwrap(); - assert_eq!(limit_order.metadata.class, OrderClass::Limit); - let auction_ids_before = fetch_last_settled_auction_ids(services.db()).await.len(); - - // Drive solution - tracing::info!("Waiting for trade."); - wait_for_condition(TIMEOUT, || async { - let balance_after = token_b.balanceOf(trader_a.address()).call().await.unwrap(); - let balance_changes = balance_after.checked_sub(balance_before).unwrap() >= 5u64.eth(); - let auction_ids_after = - fetch_last_settled_auction_ids(services.db()).await.len() > auction_ids_before; - balance_changes && auction_ids_after - }) - .await -} diff --git a/crates/solvers-dto/src/notification.rs b/crates/solvers-dto/src/notification.rs index b63e2a8fa4..7abdb0b53f 100644 --- a/crates/solvers-dto/src/notification.rs +++ b/crates/solvers-dto/src/notification.rs @@ -4,7 +4,6 @@ use { primitives::{Address, B256, U256}, rpc::types::AccessList, }, - chrono::{DateTime, Utc}, number::serialization::HexOrDecimalU256, serde::{Deserialize, Serialize}, serde_with::{DisplayFromStr, serde_as}, @@ -69,10 +68,6 @@ pub enum Kind { Expired, Fail, PostprocessingTimedOut, - Banned { - reason: BanReason, - until: DateTime, - }, DeserializationError { reason: String, }, @@ -92,10 +87,3 @@ pub struct Tx { pub value: U256, pub access_list: AccessList, } - -#[derive(Debug, Serialize, Deserialize)] -#[serde(rename_all = "camelCase", tag = "reason")] -pub enum BanReason { - UnsettledConsecutiveAuctions, - HighSettleFailureRate, -} From 64cd00bab43619b96c3a02b561e28f6c42186c04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Duarte?= Date: Fri, 30 Jan 2026 13:04:10 +0000 Subject: [PATCH 021/219] [TRIVIAL] Fix playground configs (#4108) # Description Estimators were expecting different strings and the tx gas was missing from the driver # Changes - [ ] Remove Native from gas estimators - [ ] Add "Driver" to the native price estimators - [ ] Add tx-gas-limit to the driver config ## How to test Run docker compose and check if autopilot, orderbook and driver are up Co-authored-by: Claude --- playground/docker-compose.fork.yml | 4 ++-- 
playground/docker-compose.non-interactive.yml | 6 +++--- playground/driver.toml | 2 ++ 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/playground/docker-compose.fork.yml b/playground/docker-compose.fork.yml index d697c061ba..b0c89b63f0 100644 --- a/playground/docker-compose.fork.yml +++ b/playground/docker-compose.fork.yml @@ -80,7 +80,7 @@ services: - ENABLE_EIP1271_ORDERS=true - PRICE_ESTIMATORS=None - PRICE_ESTIMATION_DRIVERS=baseline|http://driver/baseline - - NATIVE_PRICE_ESTIMATORS=baseline|http://driver/baseline + - NATIVE_PRICE_ESTIMATORS=Driver|baseline|http://driver/baseline - DRIVERS=baseline|http://driver/baseline - BIND_ADDRESS=0.0.0.0:80 - CHAIN_ID=$CHAIN @@ -114,7 +114,7 @@ services: - NODE_URL=http://chain:8545 - SIMULATION_NODE_URL=http://chain:8545 - SETTLE_INTERVAL=15s - - GAS_ESTIMATORS=Native,Web3 + - GAS_ESTIMATORS=Web3 - PRICE_ESTIMATORS=None - BLOCK_STREAM_POLL_INTERVAL=1s - NATIVE_PRICE_CACHE_MAX_UPDATE_SIZE=100 diff --git a/playground/docker-compose.non-interactive.yml b/playground/docker-compose.non-interactive.yml index f56f605072..c9d5fca62f 100644 --- a/playground/docker-compose.non-interactive.yml +++ b/playground/docker-compose.non-interactive.yml @@ -82,7 +82,7 @@ services: - ENABLE_EIP1271_ORDERS=true - PRICE_ESTIMATORS=None - PRICE_ESTIMATION_DRIVERS=baseline|http://driver/baseline - - NATIVE_PRICE_ESTIMATORS=baseline|http://driver/baseline + - NATIVE_PRICE_ESTIMATORS=Driver|baseline|http://driver/baseline - DRIVERS=baseline|http://driver/baseline - BIND_ADDRESS=0.0.0.0:80 - CHAIN_ID=$CHAIN @@ -115,7 +115,7 @@ services: - NODE_URL=http://chain:8545 - SIMULATION_NODE_URL=http://chain:8545 - SETTLE_INTERVAL=15s - - GAS_ESTIMATORS=Native,Web3 + - GAS_ESTIMATORS=Web3 - PRICE_ESTIMATORS=None - NATIVE_PRICE_ESTIMATORS=baseline - BLOCK_STREAM_POLL_INTERVAL=1s @@ -123,7 +123,7 @@ services: - NATIVE_PRICE_CACHE_MAX_AGE=20m - SOLVER_TIME_LIMIT=5 - PRICE_ESTIMATION_DRIVERS=baseline|http://driver/baseline - - NATIVE_PRICE_ESTIMATORS=baseline|http://driver/baseline + - NATIVE_PRICE_ESTIMATORS=Driver|baseline|http://driver/baseline - DRIVERS=baseline|http://driver/baseline|0xa0Ee7A142d267C1f36714E4a8F75612F20a79720 - SKIP_EVENT_SYNC=true - BASELINE_SOURCES=None diff --git a/playground/driver.toml b/playground/driver.toml index a5933cde5f..9099b173ae 100644 --- a/playground/driver.toml +++ b/playground/driver.toml @@ -1,3 +1,5 @@ +tx-gas-limit = "45000000" + [[solver]] name = "baseline" # Arbitrary name given to this solver, must be unique endpoint = "http://baseline" From 8dd0fddba728888b79d4045bc708698d4d43880a Mon Sep 17 00:00:00 2001 From: MarcusWentz <52706599+MarcusWentz@users.noreply.github.com> Date: Fri, 30 Jan 2026 10:30:34 -0500 Subject: [PATCH 022/219] Update playground frontend Dockerfile (#4103) # Description Fixes Docker pnpm version error. # Changes Enable and prepare corepack version before running pnpm install. 
# Fixes https://github.com/cowprotocol/services/issues/4101 --- playground/Dockerfile.cowswap | 68 ++++++++++++++++++---------------- playground/Dockerfile.explorer | 22 ++++++----- 2 files changed, 50 insertions(+), 40 deletions(-) diff --git a/playground/Dockerfile.cowswap b/playground/Dockerfile.cowswap index 7ba495ced9..8318bd5760 100644 --- a/playground/Dockerfile.cowswap +++ b/playground/Dockerfile.cowswap @@ -2,6 +2,10 @@ FROM docker.io/node:22-bookworm-slim AS node-build WORKDIR /usr/src/app +# Enable Corepack and prepare pnpm +RUN corepack enable \ + && corepack prepare pnpm@10.12.1 --activate + # RPC URL args ARG REACT_APP_NETWORK_URL_1=https://rpc.mevblocker.io ARG REACT_APP_NETWORK_URL_5=https://ethereum-goerli.publicnode.com @@ -10,54 +14,56 @@ ARG REACT_APP_NETWORK_URL_100=https://gnosis.publicnode.com # Orderbook URL args ARG REACT_APP_ORDER_BOOK_URLS='{"1":"https://api.cow.fi/mainnet","100":"https://api.cow.fi/goerli","5":"https://api.cow.fi/xdai"}' -# Install dependencies +# Install system dependencies RUN --mount=type=cache,target=/var/cache/apt,sharing=locked apt-get update && \ - apt-get install -y git libssl-dev pkg-config git jq python3 make g++ - -# Clone the repo to the present working directory -RUN git clone https://github.com/cowprotocol/cowswap . && \ - git submodule update --init --recursive + apt-get install -y git libssl-dev pkg-config jq python3 make g++ && \ + rm -rf /var/lib/apt/lists/* -# Install npm dependencies -RUN yarn install --frozen-lockfile --no-cache +# Clone the repo, initialize submodules, and install dependencies +RUN git clone https://github.com/cowprotocol/cowswap . \ + && git submodule update --init --recursive \ + && pnpm install --frozen-lockfile -# Set the environment variable "chain" +# Set environment variable "chain" ARG CHAIN -ENV CHAIN=$CHAIN +ENV CHAIN="$CHAIN" -# Set the environment variable "ETH_RPC_URL" +# Set environment variable "ETH_RPC_URL" ARG ETH_RPC_URL -ENV ETH_RPC_URL=$ETH_RPC_URL +ENV ETH_RPC_URL="$ETH_RPC_URL" -# Set the default environment variables -ENV REACT_APP_NETWORK_URL_1=$REACT_APP_NETWORK_URL_1 -ENV REACT_APP_NETWORK_URL_5=$REACT_APP_NETWORK_URL_5 -ENV REACT_APP_NETWORK_URL_100=$REACT_APP_NETWORK_URL_100 -ENV REACT_APP_ORDER_BOOK_URLS=$REACT_APP_ORDER_BOOK_URLS +# Set default environment variables for network and order book URLs +ENV REACT_APP_NETWORK_URL_1="$REACT_APP_NETWORK_URL_1" +ENV REACT_APP_NETWORK_URL_5="$REACT_APP_NETWORK_URL_5" +ENV REACT_APP_NETWORK_URL_100="$REACT_APP_NETWORK_URL_100" +ENV REACT_APP_ORDER_BOOK_URLS="$REACT_APP_ORDER_BOOK_URLS" -# Update environment variables based on "chain" and "ETH_RPC_URL" and build the frontend -RUN if [ -n "$ETH_RPC_URL" ]; then \ +# Update environment variables based on "chain" and "ETH_RPC_URL", then build safely +RUN set -e; \ + if [ -n "$ETH_RPC_URL" ]; then \ case "$CHAIN" in \ 1) \ - REACT_APP_NETWORK_URL_1=$ETH_RPC_URL \ - REACT_APP_ORDER_BOOK_URLS=$(echo $REACT_APP_ORDER_BOOK_URLS | jq --arg chain "1" '.[$chain]="http://127.0.0.1:8080"') \ + REACT_APP_NETWORK_URL_1="$ETH_RPC_URL"; \ + REACT_APP_ORDER_BOOK_URLS=$(echo "$REACT_APP_ORDER_BOOK_URLS" | jq --arg chain "1" '.[$chain]="http://127.0.0.1:8080"'); \ ;; \ 5) \ - REACT_APP_NETWORK_URL_5=$ETH_RPC_URL \ - REACT_APP_ORDER_BOOK_URLS=$(echo $REACT_APP_ORDER_BOOK_URLS | jq --arg chain "5" '.[$chain]="http://127.0.0.1:8080"') \ + REACT_APP_NETWORK_URL_5="$ETH_RPC_URL"; \ + REACT_APP_ORDER_BOOK_URLS=$(echo "$REACT_APP_ORDER_BOOK_URLS" | jq --arg chain "5" '.[$chain]="http://127.0.0.1:8080"'); \ ;; \ 100) 
\ - REACT_APP_NETWORK_URL_100=$ETH_RPC_URL \ - REACT_APP_ORDER_BOOK_URLS=$(echo $REACT_APP_ORDER_BOOK_URLS | jq --arg chain "100" '.[$chain]="http://127.0.0.1:8080"') \ + REACT_APP_NETWORK_URL_100="$ETH_RPC_URL"; \ + REACT_APP_ORDER_BOOK_URLS=$(echo "$REACT_APP_ORDER_BOOK_URLS" | jq --arg chain "100" '.[$chain]="http://127.0.0.1:8080"'); \ ;; \ esac; \ - NODE_OPTIONS="--max-old-space-size=4096" NX_NO_CLOUD=true yarn build --env REACT_APP_NETWORK_URL_1=$REACT_APP_NETWORK_URL_1 \ - --env REACT_APP_NETWORK_URL_5=$REACT_APP_NETWORK_URL_5 \ - --env REACT_APP_NETWORK_URL_100=$REACT_APP_NETWORK_URL_100 \ - --env REACT_APP_ORDER_BOOK_URLS="$REACT_APP_ORDER_BOOK_URLS"; \ - fi + fi; \ + export REACT_APP_NETWORK_URL_1 REACT_APP_NETWORK_URL_5 REACT_APP_NETWORK_URL_100 REACT_APP_ORDER_BOOK_URLS; \ + NODE_OPTIONS="--max-old-space-size=4096" NX_NO_CLOUD=true pnpm run build \ + --env REACT_APP_NETWORK_URL_1="$REACT_APP_NETWORK_URL_1" \ + --env REACT_APP_NETWORK_URL_5="$REACT_APP_NETWORK_URL_5" \ + --env REACT_APP_NETWORK_URL_100="$REACT_APP_NETWORK_URL_100" \ + --env REACT_APP_ORDER_BOOK_URLS="$REACT_APP_ORDER_BOOK_URLS" -# Stage 2: Copy the frontend to the nginx container +# Stage 2: Copy the frontend build to the nginx container FROM docker.io/nginx:1.21-alpine AS frontend COPY --from=node-build /usr/src/app/build/cowswap /usr/share/nginx/html EXPOSE 80 diff --git a/playground/Dockerfile.explorer b/playground/Dockerfile.explorer index 34a2929007..917923a0f4 100644 --- a/playground/Dockerfile.explorer +++ b/playground/Dockerfile.explorer @@ -2,23 +2,27 @@ FROM docker.io/node:22-bookworm-slim AS node-build WORKDIR /usr/src/app -# Install dependencies +# Install system dependencies needed for building RUN --mount=type=cache,target=/var/cache/apt,sharing=locked apt-get update && \ - apt-get install -y git libssl-dev pkg-config git autoconf automake file g++ libtool make python3 + apt-get install -y git libssl-dev pkg-config autoconf automake file g++ libtool make python3 && \ + rm -rf /var/lib/apt/lists/* -# Clone the repo to the present working directory -RUN git clone https://github.com/cowprotocol/cowswap . && \ - git submodule update --init --recursive +# Enable Corepack and prepare pnpm +RUN corepack enable \ + && corepack prepare pnpm@10.12.1 --activate -# Install npm dependencies -RUN yarn install --frozen-lockfile --no-cache +# Clone the repo, initialize submodules, and install dependencies +RUN git clone https://github.com/cowprotocol/cowswap . \ + && git submodule update --init --recursive \ + && pnpm install --frozen-lockfile +# Set environment variable for the order book ENV REACT_APP_ORDER_BOOK_URLS='{"1":"http://localhost:8080"}' # Build the frontend -RUN APP_ID=1 yarn build:explorer +RUN APP_ID=1 pnpm run build:explorer -# Stage 2: Copy the frontend to the nginx container +# Stage 2: Copy the frontend build to the nginx container FROM docker.io/nginx:1.21-alpine AS frontend COPY --from=node-build /usr/src/app/build/explorer /usr/share/nginx/html EXPOSE 80 From 00ae19d28764dd37bb4b44301ec44838cd5bf0a2 Mon Sep 17 00:00:00 2001 From: ilya Date: Mon, 2 Feb 2026 14:18:31 +0300 Subject: [PATCH 023/219] Normalize ECDSA signature `v` parameter for Solidity `ecrecover` compatibility (#4107) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description This PR fixes an issue where EIP-712 signatures with `v = 0` or `1` (modern EIP-2 format) pass off-chain validation but fail on-chain settlement with `GPv2: invalid signature`. 
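At its core, the fix maps the modern parity values onto the legacy ones that `ecrecover` understands. A minimal, self-contained sketch of that rule (illustrative only — the helper name `normalize_v` is made up here; the real change lives in `EcdsaSignature::from_bytes`, shown in the diff further down):

```
// Sketch of the normalization rule applied by this patch (not the actual
// implementation). Solidity's `ecrecover` only accepts v = 27 or 28, while
// modern EIP-2 style signatures encode the same recovery parity as 0 or 1.
fn normalize_v(v: u8) -> Result<u8, String> {
    match v {
        0 | 27 => Ok(27),
        1 | 28 => Ok(28),
        other => Err(format!("invalid signature v value: {other}")),
    }
}

fn main() {
    assert_eq!(normalize_v(0), Ok(27));  // modern parity 0 -> legacy 27
    assert_eq!(normalize_v(1), Ok(28));  // modern parity 1 -> legacy 28
    assert_eq!(normalize_v(27), Ok(27)); // legacy values pass through
    assert_eq!(normalize_v(28), Ok(28));
    assert!(normalize_v(2).is_err());    // anything else is rejected
}
```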
# Problem

Some wallets (e.g., Bitget Wallet) produce ECDSA signatures using the modern EIP-2 format, where `v ∈ {0, 1}`, while Solidity's `ecrecover` precompile expects the legacy format where `v ∈ {27, 28}`.

Off-chain (Alloy library): Alloy's signature type (https://github.com/alloy-rs/core/blob/main/crates/primitives/src/signature/sig.rs) internally normalizes `v` to a boolean parity before recovery, so signatures with `v = 0` or `1` recover correctly.

On-chain (Solidity): The `ecrecover` precompile only accepts the legacy values `27`/`28` (see https://coders-errand.com/ecrecover-signature-verification-ethereum/). When given `v = 0`, it returns `address(0)`, which triggers the zero-address check in GPv2Signing.sol (https://github.com/cowprotocol/contracts/blob/main/src/contracts/mixins/GPv2Signing.sol#L207-L208):

```
signer = ecrecover(message, v, r, s);
require(signer != address(0), "GPv2: invalid ecdsa signature");
```

This mismatch causes orders to pass orderbook validation but fail at settlement.

# Solution

Normalize `v` to the legacy format (`27/28`) at signature parsing time in `EcdsaSignature::from_bytes()`:

```
let normalized_v = match v {
    0 | 27 => 27,
    1 | 28 => 28,
    _ => anyhow::bail!("invalid signature v value: {v}, expected 0, 1, 27, or 28"),
};
```

This ensures:

1. Signatures are stored with normalized `v` values
2. Both off-chain validation and on-chain `ecrecover` receive compatible parameters
3. The fix applies to all entry points (`Signature::from_bytes`, JSON deserialization)

# Reproducing the Issue

The issue can be verified using a real failed order and Foundry's cast tool to call the `ecrecover` precompile directly:

Failed order:
- Order UID: `0xb8e19962dd762067afb9f169684abfcbf2cb13bdc7a62ae2e680ebd5ce18c9bcca0c9c4a650cc4ed406d4a6dd031cdd9d4ebf0dc697a0686`
- Order hash (struct hash): `0xb8e19962dd762067afb9f169684abfcbf2cb13bdc7a62ae2e680ebd5ce18c9bc`
- Expected signer (owner): `0xca0c9c4a650cc4ed406d4a6dd031cdd9d4ebf0dc`
- Signature: `0xAB2E74AA0D67233ADC7B52C3B832357ED35F2052338D820D4DA66210EFA7A9684601726CB76BD26DDD958EFE291CFB57E02C39B3F60FBB8BBED1E891FB14CB5D00`
- r: `0xAB2E74AA0D67233ADC7B52C3B832357ED35F2052338D820D4DA66210EFA7A968`
- s: `0x4601726CB76BD26DDD958EFE291CFB57E02C39B3F60FBB8BBED1E891FB14CB5D`
- v: `0x00` ← the problem

## Step 1: Compute the EIP-712 message hash

To avoid computing it manually, I grabbed it from a [Tenderly simulation](https://dashboard.tenderly.co/cow-protocol/barn/simulator/babc6191-e15a-470c-83e0-5825b8a4501b/debugger?trace=0.0.4.1.1.0).
``` MESSAGE_HASH="0xb8e19962dd762067afb9f169684abfcbf2cb13bdc7a62ae2e680ebd5ce18c9bc" ``` ## Step 2: Test ecrecover with `v=0` (returns zero address - FAILS on-chain) ``` cast call 0x0000000000000000000000000000000000000001 \ "${MESSAGE_HASH}0000000000000000000000000000000000000000000000000000000000000000AB2E74AA0D67233ADC7B52C3B832357ED35F2052338D820D4DA66210EFA7A9684601726CB76BD26DDD958EFE291CFB57E02C39B3F60FBB8BBED1E891FB14CB5D" \ --rpc-url https://eth.llamarpc.com ``` ### Returns: `0x0000000000000000000000000000000000000000000000000000000000000000` ## Step 3: Test ecrecover with `v=27` (returns correct signer - WORKS) ``` cast call 0x0000000000000000000000000000000000000001 \ "${MESSAGE_HASH}000000000000000000000000000000000000000000000000000000000000001bAB2E74AA0D67233ADC7B52C3B832357ED35F2052338D820D4DA66210EFA7A9684601726CB76BD26DDD958EFE291CFB57E02C39B3F60FBB8BBED1E891FB14CB5D" \ --rpc-url https://eth.llamarpc.com ``` ### Returns: `0x000000000000000000000000ca0c9c4a650cc4ed406d4a6dd031cdd9d4ebf0dc` ✅ --- .../src/domain/competition/order/signature.rs | 22 +- .../driver/src/infra/solver/dto/solution.rs | 12 +- crates/model/src/order.rs | 217 ++++++++++++------ crates/model/src/signature.rs | 211 +++++++++++++---- crates/shared/src/encoded_settlement.rs | 5 +- 5 files changed, 349 insertions(+), 118 deletions(-) diff --git a/crates/driver/src/domain/competition/order/signature.rs b/crates/driver/src/domain/competition/order/signature.rs index 2964fdc8e3..852f8b3606 100644 --- a/crates/driver/src/domain/competition/order/signature.rs +++ b/crates/driver/src/domain/competition/order/signature.rs @@ -13,21 +13,29 @@ pub struct Signature { } impl Signature { - pub fn to_boundary_signature(&self) -> model::signature::Signature { + pub fn to_boundary_signature(&self) -> anyhow::Result { // TODO Different signing schemes imply different sizes of signature data, which // indicates that I'm missing an invariant in my types and I need to fix // that PreSign, for example, carries no data. Everything should be // reflected in the types! 
- match self.scheme { + Ok(match self.scheme { Scheme::Eip712 => model::signature::Signature::Eip712(EcdsaSignature::from_bytes( - self.data.0.as_slice().try_into().unwrap(), - )), + self.data + .0 + .as_slice() + .try_into() + .map_err(|_| anyhow::anyhow!("ECDSA signature must be 65 bytes"))?, + )?), Scheme::EthSign => model::signature::Signature::EthSign(EcdsaSignature::from_bytes( - self.data.0.as_slice().try_into().unwrap(), - )), + self.data + .0 + .as_slice() + .try_into() + .map_err(|_| anyhow::anyhow!("ECDSA signature must be 65 bytes"))?, + )?), Scheme::Eip1271 => model::signature::Signature::Eip1271(self.data.clone().into()), Scheme::PreSign => model::signature::Signature::PreSign, - } + }) } } diff --git a/crates/driver/src/infra/solver/dto/solution.rs b/crates/driver/src/infra/solver/dto/solution.rs index 3bb6e357c1..a3794444a5 100644 --- a/crates/driver/src/infra/solver/dto/solution.rs +++ b/crates/driver/src/infra/solver/dto/solution.rs @@ -328,11 +328,13 @@ impl JitOrder { let signer = signature .to_boundary_signature() - .recover_owner( - self.0.signature.as_slice(), - &DomainSeparator(domain_separator.0), - &self.raw_order_data().hash_struct(), - ) + .and_then(|sig| { + sig.recover_owner( + self.0.signature.as_slice(), + &DomainSeparator(domain_separator.0), + &self.raw_order_data().hash_struct(), + ) + }) .map_err(|e| super::Error(e.to_string()))?; if matches!( diff --git a/crates/model/src/order.rs b/crates/model/src/order.rs index 251e330d16..e27279e540 100644 --- a/crates/model/src/order.rs +++ b/crates/model/src/order.rs @@ -1072,7 +1072,9 @@ mod tests { #[test] fn deserialization_and_back() { - let value = json!( + // Input JSON has v=0x01, which gets normalized to v=28 (0x1c) for Solidity + // ecrecover compatibility. Serialization then outputs v=0x1c. 
+ let input_json = json!( { "creationDate": "1970-01-01T00:00:03Z", "owner": "0x0000000000000000000000000000000000000001", @@ -1109,6 +1111,7 @@ mod tests { }, "fullAppData": "123", }); + let signing_scheme = EcdsaSigningScheme::Eip712; let expected = Order { metadata: OrderMetadata { @@ -1146,82 +1149,166 @@ mod tests { buy_token_balance: BuyTokenDestination::Internal, }, signature: EcdsaSignature { - v: 1, + // v=0x01 in input gets normalized to 28 for Solidity ecrecover compatibility + v: 28, r: b256!("0200000000000000000000000000000000000000000000000000000000000003"), s: b256!("0400000000000000000000000000000000000000000000000000000000000005"), } .to_signature(signing_scheme), interactions: Interactions::default(), }; - let deserialized: Order = serde_json::from_value(value.clone()).unwrap(); + + // After deserialization, v=0x01 becomes v=28 + let deserialized: Order = serde_json::from_value(input_json).unwrap(); assert_eq!(deserialized, expected); + + // Serialization outputs the normalized v=0x1c (28) + let expected_output_json = json!( + { + "creationDate": "1970-01-01T00:00:03Z", + "owner": "0x0000000000000000000000000000000000000001", + "uid": "0x1111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + "availableBalance": null, + "executedBuyAmount": "3", + "executedSellAmount": "5", + "executedSellAmountBeforeFees": "4", + "executedFeeAmount": "1", + "invalidated": true, + "sellToken": "0x000000000000000000000000000000000000000a", + "buyToken": "0x0000000000000000000000000000000000000009", + "receiver": "0x000000000000000000000000000000000000000b", + "sellAmount": "1", + "buyAmount": "0", + "validTo": 4294967295u32, + "appData": "0x6000000000000000000000000000000000000000000000000000000000000007", + "feeAmount": "115792089237316195423570985008687907853269984665640564039457584007913129639935", + "executedFee": "1", + "executedFeeToken": "0x000000000000000000000000000000000000000a", + "kind": "buy", + "class": "limit", + "partiallyFillable": false, + "signature": "0x02000000000000000000000000000000000000000000000000000000000000030400000000000000000000000000000000000000000000000000000000000005\ + 1c", + "signingScheme": "eip712", + "status": "open", + "settlementContract": "0x0000000000000000000000000000000000000002", + "sellTokenBalance": "external", + "buyTokenBalance": "internal", + "isLiquidityOrder": false, + "interactions": { + "pre": [], + "post": [], + }, + "fullAppData": "123", + }); let serialized = serde_json::to_value(expected).unwrap(); - assert_json_matches!(serialized, value); + assert_json_matches!(serialized, expected_output_json); } #[test] fn order_creation_serialization() { let owner = Address::repeat_byte(0xff); - for (signature, signing_scheme, from, signature_bytes) in [ - ( - Signature::default_with(SigningScheme::Eip712), - "eip712", - Some(owner), - "0x0000000000000000000000000000000000000000000000000000000000000000\ - 0000000000000000000000000000000000000000000000000000000000000000\ - 00", - ), - ( - Signature::default_with(SigningScheme::EthSign), - "ethsign", - None, - "0x0000000000000000000000000000000000000000000000000000000000000000\ - 0000000000000000000000000000000000000000000000000000000000000000\ - 00", - ), - (Signature::PreSign, "presign", Some(owner), "0x"), - ] { - let order = OrderCreation { - sell_token: Address::repeat_byte(0x11), - buy_token: Address::repeat_byte(0x22), - receiver: Some(Address::repeat_byte(0x33)), - sell_amount: alloy::primitives::U256::from(123), - buy_amount: 
alloy::primitives::U256::from(456), - valid_to: 1337, - app_data: OrderCreationAppData::Hash { - hash: AppDataHash([0x44; 32]), - }, - fee_amount: alloy::primitives::U256::from(789), - kind: OrderKind::Sell, - partially_fillable: false, - sell_token_balance: SellTokenSource::Erc20, - buy_token_balance: BuyTokenDestination::Erc20, - from, - signature, - quote_id: Some(42), - }; - let order_json = json!({ - "sellToken": "0x1111111111111111111111111111111111111111", - "buyToken": "0x2222222222222222222222222222222222222222", - "receiver": "0x3333333333333333333333333333333333333333", - "sellAmount": "123", - "buyAmount": "456", - "validTo": 1337, - "appData": "0x4444444444444444444444444444444444444444444444444444444444444444", - "feeAmount": "789", - "kind": "sell", - "partiallyFillable": false, - "sellTokenBalance": "erc20", - "buyTokenBalance": "erc20", - "quoteId": 42, - "signingScheme": signing_scheme, - "signature": signature_bytes, - "from": from, - }); - - assert_json_matches!(json!(order), order_json); - assert_eq!(order, serde_json::from_value(order_json).unwrap()); - } + + let template_order = OrderCreation { + sell_token: Address::repeat_byte(0x11), + buy_token: Address::repeat_byte(0x22), + receiver: Some(Address::repeat_byte(0x33)), + sell_amount: alloy::primitives::U256::from(123), + buy_amount: alloy::primitives::U256::from(456), + valid_to: 1337, + app_data: OrderCreationAppData::Hash { + hash: AppDataHash([0x44; 32]), + }, + fee_amount: alloy::primitives::U256::from(789), + kind: OrderKind::Sell, + partially_fillable: false, + sell_token_balance: SellTokenSource::Erc20, + buy_token_balance: BuyTokenDestination::Erc20, + from: Some(owner), + signature: Signature::PreSign, + quote_id: Some(42), + }; + + // Test PreSign round-trip (no signature normalization needed) + let presign_order = template_order.clone(); + let presign_json = json!({ + "sellToken": "0x1111111111111111111111111111111111111111", + "buyToken": "0x2222222222222222222222222222222222222222", + "receiver": "0x3333333333333333333333333333333333333333", + "sellAmount": "123", + "buyAmount": "456", + "validTo": 1337, + "appData": "0x4444444444444444444444444444444444444444444444444444444444444444", + "feeAmount": "789", + "kind": "sell", + "partiallyFillable": false, + "sellTokenBalance": "erc20", + "buyTokenBalance": "erc20", + "quoteId": 42, + "signingScheme": "presign", + "signature": "0x", + "from": owner, + }); + assert_json_matches!(json!(presign_order), presign_json); + assert_eq!(presign_order, serde_json::from_value(presign_json).unwrap()); + + // Test ECDSA signature with v normalization. + // Input JSON has v=0x00, which normalizes to v=27 (0x1b). 
+ let input_json_v0 = json!({ + "sellToken": "0x1111111111111111111111111111111111111111", + "buyToken": "0x2222222222222222222222222222222222222222", + "receiver": "0x3333333333333333333333333333333333333333", + "sellAmount": "123", + "buyAmount": "456", + "validTo": 1337, + "appData": "0x4444444444444444444444444444444444444444444444444444444444444444", + "feeAmount": "789", + "kind": "sell", + "partiallyFillable": false, + "sellTokenBalance": "erc20", + "buyTokenBalance": "erc20", + "quoteId": 42, + "signingScheme": "eip712", + "signature": "0x0000000000000000000000000000000000000000000000000000000000000000\ + 0000000000000000000000000000000000000000000000000000000000000000\ + 00", + "from": owner, + }); + let expected_order = OrderCreation { + signature: Signature::Eip712(EcdsaSignature { + r: B256::ZERO, + s: B256::ZERO, + v: 27, // normalized from v=0 + }), + ..template_order.clone() + }; + + // Deserialization normalizes v=0 to v=27 + let deserialized: OrderCreation = serde_json::from_value(input_json_v0).unwrap(); + assert_eq!(deserialized, expected_order); + + // Serialization outputs normalized v=0x1b + let output_json_v27 = json!({ + "sellToken": "0x1111111111111111111111111111111111111111", + "buyToken": "0x2222222222222222222222222222222222222222", + "receiver": "0x3333333333333333333333333333333333333333", + "sellAmount": "123", + "buyAmount": "456", + "validTo": 1337, + "appData": "0x4444444444444444444444444444444444444444444444444444444444444444", + "feeAmount": "789", + "kind": "sell", + "partiallyFillable": false, + "sellTokenBalance": "erc20", + "buyTokenBalance": "erc20", + "quoteId": 42, + "signingScheme": "eip712", + "signature": "0x0000000000000000000000000000000000000000000000000000000000000000\ + 0000000000000000000000000000000000000000000000000000000000000000\ + 1b", + "from": owner, + }); + assert_json_matches!(json!(expected_order), output_json_v27); } #[test] @@ -1379,7 +1466,7 @@ mod tests { order_uid: OrderUid(hex!( "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a" )), - signature: EcdsaSignature::from_bytes(signature), + signature: EcdsaSignature::from_bytes(signature).unwrap(), signing_scheme: *signing_scheme, }; let owner = cancellation.validate(&domain_separator).unwrap(); diff --git a/crates/model/src/signature.rs b/crates/model/src/signature.rs index 378fad0018..70da569597 100644 --- a/crates/model/src/signature.rs +++ b/crates/model/src/signature.rs @@ -118,12 +118,7 @@ impl Signature { let bytes: [u8; 65] = bytes .try_into() .context("ECDSA signature must be 65 bytes long")?; - EcdsaSignature { - r: B256::from_slice(&bytes[..32]), - s: B256::from_slice(&bytes[32..64]), - v: bytes[64], - } - .to_signature( + EcdsaSignature::from_bytes(&bytes)?.to_signature( scheme .try_to_ecdsa_scheme() .expect("scheme is an ecdsa scheme"), @@ -255,13 +250,24 @@ impl SigningScheme { } } -#[derive(Eq, PartialEq, Clone, Copy, Debug, Default, Hash)] +#[derive(Eq, PartialEq, Clone, Copy, Debug, Hash)] pub struct EcdsaSignature { pub r: B256, pub s: B256, pub v: u8, } +impl Default for EcdsaSignature { + fn default() -> Self { + Self { + r: B256::ZERO, + s: B256::ZERO, + // Use normalized v value (equivalent to 0) for Solidity ecrecover compatibility + v: 27, + } + } +} + pub fn hashed_eip712_message(domain_separator: &DomainSeparator, struct_hash: &[u8; 32]) -> B256 { let mut message = [0u8; 66]; // 0x19 0x01 are the magic prefix bytes for the domain separator @@ -308,12 +314,23 @@ impl EcdsaSignature { 
bytes } - pub fn from_bytes(bytes: &[u8; 65]) -> Self { - EcdsaSignature { + pub fn from_bytes(bytes: &[u8; 65]) -> Result { + let v = bytes[64]; + // Normalize v to legacy format (27/28) for Solidity ecrecover compatibility. + // Modern EIP-2 signatures use v = 0 or 1, but Solidity's ecrecover expects + // v = 27 or 28. Alloy normalizes internally for off-chain recovery, but + // on-chain ecrecover(hash, v=0, r, s) returns address(0) and fails. + // Only valid v values are 0, 1, 27, 28. + let normalized_v = match v { + 0 | 27 => 27, + 1 | 28 => 28, + _ => anyhow::bail!("invalid signature v value: {v}, expected 0, 1, 27, or 28"), + }; + Ok(EcdsaSignature { r: B256::from_slice(&bytes[..32]), s: B256::from_slice(&bytes[32..64]), - v: bytes[64], - } + v: normalized_v, + }) } pub fn recover( @@ -338,7 +355,7 @@ impl EcdsaSignature { let message = hashed_signing_message(signing_scheme, domain_separator, struct_hash); // Unwrap because the only error is for invalid messages which we don't create. let signature = key.sign_hash_sync(&message).unwrap(); - Self::from_bytes(&signature.as_bytes()) + Self::from_bytes(&signature.as_bytes()).expect("signing produces valid v values") } /// Returns an arbitrary non-zero signature that can be used for recovery @@ -380,7 +397,7 @@ impl<'de> Deserialize<'de> for EcdsaSignature { write!( formatter, "the 65 ecdsa signature bytes as a hex encoded string, ordered as r, s, v, \ - where v is either 27 or 28" + where v is 0, 1, 27, or 28" ) } @@ -400,7 +417,7 @@ impl<'de> Deserialize<'de> for EcdsaSignature { "failed to decode {s:?} as hex ecdsa signature: {err}" )) })?; - Ok(EcdsaSignature::from_bytes(&bytes)) + EcdsaSignature::from_bytes(&bytes).map_err(de::Error::custom) } } @@ -437,6 +454,7 @@ mod tests { assert!(Signature::from_bytes(SigningScheme::EthSign, &[0u8; 20]).is_err()); assert!(Signature::from_bytes(SigningScheme::PreSign, &[0u8; 32]).is_err()); + // Note: v=0 in input bytes gets normalized to v=27 for ecrecover compatibility assert_eq!( Signature::from_bytes(SigningScheme::Eip712, &[0u8; 65]).unwrap(), Signature::default_with(SigningScheme::Eip712) @@ -461,13 +479,17 @@ mod tests { #[test] fn signature_to_bytes() { + // Default ECDSA signatures have normalized v = 27 (equivalent to 0) + let mut expected_ecdsa = [0u8; 65]; + expected_ecdsa[64] = 27; + assert_eq!( Signature::default_with(SigningScheme::Eip712).to_bytes(), - [0u8; 65].to_vec() + expected_ecdsa.to_vec() ); assert_eq!( Signature::default_with(SigningScheme::EthSign).to_bytes(), - [0u8; 65].to_vec() + expected_ecdsa.to_vec() ); assert_eq!( Signature::default_with(SigningScheme::PreSign).to_bytes(), @@ -490,31 +512,8 @@ mod tests { #[test] fn deserialize_and_back() { + // Test round-trip for non-ECDSA signatures (no normalization needed) for (signature, json) in [ - ( - Signature::Eip712(Default::default()), - json!({ - "signingScheme": "eip712", - "signature": "0x\ - 0000000000000000000000000000000000000000000000000000000000000000\ - 0000000000000000000000000000000000000000000000000000000000000000\ - 00", - }), - ), - ( - Signature::EthSign(EcdsaSignature { - r: B256::repeat_byte(1), - s: B256::repeat_byte(2), - v: 3, - }), - json!({ - "signingScheme": "ethsign", - "signature": "0x\ - 0101010101010101010101010101010101010101010101010101010101010101\ - 0202020202020202020202020202020202020202020202020202020202020202\ - 03", - }), - ), ( Signature::Eip1271(vec![1, 2, 3]), json!({ @@ -540,6 +539,57 @@ mod tests { assert_eq!(signature, serde_json::from_value(json.clone()).unwrap()); 
assert_json_matches!(json, json!(signature)); } + + // Test ECDSA signature deserialization with v normalization. + // Input v=0x00 normalizes to v=27, so serialization outputs v=0x1b. + let input_json = json!({ + "signingScheme": "eip712", + "signature": "0x\ + 0000000000000000000000000000000000000000000000000000000000000000\ + 0000000000000000000000000000000000000000000000000000000000000000\ + 00", + }); + let expected_signature = Signature::Eip712(EcdsaSignature { + r: B256::ZERO, + s: B256::ZERO, + v: 27, // normalized from v=0 + }); + let expected_output_json = json!({ + "signingScheme": "eip712", + "signature": "0x\ + 0000000000000000000000000000000000000000000000000000000000000000\ + 0000000000000000000000000000000000000000000000000000000000000000\ + 1b", + }); + + let deserialized: Signature = serde_json::from_value(input_json).unwrap(); + assert_eq!(deserialized, expected_signature); + assert_json_matches!(json!(deserialized), expected_output_json); + + // Test EthSign with v=1 normalizing to v=28 + let input_json = json!({ + "signingScheme": "ethsign", + "signature": "0x\ + 0101010101010101010101010101010101010101010101010101010101010101\ + 0202020202020202020202020202020202020202020202020202020202020202\ + 01", + }); + let expected_signature = Signature::EthSign(EcdsaSignature { + r: B256::repeat_byte(1), + s: B256::repeat_byte(2), + v: 28, // normalized from v=1 + }); + let expected_output_json = json!({ + "signingScheme": "ethsign", + "signature": "0x\ + 0101010101010101010101010101010101010101010101010101010101010101\ + 0202020202020202020202020202020202020202020202020202020202020202\ + 1c", + }); + + let deserialized: Signature = serde_json::from_value(input_json).unwrap(); + assert_eq!(deserialized, expected_signature); + assert_json_matches!(json!(deserialized), expected_output_json); } #[test] @@ -628,4 +678,85 @@ mod tests { hashed_ethsign_message(&domain_separator, &struct_hash) ); } + + #[test] + fn ecdsa_signature_v_normalization() { + // Modern EIP-2 signatures use v = 0 or 1, but Solidity's ecrecover expects + // v = 27 or 28. This test verifies that v values are normalized correctly. 
+ + // v = 0 should be normalized to 27 + let mut bytes_v0 = [0u8; 65]; + bytes_v0[64] = 0; + let sig = EcdsaSignature::from_bytes(&bytes_v0).unwrap(); + assert_eq!(sig.v, 27); + assert_eq!(sig.to_bytes()[64], 27); + + // v = 1 should be normalized to 28 + let mut bytes_v1 = [0u8; 65]; + bytes_v1[64] = 1; + let sig = EcdsaSignature::from_bytes(&bytes_v1).unwrap(); + assert_eq!(sig.v, 28); + assert_eq!(sig.to_bytes()[64], 28); + + // v = 27 should stay 27 + let mut bytes_v27 = [0u8; 65]; + bytes_v27[64] = 27; + let sig = EcdsaSignature::from_bytes(&bytes_v27).unwrap(); + assert_eq!(sig.v, 27); + assert_eq!(sig.to_bytes()[64], 27); + + // v = 28 should stay 28 + let mut bytes_v28 = [0u8; 65]; + bytes_v28[64] = 28; + let sig = EcdsaSignature::from_bytes(&bytes_v28).unwrap(); + assert_eq!(sig.v, 28); + assert_eq!(sig.to_bytes()[64], 28); + + // Verify normalization also works through Signature::from_bytes + let sig = Signature::from_bytes(SigningScheme::Eip712, &bytes_v0).unwrap(); + assert_eq!(sig.to_bytes()[64], 27); + + let sig = Signature::from_bytes(SigningScheme::EthSign, &bytes_v1).unwrap(); + assert_eq!(sig.to_bytes()[64], 28); + } + + #[test] + fn ecdsa_signature_invalid_v_rejected() { + // Invalid v values should be rejected + for invalid_v in [2u8, 3, 26, 29, 30, 255] { + let mut bytes = [0u8; 65]; + bytes[64] = invalid_v; + + // EcdsaSignature::from_bytes should return an error + let result = EcdsaSignature::from_bytes(&bytes); + assert!( + result.is_err(), + "v={invalid_v} should be rejected but was accepted" + ); + + // Signature::from_bytes should also return an error + let result = Signature::from_bytes(SigningScheme::Eip712, &bytes); + assert!( + result.is_err(), + "v={invalid_v} should be rejected via Signature::from_bytes" + ); + + // Deserialization should also fail + let hex_sig = format!( + "0x{}{}{}", + const_hex::encode([0u8; 32]), + const_hex::encode([0u8; 32]), + const_hex::encode([invalid_v]) + ); + let json = json!({ + "signingScheme": "eip712", + "signature": hex_sig, + }); + let result: Result = serde_json::from_value(json); + assert!( + result.is_err(), + "v={invalid_v} should be rejected during deserialization" + ); + } + } } diff --git a/crates/shared/src/encoded_settlement.rs b/crates/shared/src/encoded_settlement.rs index 61bc8ce86e..ceca3aac28 100644 --- a/crates/shared/src/encoded_settlement.rs +++ b/crates/shared/src/encoded_settlement.rs @@ -181,8 +181,11 @@ mod tests { #[test] fn trade_signature_encoding() { let owner = Address::repeat_byte(1); + // Default EcdsaSignature has v = 27 (normalized for Solidity ecrecover) + let mut default_ecdsa_bytes = vec![0; 65]; + default_ecdsa_bytes[64] = 27; for (signature, bytes) in [ - (Signature::Eip712(Default::default()), vec![0; 65]), + (Signature::Eip712(Default::default()), default_ecdsa_bytes), ( Signature::EthSign(EcdsaSignature { r: B256::repeat_byte(1), From 77bbbdf153b001df08b484340fded4bc6333eda5 Mon Sep 17 00:00:00 2001 From: ilya Date: Mon, 2 Feb 2026 18:08:47 +0300 Subject: [PATCH 024/219] Fix haircut mismatch between reported and on-chain amounts (#4109) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description Fixes the mismatch between driver-reported amounts and on-chain executed amounts when the haircut is configured. Previously, the driver reported higher buy amounts than users actually received on-chain (for sell orders), resulting in a discrepancy that matched the configured haircut. 
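For intuition, a minimal sketch of the mismatch for a sell order (the amounts and names below are hypothetical illustrations, not values taken from the codebase):

```rust
fn main() {
    // Hypothetical sell order with a 500 bps (5%) haircut configured.
    let amm_output: u64 = 10_000; // buy-token amount the settlement actually obtains
    let haircut_bps: u64 = 500;
    let haircut = amm_output * haircut_bps / 10_000; // 500
    let received_on_chain = amm_output - haircut; // 9_500
    // Before this fix the driver reported roughly `amm_output` as the executed
    // buy amount, while the user only received `received_on_chain` on-chain.
    assert_eq!(received_on_chain, 9_500);
}
```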
Root cause: `sell_amount()` and `buy_amount()` did NOT include haircut, but `custom_prices()` (used for on-chain encoding) DID. This caused reported amounts to differ from on-chain execution. # Changes Include haircut effects in `sell_amount()` and `buy_amount()` so that: - Reported amounts include haircut - On-chain execution matches reported amounts - Autopilot scores based on actual (haircutted) amounts For sell orders: - `sell_amount()` → unchanged (user sells exactly what they signed) - `buy_amount()` → reduced by haircut (user receives less) For buy orders: - `sell_amount()` → increased by haircut (user pays more) - `buy_amount()` → unchanged (user receives exactly what they signed for) ## How to test Adjusted existing tests. --------- Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- .../src/domain/competition/solution/trade.rs | 60 ++++++---- crates/driver/src/domain/quote.rs | 6 +- crates/driver/src/tests/cases/haircut.rs | 111 ++++++++++++++---- crates/e2e/tests/e2e/limit_orders.rs | 59 +++++----- 4 files changed, 163 insertions(+), 73 deletions(-) diff --git a/crates/driver/src/domain/competition/solution/trade.rs b/crates/driver/src/domain/competition/solution/trade.rs index 40a97ebf2c..f89f2a8549 100644 --- a/crates/driver/src/domain/competition/solution/trade.rs +++ b/crates/driver/src/domain/competition/solution/trade.rs @@ -212,6 +212,9 @@ impl Fulfillment { } /// The effective amount that left the user's wallet including all fees. + /// + /// For buy orders, this includes the haircut effect (haircut increases the + /// effective sell amount the user pays). pub fn sell_amount(&self, prices: &ClearingPrices) -> Result { let before_fee = match self.order.side { order::Side::Sell => self.executed.0, @@ -224,31 +227,51 @@ impl Fulfillment { .ok_or(Math::DivisionByZero)?, }; + let with_fee = before_fee.checked_add(self.fee().0).ok_or(Math::Overflow)?; + // Add haircut for buy orders (haircut is in buy token, convert to sell token) + let haircut = match self.order.side { + order::Side::Sell => eth::U256::ZERO, // Haircut applied to buy_amount for sell orders + order::Side::Buy => self.haircut_in_sell_token(prices)?, + }; + Ok(eth::TokenAmount( - before_fee.checked_add(self.fee().0).ok_or(Math::Overflow)?, + with_fee.checked_add(haircut).ok_or(Math::Overflow)?, )) } /// The effective amount the user received after all fees. /// /// Settlement contract uses `ceil` division for buy amount calculation. + /// + /// For sell orders, this includes the haircut effect (haircut reduces the + /// effective buy amount the user receives). pub fn buy_amount(&self, prices: &ClearingPrices) -> Result { let amount = match self.order.side { order::Side::Buy => self.executed.0, - order::Side::Sell => self - .executed - .0 - .checked_mul(prices.sell) - .ok_or(Math::Overflow)? - .checked_ceil_div(&prices.buy) - .ok_or(Math::DivisionByZero)?, + order::Side::Sell => { + // Base buy amount from executed sell + let base = self + .executed + .0 + .checked_mul(prices.sell) + .ok_or(Math::Overflow)? + .checked_ceil_div(&prices.buy) + .ok_or(Math::DivisionByZero)?; + // Reduce by haircut (haircut is in sell token, convert to buy token) + let haircut_in_buy = self + .haircut_fee + .checked_mul(prices.sell) + .ok_or(Math::Overflow)? + .checked_div(prices.buy) + .ok_or(Math::DivisionByZero)?; + base.checked_sub(haircut_in_buy).ok_or(Math::Negative)? 
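+                // Unit note: `haircut_fee` is denominated in the sell token; under
+                // these clearing prices an amount `x` of sell token corresponds to
+                // `x * prices.sell / prices.buy` of buy token, which is what
+                // `haircut_in_buy` computes before it is subtracted from `base`.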
+ } }; Ok(eth::TokenAmount(amount)) } - /// Computes the haircut amount in sell token for use in custom_prices(). - /// This applies haircut to pricing while keeping sell_amount() clean for - /// reporting. + /// Computes the haircut amount in sell token. + /// Used for buy orders to add haircut to the sell amount. fn haircut_in_sell_token(&self, prices: &ClearingPrices) -> Result { match self.order.side { order::Side::Sell => Ok(self.haircut_fee), @@ -261,21 +284,18 @@ impl Fulfillment { } } + /// Computes custom clearing prices for this trade. + /// + /// Note: This function relies on `sell_amount()` and `buy_amount()` to + /// correctly incorporate all adjustments (fees, haircuts). No additional + /// modifications are applied here. pub fn custom_prices( &self, prices: &ClearingPrices, ) -> Result { - // Include haircut in custom prices for quotes/scoring. - // This makes bids more conservative without affecting the actual - // reported sell_amount (which is used for user-facing reporting). - let haircut = self.haircut_in_sell_token(prices)?; Ok(CustomClearingPrices { sell: self.buy_amount(prices)?.into(), - buy: self - .sell_amount(prices)? - .0 - .checked_add(haircut) - .ok_or(Math::Overflow)?, + buy: self.sell_amount(prices)?.into(), }) } diff --git a/crates/driver/src/domain/quote.rs b/crates/driver/src/domain/quote.rs index ad513d0bfc..ee25380a40 100644 --- a/crates/driver/src/domain/quote.rs +++ b/crates/driver/src/domain/quote.rs @@ -68,10 +68,8 @@ impl Quote { /// Compute clearing prices for the quote. /// /// Uses uniform clearing prices from the solution, adjusted for haircut - /// when enabled. This uses the same approach as settlement encoding: - /// `custom_prices()` which internally uses `sell_amount()` and - /// `buy_amount()` to include the haircut in the effective trade - /// amounts. + /// when enabled. Uses `custom_prices()` which includes haircut effects + /// to make quotes conservative for users. fn compute_clearing_prices( solution: &competition::Solution, ) -> Result, Error> { diff --git a/crates/driver/src/tests/cases/haircut.rs b/crates/driver/src/tests/cases/haircut.rs index 86093659e9..a4b6a60fbf 100644 --- a/crates/driver/src/tests/cases/haircut.rs +++ b/crates/driver/src/tests/cases/haircut.rs @@ -10,15 +10,20 @@ use { setup::{ab_order, ab_pool, ab_solution}, }, }, - number::units::EthUnit, + number::{testing::ApproxEq, units::EthUnit}, }; +/// Haircut in basis points used across tests (500 bps = 5%) +const HAIRCUT_BPS: u32 = 500; + /// Test that haircut correctly reduces the solution score for sell orders. -/// The haircut adjusts clearing prices to report lower output amounts, making -/// the bid more conservative. +/// The haircut reduces the reported buy_amount, making the bid more +/// conservative. /// -/// Also verifies that the reported sell amount matches the user's signed -/// sell amount exactly (fill-or-kill requires exact execution). 
+/// Verifies that: +/// - `executedSell == signedSellAmount` (fill-or-kill requires exact execution) +/// - `executedBuy` with haircut < `executedBuy` without haircut (haircut +/// reduces output) #[tokio::test] #[ignore] async fn order_haircut_reduces_score() { @@ -60,7 +65,7 @@ async fn order_haircut_reduces_score() { .solver_fee(Some(eth::U256::from(100))), ) .solution(ab_solution()) - .solvers(vec![tests::setup::test_solver().haircut_bps(500)]) + .solvers(vec![tests::setup::test_solver().haircut_bps(HAIRCUT_BPS)]) .done() .await; @@ -82,6 +87,22 @@ async fn order_haircut_reduces_score() { percentage ); + // Extract executedBuy from baseline (no haircut) + let solution_no_haircut = solve_no_haircut.solution(); + let orders_no_haircut = solution_no_haircut + .get("orders") + .unwrap() + .as_object() + .unwrap(); + let executed_buy_no_haircut = orders_no_haircut + .values() + .next() + .unwrap() + .get("executedBuy") + .and_then(|v| v.as_str()) + .and_then(|s| eth::U256::from_str_radix(s, 10).ok()) + .unwrap(); + // Verify that reported sell amount matches signed amount exactly. // Fill-or-kill orders require exact execution. let solution = solve_with_haircut.solution(); @@ -92,6 +113,11 @@ async fn order_haircut_reduces_score() { .and_then(|v| v.as_str()) .and_then(|s| eth::U256::from_str_radix(s, 10).ok()) .unwrap(); + let executed_buy = order_data + .get("executedBuy") + .and_then(|v| v.as_str()) + .and_then(|s| eth::U256::from_str_radix(s, 10).ok()) + .unwrap(); let limit_sell = order_data .get("limitSell") .and_then(|v| v.as_str()) @@ -111,26 +137,39 @@ async fn order_haircut_reduces_score() { executed_sell, limit_sell ); + + // Verify haircut reduces executedBuy for sell orders by approximately + // HAIRCUT_BPS + let expected_buy = + executed_buy_no_haircut * eth::U256::from(10000 - HAIRCUT_BPS) / eth::U256::from(10000); + assert!( + executed_buy.is_approx_eq(&expected_buy, Some(0.01)), + "Sell order: executedBuy {} should be ~{}% of baseline {} (expected ~{})", + executed_buy, + 100 - HAIRCUT_BPS / 100, + executed_buy_no_haircut, + expected_buy + ); } } /// Test that haircut is properly applied for buy orders. -/// For buy orders, the haircut reduces the effective buy amount, which -/// increases the sell amount the user pays. This reduces surplus and thus the -/// score. Note: The percentage reduction for buy orders differs from sell -/// orders because the haircut is applied to the executed buy amount, not -/// directly to surplus. +/// For buy orders, the haircut increases the sell_amount the user pays. +/// This reduces surplus and thus the score. /// -/// Also verifies that: +/// Verifies that: /// - `executedBuy == signedBuyAmount` (fill-or-kill must execute exactly) -/// - `executedSell <= sellLimit` (don't take more than user's maximum) +/// - `executedSell <= sellLimit` (haircut increases sell, but must stay within +/// limit) +/// - `executedSell` with haircut > `executedSell` without haircut (haircut +/// increases cost) #[tokio::test] #[ignore] async fn buy_order_haircut() { let side = order::Side::Buy; let kind = order::Kind::Limit; let signed_buy_amount = 2u64.eth(); - let sell_limit = 100u64.ether().into_wei(); + let signed_sell_limit = 100u64.ether().into_wei(); // For buy orders, we need to set a buy_amount that creates enough surplus. // The pool has 100000:6000 ratio. 
For a buy order wanting 2.97 B, @@ -143,7 +182,7 @@ async fn buy_order_haircut() { .side(side) .kind(kind) .buy_amount(signed_buy_amount) // Target buy amount (what user signs for) - .sell_amount(sell_limit) // Generous sell limit creates surplus + .sell_amount(signed_sell_limit) // Generous sell limit creates surplus .solver_fee(Some(eth::U256::from(100))), ) .solution(ab_solution()) @@ -162,11 +201,11 @@ async fn buy_order_haircut() { .side(side) .kind(kind) .buy_amount(signed_buy_amount) // Same target buy amount - .sell_amount(sell_limit) // Same generous sell limit + .sell_amount(signed_sell_limit) // Same generous sell limit .solver_fee(Some(eth::U256::from(100))), ) .solution(ab_solution()) - .solvers(vec![tests::setup::test_solver().haircut_bps(500)]) + .solvers(vec![tests::setup::test_solver().haircut_bps(HAIRCUT_BPS)]) .done() .await; @@ -194,6 +233,22 @@ async fn buy_order_haircut() { percentage ); + // Extract executedSell from baseline (no haircut) + let solution_no_haircut = solve_no_haircut.solution(); + let orders_no_haircut = solution_no_haircut + .get("orders") + .unwrap() + .as_object() + .unwrap(); + let executed_sell_no_haircut = orders_no_haircut + .values() + .next() + .unwrap() + .get("executedSell") + .and_then(|v| v.as_str()) + .and_then(|s| eth::U256::from_str_radix(s, 10).ok()) + .unwrap(); + // Verify buy order constraints: // - Fill-or-kill must execute exactly (executedBuy == signedBuyAmount) // - Don't take more than user's maximum (executedSell <= sellLimit) @@ -224,11 +279,11 @@ async fn buy_order_haircut() { signed_buy_amount ); assert!( - executed_sell <= sell_limit, - "Buy order: executedSell {} exceeds sell limit {}. Haircut should reduce surplus, not \ - inflate sell amount!", + executed_sell <= signed_sell_limit, + "Buy order: executedSell {} exceeds sell limit {}. Haircut increases sell_amount but \ + it must still respect the user's limit!", executed_sell, - sell_limit + signed_sell_limit ); assert!( executed_sell <= limit_sell, @@ -236,5 +291,19 @@ async fn buy_order_haircut() { executed_sell, limit_sell ); + + // Verify haircut increases executedSell for buy orders. + // For buy orders, haircut increases the sell amount proportionally. + let haircut_ratio = 1.0 + (HAIRCUT_BPS as f64 / 10000.0); // ~1.05 for 500 bps + let expected_sell = + eth::U256::from((executed_sell_no_haircut.to::() as f64 * haircut_ratio) as u128); + assert!( + executed_sell.is_approx_eq(&expected_sell, Some(0.02)), + "Buy order: executedSell {} should be ~{:.1}% higher than baseline {} (expected ~{})", + executed_sell, + (haircut_ratio - 1.0) * 100.0, + executed_sell_no_haircut, + expected_sell + ); } } diff --git a/crates/e2e/tests/e2e/limit_orders.rs b/crates/e2e/tests/e2e/limit_orders.rs index ed71e6e609..d4de694f72 100644 --- a/crates/e2e/tests/e2e/limit_orders.rs +++ b/crates/e2e/tests/e2e/limit_orders.rs @@ -57,19 +57,21 @@ async fn local_node_no_liquidity_limit_order() { run_test(no_liquidity_limit_order).await; } -/// Test that sell orders with haircut configured still execute on-chain. -/// The haircut reduces the reported surplus but the order should still be -/// fillable and execute successfully. +/// Test that sell orders with haircut configured execute on-chain with +/// haircutted amounts. The haircut reduces both the reported buy_amount and +/// the on-chain buy_amount (they should match). User receives less than +/// without haircut, with the difference going to the settlement contract. 
#[tokio::test] #[ignore] async fn local_node_limit_order_with_haircut() { run_test(sell_order_with_haircut_test).await; } -/// Test that buy orders with haircut configured still execute on-chain. -/// For buy orders, verifies that: -/// - executedBuy >= signedBuyAmount (user gets at least what they signed for) -/// - executedSell <= sellLimit (don't take more than user's maximum) +/// Test that buy orders with haircut configured execute on-chain with +/// haircutted amounts. The haircut increases both the reported sell_amount and +/// the on-chain sell_amount (they should match). Verifies that: +/// - executedBuy == signedBuyAmount (user gets exactly what they signed for) +/// - executedSell includes haircut (but still <= sellLimit) #[tokio::test] #[ignore] async fn local_node_buy_order_with_haircut() { @@ -1182,9 +1184,10 @@ async fn no_liquidity_limit_order(web3: Web3) { assert!(balance_after.checked_sub(balance_before).unwrap() >= 5u64.eth()); } -/// Test that a limit order with haircut configured still executes on-chain. -/// The haircut adjusts clearing prices to report lower surplus, but the order -/// should still be fillable since the limit price allows for enough slack. +/// Test that a limit order with haircut configured executes on-chain with +/// haircutted amounts. The haircut reduces the buy_amount the user receives, +/// both in reported amounts and on-chain execution (they should match). +/// The haircut difference goes to the settlement contract. async fn sell_order_with_haircut_test(web3: Web3) { let mut onchain = OnchainComponents::deploy(web3.clone()).await; @@ -1299,9 +1302,9 @@ async fn sell_order_with_haircut_test(web3: Web3) { .await .unwrap(); - // Verify that haircut (positive slippage) remains in the settlement contract. - // The haircut is 500 bps (5%) of the executed sell amount (10 ETH). - // At 1:1 pool ratio, this is approximately 0.5 ETH worth of token_b. + // Verify that haircut DOES affect on-chain execution. + // The haircut reduces the buy_amount the user receives, with the difference + // going to the settlement contract. let trader_balance_after = token_b.balanceOf(trader_a.address()).call().await.unwrap(); let settlement_balance_after = token_b .balanceOf(*onchain.contracts().gp_settlement.address()) @@ -1316,20 +1319,19 @@ async fn sell_order_with_haircut_test(web3: Web3) { .checked_sub(settlement_balance_before) .unwrap(); - // Expected haircut: 5% of 10 ETH sell amount = 0.5 ETH (in buy token terms at - // ~1:1 ratio). Allow some tolerance for fees and rounding. + // With 500 bps (5%) haircut on ~9.87 ETH buy amount, settlement should receive + // ~0.49 ETH. Allow some tolerance for AMM fees and rounding. assert!( settlement_received >= 0.4.eth() && settlement_received <= 0.6.eth(), - "Settlement contract should have received haircut (positive slippage) between 0.4 and 0.6 \ - ETH, but got {}", + "Settlement contract should have received haircut (~0.49 ETH), but got {}", settlement_received ); - // Expected trader amount: output (~9.87 ETH at 1:1 ratio with 0.3% fee) - // minus haircut (~0.5 ETH) = ~9.37 ETH. Allow tolerance for rounding. + // Expected trader amount: AMM output minus haircut (~9.87 - 0.49 = ~9.38 ETH). + // Haircut reduces what trader receives on-chain. 
assert!( trader_received >= 9u64.eth() && trader_received <= 9.5.eth(), - "Trader should have received between 9 and 9.5 ETH (AMM output minus haircut), but got {}", + "Trader should have received AMM output minus haircut (~9.38 ETH), but got {}", trader_received ); @@ -1379,7 +1381,8 @@ async fn sell_order_with_haircut_test(web3: Web3) { /// Test that a buy order with haircut configured executes correctly. /// For buy orders, the user signs for a specific buy_amount they want to /// receive, and sell_amount is the maximum they're willing to pay. -/// Verifies that reported amounts respect these constraints. +/// Haircut increases the sell_amount on-chain (user pays more). +/// Verifies that reported amounts match on-chain execution. async fn buy_order_with_haircut_test(web3: Web3) { let mut onchain = OnchainComponents::deploy(web3.clone()).await; @@ -1543,15 +1546,15 @@ async fn buy_order_with_haircut_test(web3: Web3) { sell_limit_u256 ); - // 3. Reported sell_amount should be close to what's actually needed (~5.04 ETH - // for buying 5 ETH at 1:1 with 0.3% fee). - // We check that sell_amount is less than 5.2 ETH (5.0 ETH + 5% haircut = 5.25 - // ETH). - let reasonable_max_sell = U256::from(5_200_000_000_000_000_000u128); // 5.2 ETH + // 3. For buy orders, haircut INCREASES sell_amount (user pays more). Base + // needed is ~5.04 ETH, with 5% haircut on 5 ETH buy amount = 0.25 ETH. So + // sell_amount should be ~5.04 + 0.25 = ~5.29 ETH. We allow up to 5.5 ETH to + // account for variance. + let reasonable_max_sell = 5.5.eth(); assert!( reported_sell_amount <= reasonable_max_sell, - "Driver reported sell_amount {} exceeds expected max {} (actual needed is ~5.04 ETH). \ - Haircut should reduce surplus/score, not inflate the reported sell amount!", + "Driver reported sell_amount {} exceeds expected max {} (actual needed + haircut is ~5.29 \ + ETH)", reported_sell_amount, reasonable_max_sell ); From 05d199675ad5e9a48c4a9fc1e6e77312bf8a607f Mon Sep 17 00:00:00 2001 From: "Jan [Yann]" <4518474+fafk@users.noreply.github.com> Date: Mon, 2 Feb 2026 17:55:31 +0100 Subject: [PATCH 025/219] Support fractional vol fee bips in orderbook (#4112) # Description We were rounding fractional bips (e.g. `0.3`) to zero. This PR increases the scale so we can handle basis point values lower than 0. At this moment we have volume fee overrides for correlated assets sets to 0.3 and it works fine in autopilot, but /quote endpoint rounds to zero instead, so this fix is needed. ## How to test Unit tests & I tested by deploying this branch to staging. --------- Co-authored-by: ilya --- crates/orderbook/src/quoter.rs | 58 ++++++++++++++++++++++++++++++---- crates/shared/src/arguments.rs | 48 +++++++++++++++++++++++++--- 2 files changed, 94 insertions(+), 12 deletions(-) diff --git a/crates/orderbook/src/quoter.rs b/crates/orderbook/src/quoter.rs index b7ffde175f..ceec043b5d 100644 --- a/crates/orderbook/src/quoter.rs +++ b/crates/orderbook/src/quoter.rs @@ -228,14 +228,18 @@ fn get_vol_fee_adjusted_quote_data( // Calculate the volume (surplus token amount) to apply fee to // Following driver's logic in // crates/driver/src/domain/competition/solution/fee.rs:189-202: + // Use high precision scaling to support sub-basis-point fee factors (e.g., 0.3 + // BPS) + let scaled_factor = U256::from(factor.to_high_precision()); + let scale = U512::from(FeeFactor::HIGH_PRECISION_SCALE); let (adjusted_sell_amount, adjusted_buy_amount) = match side { OrderQuoteSide::Sell { .. 
} => { // For SELL orders, fee is calculated on buy amount let protocol_fee = U256::uint_try_from( quote .buy_amount - .widening_mul(U256::from(factor.to_bps())) - .checked_div(U512::from(FeeFactor::MAX_BPS)) + .widening_mul(scaled_factor) + .checked_div(scale) .ok_or_else(|| anyhow::anyhow!("volume fee calculation division by zero"))?, ) .map_err(|_| anyhow::anyhow!("volume fee calculation overflow"))?; @@ -249,11 +253,10 @@ fn get_vol_fee_adjusted_quote_data( // For BUY orders, fee is calculated on sell amount + network fee. // Network fee is already in sell token, so it is added to get the total volume. let total_sell_volume = quote.sell_amount.saturating_add(quote.fee_amount); - let factor = U256::from(factor.to_bps()); - let volume_bps: Uint<512, 8> = total_sell_volume.widening_mul(factor); + let volume_scaled: Uint<512, 8> = total_sell_volume.widening_mul(scaled_factor); let protocol_fee = U256::uint_try_from( - volume_bps - .checked_div(U512::from(FeeFactor::MAX_BPS)) + volume_scaled + .checked_div(scale) .ok_or_else(|| anyhow::anyhow!("volume fee calculation division by zero"))?, ) .map_err(|_| anyhow::anyhow!("volume fee calculation overflow"))?; @@ -268,7 +271,7 @@ fn get_vol_fee_adjusted_quote_data( Ok(AdjustedQuoteData { sell_amount: adjusted_sell_amount, buy_amount: adjusted_buy_amount, - protocol_fee_bps: Some(factor.to_bps().to_string()), + protocol_fee_bps: Some(factor.to_bps_str()), }) } @@ -568,4 +571,45 @@ mod tests { assert_eq!(result.buy_amount, 100u64.eth()); assert_eq!(result.protocol_fee_bps, None); } + + #[test] + fn test_volume_fee_sub_basis_point_precision() { + // Test sub-BPS precision: 0.00003 = 0.3 BPS + let volume_fee = FeeFactor::try_from(0.00003).unwrap(); + let volume_fee_config = VolumeFeeConfig { + factor: Some(volume_fee), + effective_from_timestamp: None, + }; + let volume_fee_policy = VolumeFeePolicy::new(vec![], Some(volume_fee), false); + + // Large amount to make the sub-BPS fee visible + let sell_amount = 1_000_000u64.eth(); + let buy_amount = 1_000_000u64.eth(); + let quote = create_test_quote(sell_amount, buy_amount); + let side = OrderQuoteSide::Sell { + sell_amount: model::quote::SellAmount::BeforeFee { + value: number::nonzero::NonZeroU256::try_from(sell_amount).unwrap(), + }, + }; + + let result = get_vol_fee_adjusted_quote_data( + "e, + &side, + Some(&volume_fee_config), + &volume_fee_policy, + TEST_BUY_TOKEN, + TEST_SELL_TOKEN, + ) + .unwrap(); + + // Protocol fee = 0.3 BPS + assert_eq!(result.protocol_fee_bps, Some("0.3".to_string())); + assert_eq!(result.sell_amount, sell_amount); + + // Expected fee: 1_000_000 * 0.00003 = 30 tokens + // buy_amount should be reduced by 30 tokens + let expected_fee = 30u64.eth(); + let expected_buy = buy_amount - expected_fee; + assert_eq!(result.buy_amount, expected_buy); + } } diff --git a/crates/shared/src/arguments.rs b/crates/shared/src/arguments.rs index 46f542e195..9f9238f006 100644 --- a/crates/shared/src/arguments.rs +++ b/crates/shared/src/arguments.rs @@ -549,17 +549,30 @@ impl FromStr for ExternalSolver { pub struct FeeFactor(f64); impl FeeFactor { - /// Number of basis points that make up 100%. - pub const MAX_BPS: u32 = 10_000; + /// High precision scale factor (1 million) for sub-basis-point precision. + /// Allows representing factors like 0.00003 (0.3 BPS) without rounding to + /// 0. Also used for converting to BPS string with 2 decimal precision + /// (1_000_000 / 100 = 10_000 BPS scale). 
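+    // Worked example (illustrative): a factor of 0.00003 scales to
+    // 0.00003 * 1_000_000 = 30, and 30 / 100 = 0.3, i.e. 0.3 BPS, so a scale of
+    // 1_000_000 yields two decimal places of BPS precision.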
+    pub const HIGH_PRECISION_SCALE: u64 = 1_000_000;
 
     pub fn new(factor: f64) -> Self {
         Self(factor)
     }
 
     /// Converts the fee factor to basis points (BPS).
-    /// For example, 0.0002 -> 2 BPS
-    pub fn to_bps(&self) -> u64 {
-        (self.0 * f64::from(Self::MAX_BPS)).round() as u64
+    /// Supports fractional BPS values (e.g., 0.00003 -> "0.3").
+    /// Rounds to 2 decimal places to avoid floating point representation
+    /// issues.
+    pub fn to_bps_str(&self) -> String {
+        let bps = (self.0 * Self::HIGH_PRECISION_SCALE as f64).round() / 100.0;
+        format!("{bps}")
+    }
+
+    /// Converts the fee factor to a high precision scaled integer.
+    /// For example, 0.00003 -> 30 (with scale of 1_000_000).
+    /// This allows sub-basis-point precision in calculations.
+    pub fn to_high_precision(&self) -> u64 {
+        (self.0 * Self::HIGH_PRECISION_SCALE as f64).round() as u64
     }
 
     /// Get the inner value
@@ -714,4 +727,29 @@ mod test {
             .is_err()
         );
     }
+
+    #[test]
+    fn fee_factor_to_bps() {
+        assert_eq!(FeeFactor::new(0.0001).to_bps_str(), "1");
+        assert_eq!(FeeFactor::new(0.001).to_bps_str(), "10");
+
+        // Fractional BPS values (sub-basis-point precision)
+        assert_eq!(FeeFactor::new(0.00003).to_bps_str(), "0.3");
+        assert_eq!(FeeFactor::new(0.00005).to_bps_str(), "0.5");
+        assert_eq!(FeeFactor::new(0.000025).to_bps_str(), "0.25");
+        assert_eq!(FeeFactor::new(0.000075).to_bps_str(), "0.75");
+        assert_eq!(FeeFactor::new(0.00015).to_bps_str(), "1.5");
+
+        assert_eq!(FeeFactor::new(0.0).to_bps_str(), "0");
+    }
+
+    #[test]
+    fn fee_factor_to_high_precision() {
+        // Verify high precision scaling
+        assert_eq!(FeeFactor::new(0.00003).to_high_precision(), 30);
+        assert_eq!(FeeFactor::new(0.0001).to_high_precision(), 100);
+        assert_eq!(FeeFactor::new(0.001).to_high_precision(), 1000);
+        assert_eq!(FeeFactor::new(0.01).to_high_precision(), 10_000);
+        assert_eq!(FeeFactor::new(0.1).to_high_precision(), 100_000);
+    }
 }
From 5690fc4de0d92f330a25e2d379c406ad78657ae3 Mon Sep 17 00:00:00 2001
From: Martin Magnus
Date: Tue, 3 Feb 2026 09:11:26 +0100
Subject: [PATCH 026/219] Improve autopilot maintenance (#4113)

# Description

While looking into the degraded time to happy moo SLI it became apparent that ethflow orders have a significantly worse SLI compared to "regular" orders. Ethflow orders are not harder to solve for than any other orders, but they are special in the way they enter the system. Instead of having a REST API call that puts those orders into the DB, they get placed by calling the ethflow contract onchain. The autopilot then indexes those events and puts them into the DB. Since the autopilot run loop is synced to the blockchain (start a new auction right after seeing a new block), ethflow orders are comparable to regular orders that ALWAYS get placed at the worst possible time (immediately before cutting the auction). Due to being overwhelmed with indexing ethflow orders because of a trade incentive, we moved ethflow indexing off of the critical path (see [here](https://github.com/cowprotocol/services/pull/3849)), but that also had the consequence of more ethflow orders not making it into the first possible auction, which immediately delays them by at least 12s.

# Changes

This PR puts ethflow order indexing back on the critical path while still avoiding the issue that caused us to move it off the critical path in the first place.
Instead of having a system where the autopilot triggers the maintenance before a new auction or after a new block appears (when waiting for submitted solutions), plus an additional background task that checks every second for new ethflow orders that need indexing, this PR moves autopilot maintenance (i.e. block indexing) completely into a background task which triggers ASAP when the system sees a new block. In order to build the auction only after the blocks have been indexed, this background task feeds a channel of processed blocks. The autopilot then only has to wait for this channel to yield a block with a high enough block number.

So the properties of the new solution are:
* event indexing has as little delay as possible
* indexing runs concurrently so it's as fast as possible (without speeding up the individual code paths)
* autopilot can wait for data from a given block to be processed fully
* autopilot stops waiting after a configurable amount of time to keep running auctions even if indexing is slow for whatever reason

## How to test

Covered by existing e2e tests

---
 Cargo.lock                          |   1 +
 crates/autopilot/Cargo.toml         |   1 +
 crates/autopilot/src/maintenance.rs | 163 +++++++++++++++++-----------
 crates/autopilot/src/run.rs         |  15 ++-
 crates/autopilot/src/run_loop.rs    |  15 +--
 5 files changed, 113 insertions(+), 82 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index bad5e2cfac..ff0a319dcd 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1203,6 +1203,7 @@ dependencies = [
 "thiserror 1.0.69",
 "tikv-jemallocator",
 "tokio",
+ "tokio-stream",
 "tower 0.4.13",
 "tower-http 0.4.4",
 "tracing",
diff --git a/crates/autopilot/Cargo.toml b/crates/autopilot/Cargo.toml
index c75a4ad24b..4d293dff5b 100644
--- a/crates/autopilot/Cargo.toml
+++ b/crates/autopilot/Cargo.toml
@@ -59,6 +59,7 @@ sqlx = { workspace = true }
 strum = { workspace = true }
 thiserror = { workspace = true }
 tokio = { workspace = true, features = ["macros", "rt-multi-thread", "signal", "sync", "time"] }
+tokio-stream = { workspace = true }
 tower = { workspace = true }
 tower-http = { workspace = true, features = ["trace"] }
 tracing = { workspace = true }
diff --git a/crates/autopilot/src/maintenance.rs b/crates/autopilot/src/maintenance.rs
index 766d6a8b70..ff3c4ca74f 100644
--- a/crates/autopilot/src/maintenance.rs
+++ b/crates/autopilot/src/maintenance.rs
@@ -25,9 +25,45 @@ use {
         sync::Arc,
         time::{Duration, Instant},
     },
-    tokio::sync::Mutex,
+    tokio::sync::watch,
+    tokio_stream::wrappers::WatchStream,
 };

+/// Component to sync with the maintenance logic that runs in a background task.
+/// This allows us to run the maintenance logic ASAP but still wait for it to
+/// finish in a convenient manner.
+#[derive(Clone)]
+pub struct MaintenanceSync {
+    /// How long the autopilot wants to wait at most.
+    timeout: Duration,
+    last_processed_block: watch::Receiver,
+}
+
+impl MaintenanceSync {
+    pub async fn wait_until_block_processed(&self, block: u64) {
+        let _timer = observe::metrics::metrics()
+            .on_auction_overhead_start("autopilot", "wait_for_maintenance");
+
+        if let Err(_timeout) = tokio::time::timeout(self.timeout, self.wait_inner(block)).await {
+            tracing::debug!("timed out waiting for maintenance");
+        }
+    }
+
+    async fn wait_inner(&self, target_block: u64) {
+        if *self.last_processed_block.borrow() >= target_block {
+            return;
+        }
+
+        let mut stream = WatchStream::new(self.last_processed_block.clone());
+        loop {
+            let processed_block = stream.next().await.unwrap();
+            if processed_block >= target_block {
+                return;
+            }
+        }
+    }
+}
+
 /// Coordinates all the updates that need to run a new block
 /// to ensure a consistent view of the system.
 pub struct Maintenance {
@@ -38,56 +74,70 @@ pub struct Maintenance {
     db_cleanup: Postgres,
     /// All indexing tasks to keep cow amms up to date.
     cow_amm_indexer: Vec>,
-    /// On which block we last ran an update successfully.
-    last_processed: Mutex,
-    /// Limits the amount of time the autopilot may spend running the
-    /// maintenance logic between 2 auctions. When this times out we prefer
-    /// running a not fully updated auction over stalling the protocol any
-    /// further.
-    timeout: Duration,
+    /// Tasks to index ethflow orders that were submitted onchain.
+    ethflow_indexer: Vec,
 }

 impl Maintenance {
     pub fn new(
         settlement_indexer: EventUpdater,
         db_cleanup: Postgres,
-        timeout: Duration,
     ) -> Self {
         Self {
             settlement_indexer,
             db_cleanup,
             cow_amm_indexer: Default::default(),
-            last_processed: Default::default(),
-            timeout,
+            ethflow_indexer: Default::default(),
         }
     }

-    /// Runs all update tasks in a coordinated manner to ensure the system
-    /// has a consistent state.
-    pub async fn update(&self, new_block: &BlockInfo) {
-        let mut last_block = self.last_processed.lock().await;
-        metrics().last_seen_block.set(new_block.number);
-        if last_block.number > new_block.number || last_block.hash == new_block.hash {
-            // `new_block` is neither newer than `last_block` nor a reorg
-            return;
+    /// Spawns a background task continuously processing the latest block.
+    /// Returns a [`MaintenanceSync`] that handles waiting for a specific
+    /// block to be processed.
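+    // Rough usage sketch (simplified; the exact call sites live in the run
+    // loop): keep the returned `MaintenanceSync` and, before cutting the
+    // auction for block `n`, await `wait_until_block_processed(n)`, which
+    // returns once the background task has published a processed block >= `n`
+    // or the configured timeout elapses.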
+ pub fn spawn_maintenance_task( + self, + blocks: CurrentBlockWatcher, + timeout: Duration, + ) -> MaintenanceSync { + let (sender, receiver) = watch::channel(blocks.borrow().number); + + tokio::task::spawn(async move { + let mut stream = into_stream(blocks); + loop { + let block = stream + .next() + .await + .expect("block stream terminated unexpectedly"); + self.index_until_block(block, &sender).await; + } + }); + + MaintenanceSync { + last_processed_block: receiver, + timeout, } + } + async fn index_until_block(&self, block: BlockInfo, last_processed_block: &watch::Sender) { + metrics().last_seen_block.set(block.number); let start = Instant::now(); - if let Err(err) = tokio::time::timeout(self.timeout, self.update_inner()).await { - tracing::warn!(?err, block = new_block.number, "failed to run maintenance"); + if let Err(err) = self.update_inner().await { + tracing::warn!(?err, block = block.number, "failed to run maintenance"); metrics().updates.with_label_values(&["error"]).inc(); return; } + tracing::info!( - block = new_block.number, + block = block.number, time = ?start.elapsed(), "successfully ran maintenance task" ); - + metrics().last_updated_block.set(block.number); metrics().updates.with_label_values(&["success"]).inc(); - metrics().last_updated_block.set(new_block.number); - *last_block = *new_block; + if let Err(err) = last_processed_block.send(block.number) { + tracing::warn!(?err, "nobody listening for processed blocks anymore"); + } } async fn update_inner(&self) -> Result<()> { @@ -99,6 +149,22 @@ impl Maintenance { self.settlement_indexer.run_maintenance() ), Self::timed_future("db_cleanup", self.db_cleanup.run_maintenance()), + Self::timed_future( + "cow_amm_indexer", + futures::future::try_join_all( + self.cow_amm_indexer + .iter() + .map(|indexer| indexer.run_maintenance()), + ), + ), + Self::timed_future( + "ethflow_indexer", + futures::future::try_join_all( + self.ethflow_indexer + .iter() + .map(|indexer| indexer.run_maintenance()), + ), + ), )?; Ok(()) @@ -106,18 +172,15 @@ impl Maintenance { /// Registers all maintenance tasks that are necessary to correctly support /// ethflow orders. - pub fn spawn_ethflow_indexer(&mut self, ethflow_indexer: EthflowIndexer) { - tokio::task::spawn(async move { - loop { - let _ = - Self::timed_future("ethflow_indexer", ethflow_indexer.run_maintenance()).await; - tokio::time::sleep(std::time::Duration::from_millis(1_000)).await; - } - }); + pub fn add_ethflow_indexer(&mut self, ethflow_indexer: EthflowIndexer) { + self.ethflow_indexer.push(ethflow_indexer); } - pub fn with_cow_amms(&mut self, registry: &cow_amm::Registry) { - self.cow_amm_indexer = registry.maintenance_tasks().clone(); + /// Registers all maintenance tasks that are necessary to correctly support + /// CoW AMMs. + pub fn add_cow_amm_indexer(&mut self, registry: &cow_amm::Registry) { + self.cow_amm_indexer + .extend(registry.maintenance_tasks().clone()); } /// Runs the future and collects runtime metrics. @@ -129,36 +192,6 @@ impl Maintenance { let _timer2 = observe::metrics::metrics().on_auction_overhead_start("autopilot", label); fut.await } - - /// Spawns a background task that runs on every new block but also - /// at least after every `update_interval`. 
- pub fn spawn_cow_amm_indexing_task(self_: Arc, current_block: CurrentBlockWatcher) { - tokio::task::spawn(async move { - let mut stream = into_stream(current_block); - loop { - let _ = match stream.next().await { - Some(block) => { - metrics().last_seen_block.set(block.number); - block - } - None => panic!("block stream terminated unexpectedly"), - }; - - // TODO: move this back into `Self::update_inner()` once we - // store cow amms in the DB to avoid incredibly slow restarts. - let _ = Self::timed_future( - "cow_amm_indexer", - futures::future::try_join_all( - self_ - .cow_amm_indexer - .iter() - .map(|indexer| async move { indexer.run_maintenance().await }), - ), - ) - .await; - } - }); - } } type EthflowIndexer = diff --git a/crates/autopilot/src/run.rs b/crates/autopilot/src/run.rs index 9784938a5f..3faef30513 100644 --- a/crates/autopilot/src/run.rs +++ b/crates/autopilot/src/run.rs @@ -577,12 +577,8 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { let trusted_tokens = AutoUpdatingTokenList::from_configuration(market_makable_token_list_configuration).await; - let mut maintenance = Maintenance::new( - settlement_event_indexer, - db_write.clone(), - args.max_maintenance_timeout, - ); - maintenance.with_cow_amms(&cow_amm_registry); + let mut maintenance = Maintenance::new(settlement_event_indexer, db_write.clone()); + maintenance.add_cow_amm_indexer(&cow_amm_registry); if !args.ethflow_contracts.is_empty() { let ethflow_refund_start_block = determine_ethflow_refund_indexing_start( @@ -638,7 +634,7 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { .await .expect("Should be able to initialize event updater. Database read issues?"); - maintenance.spawn_ethflow_indexer(onchain_order_indexer); + maintenance.add_ethflow_indexer(onchain_order_indexer); // refunds are not critical for correctness and can therefore be indexed // sporadically in a background task let service_maintainer = ServiceMaintenance::new(vec![Arc::new(refund_event_handler)]); @@ -679,6 +675,9 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { .into_iter() .collect(); + let awaiter = maintenance + .spawn_maintenance_task(eth.current_block().clone(), args.max_maintenance_timeout); + let run = RunLoop::new( run_loop_config, eth, @@ -690,7 +689,7 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { liveness: liveness.clone(), startup, }, - Arc::new(maintenance), + awaiter, ); run.run_forever(shutdown_controller).await; diff --git a/crates/autopilot/src/run_loop.rs b/crates/autopilot/src/run_loop.rs index 0b2ef60f33..7a8a0be012 100644 --- a/crates/autopilot/src/run_loop.rs +++ b/crates/autopilot/src/run_loop.rs @@ -19,7 +19,7 @@ use { solvers::dto::{settle, solve}, }, leader_lock_tracker::LeaderLockTracker, - maintenance::Maintenance, + maintenance::MaintenanceSync, run::Liveness, shutdown_controller::ShutdownController, solvable_orders::SolvableOrdersCache, @@ -82,7 +82,7 @@ pub struct RunLoop { probes: Probes, /// Maintenance tasks that should run before every runloop to have /// the most recent data available. 
- maintenance: Arc, + maintenance: MaintenanceSync, winner_selection: winner_selection::Arbitrator, /// Notifier that wakes the main loop on new blocks or orders wake_notify: Arc, @@ -98,7 +98,7 @@ impl RunLoop { solvable_orders_cache: Arc, trusted_tokens: AutoUpdatingTokenList, probes: Probes, - maintenance: Arc, + maintenance: MaintenanceSync, ) -> Self { let max_winners = config.max_winners_per_auction.get(); let weth = eth.contracts().wrapped_native_token(); @@ -125,11 +125,6 @@ impl RunLoop { } pub async fn run_forever(self, mut control: ShutdownController) { - Maintenance::spawn_cow_amm_indexing_task( - self.maintenance.clone(), - self.eth.current_block().clone(), - ); - let mut last_auction = None; let mut last_block = None; @@ -263,7 +258,9 @@ impl RunLoop { /// the latest available state. async fn run_maintenance(&self, block: &BlockInfo) { let start = Instant::now(); - self.maintenance.update(block).await; + self.maintenance + .wait_until_block_processed(block.number) + .await; Metrics::ran_maintenance(start.elapsed()); } From e68c266c4b6ba7cb5ac29d23c722d50feb9dc834 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Duarte?= Date: Tue, 3 Feb 2026 12:38:13 +0000 Subject: [PATCH 027/219] Remove ethcontract+web3+primitive-types (#4106) Completes the Alloy migration by removing the last remaining legacy Ethereum libraries: `ethcontract`, `web3`, and `primitive-types`. These dependencies are no longer needed and can be fully removed, simplifying the dependency tree. **Key change**: The labelling layer for observability now operates at the `Web3` wrapper level instead of directly on `DynProvider`, ensuring the wallet state is properly preserved when creating labeled provider instances. # Changes - [x] Remove `ethcontract`, `web3`, and `primitive-types` from workspace dependencies - [x] Delete unused legacy ethrpc implementations (`buffered.rs`, `http.rs`, `instrumented.rs`, `alloy/conversions.rs`) - [x] Migrate `ProviderLabelingExt` from `DynProvider` to `Web3` wrapper, preserving wallet state across labeled instances - [x] Clean up ethrpc module structure and simplify exports - [x] Update imports across affected crates to use Alloy types only - [x] Remove legacy references from contract vendoring script and test setup ## How to test Existing tests --- Cargo.lock | 194 +----- Cargo.toml | 3 - README.md | 3 +- crates/autopilot/Cargo.toml | 1 - crates/autopilot/src/boundary/mod.rs | 6 +- crates/autopilot/src/infra/blockchain/mod.rs | 2 - crates/autopilot/src/run.rs | 29 +- crates/contracts/src/bin/vendor.rs | 4 - crates/driver/Cargo.toml | 2 - crates/driver/src/boundary/mod.rs | 6 +- .../driver/src/infra/blockchain/contracts.rs | 4 +- crates/driver/src/infra/blockchain/mod.rs | 18 +- crates/driver/src/infra/config/file/mod.rs | 10 - crates/driver/src/tests/setup/mod.rs | 3 +- crates/e2e/tests/e2e/hooks.rs | 11 +- crates/ethrpc/Cargo.toml | 3 - crates/ethrpc/src/alloy/conversions.rs | 173 ----- crates/ethrpc/src/alloy/instrumentation.rs | 31 +- crates/ethrpc/src/alloy/mod.rs | 25 +- .../{block_stream/mod.rs => block_stream.rs} | 6 +- crates/ethrpc/src/buffered.rs | 600 ------------------ crates/ethrpc/src/http.rs | 305 --------- crates/ethrpc/src/instrumented.rs | 151 ----- crates/ethrpc/src/lib.rs | 108 +--- crates/ethrpc/src/mock.rs | 182 +----- crates/orderbook/src/run.rs | 22 +- crates/refunder/src/lib.rs | 4 +- crates/shared/Cargo.toml | 2 - crates/shared/src/account_balances/mod.rs | 4 +- .../shared/src/account_balances/simulation.rs | 4 +- 
.../src/bad_token/token_owner_finder/mod.rs | 4 +- crates/shared/src/ethrpc.rs | 18 +- crates/shared/src/order_validation.rs | 6 +- .../src/price_estimation/competition/quote.rs | 27 +- crates/shared/src/price_estimation/factory.rs | 4 +- .../trade_verifier/balance_overrides/mod.rs | 4 +- .../src/signature_validator/simulation.rs | 21 +- .../sources/balancer_v2/pool_fetching/mod.rs | 7 +- .../src/sources/balancer_v2/pools/weighted.rs | 4 +- crates/shared/src/sources/uniswap_v2/mod.rs | 3 +- .../src/sources/uniswap_v3/pool_fetching.rs | 3 +- crates/solvers/src/domain/solver.rs | 2 +- 42 files changed, 133 insertions(+), 1886 deletions(-) delete mode 100644 crates/ethrpc/src/alloy/conversions.rs rename crates/ethrpc/src/{block_stream/mod.rs => block_stream.rs} (99%) delete mode 100644 crates/ethrpc/src/buffered.rs delete mode 100644 crates/ethrpc/src/http.rs delete mode 100644 crates/ethrpc/src/instrumented.rs diff --git a/Cargo.lock b/Cargo.lock index ff0a319dcd..f94928ee4f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -102,7 +102,7 @@ dependencies = [ "k256", "once_cell", "rand 0.8.5", - "secp256k1 0.30.0", + "secp256k1", "serde", "serde_json", "serde_with", @@ -1209,7 +1209,6 @@ dependencies = [ "tracing", "url", "vergen", - "web3", "winner-selection", ] @@ -2264,12 +2263,6 @@ dependencies = [ "tracing-subscriber", ] -[[package]] -name = "convert_case" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" - [[package]] name = "convert_case" version = "0.6.0" @@ -2597,19 +2590,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "derive_more" -version = "0.99.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6edb4b64a43d977b8e99788fe3a04d483834fba1215a7e02caa415b626497f7f" -dependencies = [ - "convert_case 0.4.0", - "proc-macro2", - "quote", - "rustc_version 0.4.1", - "syn 2.0.114", -] - [[package]] name = "derive_more" version = "1.0.0" @@ -2718,7 +2698,6 @@ dependencies = [ "cow-amm", "dashmap", "derive_more 1.0.0", - "ethcontract", "ethrpc", "futures", "hex-literal", @@ -2755,7 +2734,6 @@ dependencies = [ "tracing", "url", "vergen", - "web3", ] [[package]] @@ -2925,90 +2903,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "ethabi" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7413c5f74cc903ea37386a8965a936cbeb334bd270862fdece542c1b2dcbc898" -dependencies = [ - "ethereum-types", - "hex", - "once_cell", - "regex", - "serde", - "serde_json", - "sha3", - "thiserror 1.0.69", - "uint", -] - -[[package]] -name = "ethbloom" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" -dependencies = [ - "crunchy", - "fixed-hash", - "impl-rlp", - "impl-serde", - "tiny-keccak", -] - -[[package]] -name = "ethcontract" -version = "0.25.9" -source = "git+https://github.com/cowprotocol/ethcontract-rs?rev=8e112a88988040cde6110379ee6d1be768a13244#8e112a88988040cde6110379ee6d1be768a13244" -dependencies = [ - "arrayvec", - "aws-config", - "aws-sdk-kms", - "ethcontract-common", - "futures", - "futures-timer", - "hex", - "jsonrpc-core", - "lazy_static", - "primitive-types", - "rlp", - "secp256k1 0.27.0", - "serde", - "serde_json", - "thiserror 1.0.69", - "uint", - "web3", - "zeroize", -] - -[[package]] -name = "ethcontract-common" -version = "0.25.9" -source = 
"git+https://github.com/cowprotocol/ethcontract-rs?rev=8e112a88988040cde6110379ee6d1be768a13244#8e112a88988040cde6110379ee6d1be768a13244" -dependencies = [ - "ethabi", - "hex", - "serde", - "serde_derive", - "serde_json", - "thiserror 1.0.69", - "tiny-keccak", - "web3", -] - -[[package]] -name = "ethereum-types" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee" -dependencies = [ - "ethbloom", - "fixed-hash", - "impl-rlp", - "impl-serde", - "primitive-types", - "uint", -] - [[package]] name = "ethrpc" version = "0.1.0" @@ -3017,13 +2911,11 @@ dependencies = [ "anyhow", "async-trait", "const-hex", - "ethcontract", "futures", "itertools 0.14.0", "jsonrpc-core", "mockall", "observe", - "primitive-types", "prometheus", "prometheus-metric-storage", "rand 0.8.5", @@ -3036,7 +2928,6 @@ dependencies = [ "tower 0.4.13", "tracing", "url", - "web3", ] [[package]] @@ -3931,16 +3822,6 @@ dependencies = [ "unicode-normalization", ] -[[package]] -name = "idna" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - [[package]] name = "idna" version = "1.1.0" @@ -3971,24 +3852,6 @@ dependencies = [ "parity-scale-codec", ] -[[package]] -name = "impl-rlp" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" -dependencies = [ - "rlp", -] - -[[package]] -name = "impl-serde" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebc88fc67028ae3db0c853baa36269d398d5f45b6982f95549ff5def78c935cd" -dependencies = [ - "serde", -] - [[package]] name = "impl-trait-for-tuples" version = "0.2.3" @@ -5179,8 +5042,6 @@ checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" dependencies = [ "fixed-hash", "impl-codec", - "impl-rlp", - "impl-serde", "uint", ] @@ -6043,15 +5904,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "secp256k1" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25996b82292a7a57ed3508f052cfff8640d38d32018784acd714758b43da9c8f" -dependencies = [ - "secp256k1-sys 0.8.2", -] - [[package]] name = "secp256k1" version = "0.30.0" @@ -6060,19 +5912,10 @@ checksum = "b50c5943d326858130af85e049f2661ba3c78b26589b8ab98e65e80ae44a1252" dependencies = [ "bitcoin_hashes", "rand 0.8.5", - "secp256k1-sys 0.10.1", + "secp256k1-sys", "serde", ] -[[package]] -name = "secp256k1-sys" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4473013577ec77b4ee3668179ef1186df3146e2cf2d927bd200974c6fe60fd99" -dependencies = [ - "cc", -] - [[package]] name = "secp256k1-sys" version = "0.10.1" @@ -6349,7 +6192,6 @@ dependencies = [ "number", "observe", "order-validation", - "primitive-types", "prometheus", "prometheus-metric-storage", "rand 0.8.5", @@ -6367,7 +6209,6 @@ dependencies = [ "tracing", "tracing-subscriber", "url", - "web3", ] [[package]] @@ -7860,37 +7701,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "web3" -version = "0.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5388522c899d1e1c96a4c307e3797e0f697ba7c77dd8e0e625ecba9dd0342937" -dependencies = [ - "arrayvec", - "base64 0.21.7", - "bytes", - "derive_more 
0.99.20", - "ethabi", - "ethereum-types", - "futures", - "futures-timer", - "headers", - "hex", - "idna 0.4.0", - "jsonrpc-core", - "log", - "once_cell", - "parking_lot", - "pin-project", - "reqwest 0.11.27", - "rlp", - "secp256k1 0.27.0", - "serde", - "serde_json", - "tiny-keccak", - "url", -] - [[package]] name = "webpki-roots" version = "0.26.11" diff --git a/Cargo.toml b/Cargo.toml index d0c677d393..773660c1ac 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,7 +17,6 @@ clap = { version = "4.5.6", features = ["derive", "env"] } dashmap = "6.1.0" derivative = "2.2.0" derive_more = { version = "1.0.0", features = ["full"] } -ethcontract = { git = "https://github.com/cowprotocol/ethcontract-rs", rev = "8e112a88988040cde6110379ee6d1be768a13244", default-features = false, features = ["aws-kms"] } mimalloc = "0.1.43" tikv-jemallocator = { version = "0.6", features = ["unprefixed_malloc_on_supported_platforms", "profiling"] } jemalloc_pprof = { version = "0.8", features = ["symbolize"] } @@ -34,7 +33,6 @@ itertools = "0.14" maplit = "1.0.2" mockall = "0.12.1" num = "0.4.3" -primitive-types = "0.12" prometheus = "0.13.4" prometheus-metric-storage = "0.5.0" rand = "0.8.5" @@ -56,7 +54,6 @@ tracing = "0.1.41" tracing-subscriber = { version = "0.3.19", features = ["json"] } url = "2.5.0" warp = { git = 'https://github.com/cowprotocol/warp.git', rev = "586244e", default-features = false } -web3 = { version = "0.19.0", default-features = false } app-data = { path = "crates/app-data" } arc-swap = "1.7.1" async-stream = "0.3.5" diff --git a/README.md b/README.md index d7a257b7fe..4d5268fce2 100644 --- a/README.md +++ b/README.md @@ -31,7 +31,7 @@ The `autopilot` connects to the same PostgreSQL database as the `orderbook` and There are additional crates that live in the cargo workspace. 
-- `contracts` provides _[ethcontract-rs](https://github.com/gnosis/ethcontract-rs)_ based smart contract bindings +- `contracts` provides Alloy-based smart contract bindings - `database` provides the shared database and storage layer logic shared between the `autopilot` and `orderbook` - `driver` an in-development binary that intends to replace the `solver`; it has a slightly different design that allows co-location with external solvers - `e2e` end-to-end tests @@ -160,4 +160,3 @@ Each process opens a UNIX socket at `/tmp/log_filter_override__ Web3 { - let http_factory = - shared::http_client::HttpClientFactory::new(&shared::http_client::Arguments { - http_timeout: std::time::Duration::from_secs(10), - }); - shared::ethrpc::web3(ethrpc_args, &http_factory, ethrpc, "base") + shared::ethrpc::web3(ethrpc_args, ethrpc, "base") } pub struct SolvableOrders { diff --git a/crates/autopilot/src/infra/blockchain/mod.rs b/crates/autopilot/src/infra/blockchain/mod.rs index e3d136e5a8..b771441d45 100644 --- a/crates/autopilot/src/infra/blockchain/mod.rs +++ b/crates/autopilot/src/infra/blockchain/mod.rs @@ -178,8 +178,6 @@ impl From for eth::CallFrame { #[derive(Debug, Error)] pub enum Error { - #[error("web3 error: {0:?}")] - Web3(#[from] web3::error::Error), #[error("alloy transport error: {0:?}")] Alloy(#[from] alloy::transports::TransportError), #[error("missing field {0}, node client bug?")] diff --git a/crates/autopilot/src/run.rs b/crates/autopilot/src/run.rs index 3faef30513..2b0cad10d8 100644 --- a/crates/autopilot/src/run.rs +++ b/crates/autopilot/src/run.rs @@ -178,15 +178,12 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { crate::database::run_database_metrics_work(db_write.clone()); let http_factory = HttpClientFactory::new(&args.http_client); - let web3 = shared::ethrpc::web3( - &args.shared.ethrpc, - &http_factory, - &args.shared.node_url, - "base", - ); - let simulation_web3 = args.shared.simulation_node_url.as_ref().map(|node_url| { - shared::ethrpc::web3(&args.shared.ethrpc, &http_factory, node_url, "simulation") - }); + let web3 = shared::ethrpc::web3(&args.shared.ethrpc, &args.shared.node_url, "base"); + let simulation_web3 = args + .shared + .simulation_node_url + .as_ref() + .map(|node_url| shared::ethrpc::web3(&args.shared.ethrpc, node_url, "simulation")); let chain_id = web3 .alloy @@ -334,12 +331,7 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { let trace_call_detector = args.tracing_node_url.as_ref().map(|tracing_node_url| { CachingDetector::new( Box::new(TraceCallDetector::new( - shared::ethrpc::web3( - &args.shared.ethrpc, - &http_factory, - tracing_node_url, - "trace", - ), + shared::ethrpc::web3(&args.shared.ethrpc, tracing_node_url, "trace"), *eth.contracts().settlement().address(), finder, )), @@ -734,12 +726,7 @@ async fn shadow_mode(args: Arguments) -> ! { .into_iter() .collect(); - let web3 = shared::ethrpc::web3( - &args.shared.ethrpc, - &http_factory, - &args.shared.node_url, - "base", - ); + let web3 = shared::ethrpc::web3(&args.shared.ethrpc, &args.shared.node_url, "base"); let weth = WETH9::Instance::deployed(&web3.alloy) .await .expect("couldn't find deployed WETH contract"); diff --git a/crates/contracts/src/bin/vendor.rs b/crates/contracts/src/bin/vendor.rs index 36bd8fce6a..2d26a17e27 100644 --- a/crates/contracts/src/bin/vendor.rs +++ b/crates/contracts/src/bin/vendor.rs @@ -1,7 +1,3 @@ -//! This script is used to vendor Truffle JSON artifacts to be used for code -//! 
generation with `ethcontract`. This is done instead of fetching contracts -//! at build time to reduce the risk of failure. - use { anyhow::Result, reqwest::Url, diff --git a/crates/driver/Cargo.toml b/crates/driver/Cargo.toml index edd2f47f6e..d93e4f4631 100644 --- a/crates/driver/Cargo.toml +++ b/crates/driver/Cargo.toml @@ -55,7 +55,6 @@ toml = { workspace = true } tower = { workspace = true } tower-http = { workspace = true, features = ["limit", "trace"] } url = { workspace = true, features = ["serde"] } -web3 = { workspace = true, features = ["http"] } # These still use { workspace = true } because they're used at # the boundary between driver and shared. @@ -65,7 +64,6 @@ web3 = { workspace = true, features = ["http"] } anyhow = { workspace = true } clap = { workspace = true } contracts = { workspace = true } -ethcontract = { workspace = true } model = { workspace = true } observe = { workspace = true } shared = { workspace = true } diff --git a/crates/driver/src/boundary/mod.rs b/crates/driver/src/boundary/mod.rs index 0e220a985d..630616a9f3 100644 --- a/crates/driver/src/boundary/mod.rs +++ b/crates/driver/src/boundary/mod.rs @@ -56,9 +56,5 @@ fn web3_client(ethrpc: &Url, max_batch_size: usize, max_concurrent_requests: usi ethrpc_max_concurrent_requests: max_concurrent_requests, ethrpc_batch_delay: Default::default(), }; - let http_factory = - shared::http_client::HttpClientFactory::new(&shared::http_client::Arguments { - http_timeout: std::time::Duration::from_secs(10), - }); - shared::ethrpc::web3(ðrpc_args, &http_factory, ethrpc, "base") + shared::ethrpc::web3(ðrpc_args, ethrpc, "base") } diff --git a/crates/driver/src/infra/blockchain/contracts.rs b/crates/driver/src/infra/blockchain/contracts.rs index deb0aed0c2..7ca0287e3f 100644 --- a/crates/driver/src/infra/blockchain/contracts.rs +++ b/crates/driver/src/infra/blockchain/contracts.rs @@ -51,7 +51,7 @@ impl Contracts { web3: &Web3, chain: Chain, addresses: Addresses, - ) -> Result { + ) -> Result { let settlement = GPv2Settlement::Instance::new( addresses .settlement @@ -167,8 +167,6 @@ impl Contracts { #[derive(Debug, Error)] pub enum Error { - #[error("method error: {0:?}")] - Method(#[from] ethcontract::errors::MethodError), #[error("method error: {0:?}")] Rpc(#[from] alloy::contract::Error), } diff --git a/crates/driver/src/infra/blockchain/mod.rs b/crates/driver/src/infra/blockchain/mod.rs index d3d58dcfb8..1e1bb52890 100644 --- a/crates/driver/src/infra/blockchain/mod.rs +++ b/crates/driver/src/infra/blockchain/mod.rs @@ -12,8 +12,7 @@ use { }, anyhow::anyhow, chain::Chain, - ethcontract::errors::ExecutionError, - ethrpc::{Web3, block_stream::CurrentBlockWatcher}, + ethrpc::{Web3, alloy::ProviderLabelingExt, block_stream::CurrentBlockWatcher}, shared::{ account_balances::{BalanceSimulator, SimulationError}, gas_price_estimation::Eip1559EstimationExt, @@ -73,8 +72,6 @@ impl Rpc { #[derive(Debug, Error)] pub enum RpcError { - #[error("web3 error: {0:?}")] - Web3(#[from] web3::error::Error), #[error("alloy transport error: {0:?}")] Alloy(#[from] alloy::transports::TransportError), #[error("unsupported chain")] @@ -161,7 +158,7 @@ impl Ethereum { /// the provided label. 
pub fn with_metric_label(&self, label: String) -> Self { Self { - web3: ethrpc::instrumented::instrument_with_label(&self.web3, label), + web3: self.web3.labeled(label), ..self.clone() } } @@ -326,10 +323,6 @@ pub enum Error { ContractRpc(#[from] alloy::contract::Error), #[error("alloy rpc error: {0:?}")] Rpc(#[from] alloy::transports::RpcError), - #[error("method error: {0:?}")] - Method(#[from] ethcontract::errors::MethodError), - #[error("web3 error: {0:?}")] - Web3(#[from] web3::error::Error), #[error("gas price estimation error: {0}")] GasPrice(boundary::Error), #[error("access list estimation error: {0:?}")] @@ -342,11 +335,6 @@ impl Error { pub fn is_revert(&self) -> bool { // This behavior is node dependent match self { - Error::Method(error) => matches!(error.inner, ExecutionError::Revert(_)), - Error::Web3(inner) => { - let error = ExecutionError::from(inner.clone()); - matches!(error, ExecutionError::Revert(_)) - } Error::GasPrice(_) => false, Error::AccessList(_) => true, Error::ContractRpc(_) => true, @@ -362,7 +350,6 @@ impl Error { impl From for Error { fn from(err: contracts::Error) -> Self { match err { - contracts::Error::Method(err) => Self::Method(err), contracts::Error::Rpc(err) => Self::ContractRpc(err), } } @@ -372,7 +359,6 @@ impl From for Error { fn from(err: SimulationError) -> Self { match err { SimulationError::Method(err) => Self::ContractRpc(err), - SimulationError::Web3(err) => Self::Web3(err), } } } diff --git a/crates/driver/src/infra/config/file/mod.rs b/crates/driver/src/infra/config/file/mod.rs index ad0c4d8184..bf12ac0895 100644 --- a/crates/driver/src/infra/config/file/mod.rs +++ b/crates/driver/src/infra/config/file/mod.rs @@ -138,16 +138,6 @@ enum BlockNumber { Earliest, } -impl From for web3::types::BlockNumber { - fn from(bn: BlockNumber) -> Self { - match bn { - BlockNumber::Pending => web3::types::BlockNumber::Pending, - BlockNumber::Latest => web3::types::BlockNumber::Latest, - BlockNumber::Earliest => web3::types::BlockNumber::Earliest, - } - } -} - impl From for BlockNumberOrTag { fn from(value: BlockNumber) -> Self { match value { diff --git a/crates/driver/src/tests/setup/mod.rs b/crates/driver/src/tests/setup/mod.rs index e336fe7945..e4202ef77d 100644 --- a/crates/driver/src/tests/setup/mod.rs +++ b/crates/driver/src/tests/setup/mod.rs @@ -1131,8 +1131,7 @@ impl Test { pub async fn settle_with_solver(&self, solver_name: &str, solution_id: u64) -> Settle { let submission_deadline_latest_block: u64 = - u64::try_from(self.web3().eth().block_number().await.unwrap()).unwrap() - + self.settle_submission_deadline; + self.web3().alloy.get_block_number().await.unwrap() + self.settle_submission_deadline; let old_balances = self.balances().await; let res = self .client diff --git a/crates/e2e/tests/e2e/hooks.rs b/crates/e2e/tests/e2e/hooks.rs index 5f21c2f615..2339f7e281 100644 --- a/crates/e2e/tests/e2e/hooks.rs +++ b/crates/e2e/tests/e2e/hooks.rs @@ -13,7 +13,7 @@ use { safe::Safe, wait_for_condition, }, - ethrpc::alloy::{CallBuilderExt, conversions::IntoLegacy}, + ethrpc::alloy::CallBuilderExt, model::{ order::{OrderCreation, OrderCreationAppData, OrderKind}, quote::{OrderQuoteRequest, OrderQuoteSide, SellAmount}, @@ -357,13 +357,8 @@ async fn signature(web3: Web3) { services.create_order(&order).await.unwrap(); onchain.mint_block().await; - let balance = token - .balanceOf(safe.address()) - .call() - .await - .unwrap() - .into_legacy(); - assert_eq!(balance, 5u64.eth().into_legacy()); + let balance = 
token.balanceOf(safe.address()).call().await.unwrap(); + assert_eq!(balance, 5u64.eth()); // Check that the Safe really hasn't been deployed yet. let code = web3.alloy.get_code_at(safe.address()).await.unwrap(); diff --git a/crates/ethrpc/Cargo.toml b/crates/ethrpc/Cargo.toml index a40c2ec12b..f500517f1e 100644 --- a/crates/ethrpc/Cargo.toml +++ b/crates/ethrpc/Cargo.toml @@ -15,13 +15,11 @@ alloy = { workspace = true, default-features = false, features = ["json-rpc", "p anyhow = { workspace = true } async-trait = { workspace = true } const-hex = { workspace = true } -ethcontract = { workspace = true } futures = { workspace = true } itertools = { workspace = true } jsonrpc-core.workspace = true mockall = { workspace = true, optional = true } observe = { workspace = true } -primitive-types = { workspace = true } prometheus = { workspace = true } prometheus-metric-storage = { workspace = true } rand = { workspace = true } @@ -34,7 +32,6 @@ tokio-stream = { workspace = true } tower = { workspace = true } tracing = { workspace = true } url = { workspace = true } -web3 = { workspace = true } [dev-dependencies] mockall = { workspace = true } diff --git a/crates/ethrpc/src/alloy/conversions.rs b/crates/ethrpc/src/alloy/conversions.rs deleted file mode 100644 index fcd04baec2..0000000000 --- a/crates/ethrpc/src/alloy/conversions.rs +++ /dev/null @@ -1,173 +0,0 @@ -use std::collections::HashMap; - -///////////////////////////////// -// Conversions to the alloy types -///////////////////////////////// - -pub trait IntoAlloy { - /// The corresponding Alloy type. - type To; - - /// Converts the legacy type to the corresponding Alloy type. - fn into_alloy(self) -> Self::To; -} - -impl IntoAlloy for ethcontract::I256 { - type To = alloy::primitives::I256; - - fn into_alloy(self) -> Self::To { - let mut buf = [0u8; 32]; - self.to_little_endian(&mut buf); - alloy::primitives::I256::from_le_bytes(buf) - } -} - -impl IntoAlloy for primitive_types::U256 { - type To = alloy::primitives::U256; - - fn into_alloy(self) -> Self::To { - let mut buf = [0u8; 32]; - self.to_little_endian(&mut buf); - alloy::primitives::U256::from_le_bytes(buf) - } -} - -impl IntoAlloy for primitive_types::U512 { - type To = alloy::primitives::U512; - - fn into_alloy(self) -> Self::To { - let mut buf = [0u8; 64]; - self.to_little_endian(&mut buf); - alloy::primitives::U512::from_le_bytes(buf) - } -} - -impl IntoAlloy for primitive_types::H160 { - type To = alloy::primitives::Address; - - fn into_alloy(self) -> Self::To { - alloy::primitives::Address(self.0.into()) - } -} - -impl IntoAlloy for primitive_types::H256 { - type To = alloy::primitives::aliases::B256; - - fn into_alloy(self) -> Self::To { - alloy::primitives::aliases::B256::new(self.0) - } -} - -impl IntoAlloy for web3::types::BlockNumber { - type To = alloy::eips::BlockNumberOrTag; - - fn into_alloy(self) -> Self::To { - match self { - web3::types::BlockNumber::Finalized => alloy::eips::BlockNumberOrTag::Finalized, - web3::types::BlockNumber::Safe => alloy::eips::BlockNumberOrTag::Safe, - web3::types::BlockNumber::Latest => alloy::eips::BlockNumberOrTag::Latest, - web3::types::BlockNumber::Earliest => alloy::eips::BlockNumberOrTag::Earliest, - web3::types::BlockNumber::Pending => alloy::eips::BlockNumberOrTag::Pending, - web3::types::BlockNumber::Number(number) => { - alloy::eips::BlockNumberOrTag::Number(number.as_u64()) - } - } - } -} - -impl IntoAlloy for web3::types::BlockId { - type To = alloy::eips::BlockId; - - fn into_alloy(self) -> Self::To { - match self { - 
web3::types::BlockId::Hash(hash) => { - alloy::eips::BlockId::Hash(alloy::eips::RpcBlockHash::from(hash.into_alloy())) - } - web3::types::BlockId::Number(number) => { - alloy::eips::BlockId::Number(number.into_alloy()) - } - } - } -} - -impl IntoAlloy for ethcontract::tokens::Bytes> { - type To = alloy::primitives::Bytes; - - fn into_alloy(self) -> Self::To { - alloy::primitives::Bytes::copy_from_slice(self.0.as_slice()) - } -} - -impl IntoAlloy for web3::types::Bytes { - type To = alloy::primitives::Bytes; - - fn into_alloy(self) -> Self::To { - alloy::primitives::Bytes::copy_from_slice(self.0.as_slice()) - } -} - -impl IntoAlloy for HashMap { - type To = HashMap< - alloy::primitives::B256, - alloy::primitives::B256, - alloy::primitives::map::FbBuildHasher<32>, - >; - - fn into_alloy(self) -> Self::To { - self.into_iter() - .map(|(k, v)| (k.into_alloy(), v.into_alloy())) - .collect() - } -} - -////////////////////////////////// -// Conversions to the legacy types -////////////////////////////////// - -pub trait IntoLegacy { - /// The corresponding legacy type. - type To; - - /// Converts the alloy type to the corresponding legacy type. - fn into_legacy(self) -> Self::To; -} - -impl IntoLegacy for alloy::primitives::U256 { - type To = primitive_types::U256; - - fn into_legacy(self) -> Self::To { - primitive_types::U256(self.into_limbs()) - } -} - -impl IntoLegacy for alloy::primitives::U512 { - type To = primitive_types::U512; - - fn into_legacy(self) -> Self::To { - primitive_types::U512(self.into_limbs()) - } -} - -impl IntoLegacy for alloy::primitives::Address { - type To = primitive_types::H160; - - fn into_legacy(self) -> Self::To { - primitive_types::H160(self.into()) - } -} - -impl IntoLegacy for alloy::primitives::aliases::B256 { - type To = primitive_types::H256; - - fn into_legacy(self) -> Self::To { - primitive_types::H256(self.into()) - } -} - -impl IntoLegacy for alloy::primitives::Bytes { - type To = web3::types::Bytes; - - fn into_legacy(self) -> Self::To { - web3::types::Bytes(self.to_vec()) - } -} diff --git a/crates/ethrpc/src/alloy/instrumentation.rs b/crates/ethrpc/src/alloy/instrumentation.rs index 38d0d8b36f..d4bd4143b4 100644 --- a/crates/ethrpc/src/alloy/instrumentation.rs +++ b/crates/ethrpc/src/alloy/instrumentation.rs @@ -9,9 +9,9 @@ //! trait to conveniently create a new `Provider` with an additional //! [`LabelingLayer`]. use { - crate::alloy::RpcClientRandomIdExt, + crate::{Web3, alloy::RpcClientRandomIdExt}, alloy::{ - providers::{DynProvider, Provider, ProviderBuilder}, + providers::{Provider, ProviderBuilder}, rpc::{ client::RpcClient, json_rpc::{RequestPacket, ResponsePacket, SerializedRequest}, @@ -151,16 +151,29 @@ where pub trait ProviderLabelingExt { /// Creates a new provider tagged with another label. 
- fn labeled(&self, label: String) -> Self; + fn labeled(&self, label: S) -> Self; } -impl ProviderLabelingExt for DynProvider { - fn labeled(&self, label: String) -> Self { - let is_local = self.client().is_local(); - let transport = self.client().transport().clone(); - let transport_with_label = LabelingLayer { label }.layer(transport); +impl ProviderLabelingExt for Web3 { + fn labeled(&self, label: S) -> Self { + let is_local = self.alloy.client().is_local(); + let transport = self.alloy.client().transport().clone(); + let transport_with_label = LabelingLayer { + label: label.to_string(), + } + .layer(transport); let client = RpcClient::with_random_id(transport_with_label, is_local); - ProviderBuilder::new().connect_client(client).erased() + let alloy = ProviderBuilder::new() + .wallet(self.wallet.clone()) + // TODO: eventually remove this and all the other simple nonce managers + .with_simple_nonce_management() + .connect_client(client) + .erased(); + + Self { + alloy, + wallet: self.wallet.clone(), + } } } diff --git a/crates/ethrpc/src/alloy/mod.rs b/crates/ethrpc/src/alloy/mod.rs index 6e6b12a07e..1e2679bd53 100644 --- a/crates/ethrpc/src/alloy/mod.rs +++ b/crates/ethrpc/src/alloy/mod.rs @@ -1,5 +1,4 @@ mod buffering; -pub mod conversions; pub mod errors; mod evm_ext; mod instrumentation; @@ -13,22 +12,18 @@ use { }, buffering::BatchCallLayer, instrumentation::{InstrumentationLayer, LabelingLayer}, - std::time::Duration, }; pub use {evm_ext::EvmProviderExt, instrumentation::ProviderLabelingExt, wallet::MutWallet}; /// Creates an [`RpcClient`] from the given URL with [`LabelingLayer`], /// [`InstrumentationLayer`] and [`BatchCallLayer`]. -fn rpc(url: &str) -> RpcClient { +fn rpc(url: &str, config: Config, label: Option<&str>) -> RpcClient { ClientBuilder::default() .layer(LabelingLayer { - label: "main".into(), + label: label.unwrap_or("main").into(), }) .layer(InstrumentationLayer) - .layer(BatchCallLayer::new(Config { - ethrpc_batch_delay: Duration::ZERO, - ..Default::default() - })) + .layer(BatchCallLayer::new(config)) .http(url.parse().unwrap()) } @@ -38,10 +33,10 @@ fn rpc(url: &str) -> RpcClient { /// /// This is useful for components that need to avoid batching (e.g., block /// stream polling on high-frequency chains). -fn unbuffered_rpc(url: &str) -> RpcClient { +fn unbuffered_rpc(url: &str, label: Option<&str>) -> RpcClient { ClientBuilder::default() .layer(LabelingLayer { - label: "main_unbuffered".into(), + label: label.unwrap_or("main_unbuffered").into(), }) .layer(InstrumentationLayer) .http(url.parse().unwrap()) @@ -53,8 +48,8 @@ fn unbuffered_rpc(url: &str) -> RpcClient { /// Useful for read-only operations like block polling. /// /// Returns a copy of the [`MutWallet`] so the caller can modify it later. -pub fn unbuffered_provider(url: &str) -> (AlloyProvider, MutWallet) { - let rpc = unbuffered_rpc(url); +pub fn unbuffered_provider(url: &str, label: Option<&str>) -> (AlloyProvider, MutWallet) { + let rpc = unbuffered_rpc(url, label); let wallet = MutWallet::default(); let provider = ProviderBuilder::new() .wallet(wallet.clone()) @@ -68,13 +63,13 @@ pub fn unbuffered_provider(url: &str) -> (AlloyProvider, MutWallet) { /// Creates a provider with the provided URL and an empty [`MutWallet`]. /// /// Returns a copy of the [`MutWallet`] so the caller can modify it later. 
-pub fn provider(url: &str) -> (AlloyProvider, MutWallet) { - let rpc = rpc(url); +pub fn provider(url: &str, config: Config, label: Option<&str>) -> (AlloyProvider, MutWallet) { + let rpc = rpc(url, config, label); let wallet = MutWallet::default(); let provider = ProviderBuilder::new() .wallet(wallet.clone()) // will query the node for the nonce every time that it is needed - // adds overhead but makes working with alloy/ethcontract at the same time much simpler + // adds overhead but makes working with alloy at the same time much simpler .with_simple_nonce_management() .connect_client(rpc) .erased(); diff --git a/crates/ethrpc/src/block_stream/mod.rs b/crates/ethrpc/src/block_stream.rs similarity index 99% rename from crates/ethrpc/src/block_stream/mod.rs rename to crates/ethrpc/src/block_stream.rs index c1538574fa..eeff615e5d 100644 --- a/crates/ethrpc/src/block_stream/mod.rs +++ b/crates/ethrpc/src/block_stream.rs @@ -1,5 +1,5 @@ use { - crate::{AlloyProvider, alloy::ProviderLabelingExt}, + crate::AlloyProvider, alloy::{ eips::{BlockId, BlockNumberOrTag}, primitives::{B256, U256}, @@ -235,8 +235,8 @@ pub async fn current_block_stream( ) -> Result { // Build an alloy transport specifically for the current block stream to avoid // batching requests together on chains with a very high block frequency. - let (provider, _) = crate::alloy::unbuffered_provider(url.as_str()); - let provider = provider.labeled("base_currentBlockStream".into()); + let (provider, _) = + crate::alloy::unbuffered_provider(url.as_str(), Some("base_currentBlockStream")); let first_block = provider.current_block().await?; tracing::debug!(number=%first_block.number, hash=?first_block.hash, "polled block"); diff --git a/crates/ethrpc/src/buffered.rs b/crates/ethrpc/src/buffered.rs deleted file mode 100644 index c942c4ef90..0000000000 --- a/crates/ethrpc/src/buffered.rs +++ /dev/null @@ -1,600 +0,0 @@ -//! A buffered `Transport` implementation that automatically groups JSON RPC -//! requests into batches. - -use { - super::MAX_BATCH_SIZE, - ethcontract::{ - jsonrpc::Call, - web3::{BatchTransport, Error as Web3Error, RequestId, Transport}, - }, - futures::{ - channel::{mpsc, oneshot}, - future::{self, BoxFuture, FutureExt as _}, - stream::{self, FusedStream, Stream, StreamExt as _}, - }, - serde_json::Value, - std::{ - collections::{BTreeMap, BTreeSet}, - fmt::Write, - future::Future, - num::NonZeroUsize, - sync::Arc, - time::Duration, - }, - tokio::task::JoinHandle, - tracing::Instrument as _, -}; - -/// Buffered transport configuration. -#[derive(Debug)] -pub struct Configuration { - /// The maximum amount of concurrent batches to send to the node. - /// - /// Specifying `None` means no limit on concurrency. - pub max_concurrent_requests: Option, - /// The maximum batch size. - pub max_batch_len: usize, - /// An additional minimum delay to wait for collecting requests. - /// - /// The delay starts counting after receiving the first request. - pub batch_delay: Duration, -} - -impl Default for Configuration { - fn default() -> Self { - // Default configuration behaves kind of like TCP Nagle. - Self { - max_concurrent_requests: NonZeroUsize::new(1), - max_batch_len: MAX_BATCH_SIZE, - batch_delay: Duration::default(), - } - } -} - -/// Buffered `Transport` implementation that implements automatic batching of -/// JSONRPC requests. 
-#[derive(Clone, Debug)] -pub struct BufferedTransport { - inner: Arc, - calls: mpsc::UnboundedSender, -} - -type RpcResult = Result; - -type CallContext = (RequestId, Call, Option, oneshot::Sender); - -impl BufferedTransport -where - Inner: BatchTransport + Send + Sync + 'static, - Inner::Out: Send, - Inner::Batch: Send, -{ - /// Create a new buffered transport with the default configuration. - pub fn new(inner: Inner) -> Self { - Self::with_config(inner, Default::default()) - } - - /// Creates a new buffered transport with the specified configuration. - pub fn with_config(inner: Inner, config: Configuration) -> Self { - let inner = Arc::new(inner); - let (calls, receiver) = mpsc::unbounded(); - Self::background_worker(inner.clone(), config, receiver); - - Self { inner, calls } - } - - /// Start a background worker for handling batched requests. - fn background_worker( - inner: Arc, - config: Configuration, - calls: mpsc::UnboundedReceiver, - ) -> JoinHandle<()> { - tokio::task::spawn(batched_for_each(config, calls, move |batch| { - let inner = inner.clone(); - async move { - let (mut requests, mut trace_ids, mut senders): (Vec<_>, Vec<_>, Vec<_>) = - itertools::multiunzip( - batch - .into_iter() - .filter(|(_, _, _, sender)| !sender.is_canceled()) - .map(|(id, request, trace_id, sender)| { - ((id, request), trace_id, sender) - }), - ); - match requests.len() { - 0 => (), - 1 => { - let ((id, request), trace_id, sender) = - (requests.remove(0), trace_ids.remove(0), senders.remove(0)); - let result = match (&request, trace_id) { - (Call::MethodCall(_), Some(trace_id)) => { - let span = - observe::distributed_tracing::request_id::info_span(trace_id); - inner.send(id, request).instrument(span).await - } - _ => inner.send(id, request).await, - }; - let _ = sender.send(result); - } - n => { - let results = match build_rpc_metadata(&requests, &trace_ids) { - Ok(metadata) => { - let span = - observe::distributed_tracing::request_id::info_span(metadata); - inner.send_batch(requests).instrument(span).await - } - Err(err) => { - tracing::error!( - ?err, - "failed to build metadata, sending RPC calls without the \ - metadata header" - ); - inner.send_batch(requests).await - } - } - .unwrap_or_else(|err| vec![Err(err); n]); - for (sender, result) in senders.into_iter().zip(results) { - let _ = sender.send(result); - } - } - } - } - })) - } - - /// Queue a call by sending it over calls channel to the background worker. - fn queue_call(&self, id: RequestId, request: Call) -> oneshot::Receiver { - let (sender, receiver) = oneshot::channel(); - let trace_id = observe::distributed_tracing::request_id::from_current_span(); - let context = (id, request, trace_id, sender); - self.calls - .unbounded_send(context) - .expect("worker task unexpectedly dropped"); - receiver - } - - /// Executes a call. 
- async fn execute_call(&self, id: RequestId, request: Call) -> RpcResult { - let method = match &request { - Call::MethodCall(call) => call.method.as_str(), - _ => "none", - }; - - tracing::trace!(%id, %method, "queueing call"); - - let response = self.queue_call(id, request); - let result = response.await.expect("worker task unexpectedly dropped"); - - tracing::trace!(%id, ok = %result.is_ok(), "received response"); - - result - } -} - -impl Transport for BufferedTransport -where - Inner: BatchTransport + Send + Sync + 'static, - Inner::Out: Send, - Inner::Batch: Send, -{ - type Out = BoxFuture<'static, RpcResult>; - - fn prepare(&self, method: &str, params: Vec) -> (RequestId, Call) { - self.inner.prepare(method, params) - } - - fn send(&self, id: RequestId, request: Call) -> Self::Out { - let this = self.clone(); - - async move { this.execute_call(id, request).await } - .in_current_span() - .boxed() - } -} - -impl BatchTransport for BufferedTransport -where - Inner: BatchTransport + Send + Sync + 'static, - Inner::Out: Send, - Inner::Batch: Send, -{ - type Batch = BoxFuture<'static, Result, Web3Error>>; - - fn send_batch(&self, requests: T) -> Self::Batch - where - T: IntoIterator, - { - let this = self.clone(); - let requests = requests.into_iter().collect::>(); - - async move { - let responses = requests - .into_iter() - .map(|(id, request)| this.execute_call(id, request)); - Ok(future::join_all(responses).await) - } - .in_current_span() - .boxed() - } -} - -/// Batches a stream into chunks. -/// -/// This is very similar to `futures::stream::StreamExt::ready_chunks` with the -/// difference that it allows configuring a minimum delay for a batch, so -/// waiting for a small amount of time to allow the stream to produce additional -/// items, thus decreasing the chance of batches of size 1. -fn batched_for_each( - config: Configuration, - items: St, - work: F, -) -> impl Future -where - St: Stream + FusedStream + Unpin, - F: Fn(Vec) -> Fut, - Fut: Future, -{ - let concurrency_limit = config.max_concurrent_requests.map(NonZeroUsize::get); - - let batches = stream::unfold(items, move |mut items| async move { - let mut chunk = vec![items.next().await?]; - - let delay = tokio::time::sleep(config.batch_delay).fuse(); - futures::pin_mut!(delay); - - while chunk.len() < config.max_batch_len { - futures::select_biased! { - item = items.next() => match item { - Some(item) => chunk.push(item), - None => break, - }, - _ = delay => break, - } - } - - Some((chunk, items)) - }); - - batches.for_each_concurrent(concurrency_limit, work) -} - -/// Builds a metadata string representation for RPC requests. -/// -/// This function takes an iterator of requests and their corresponding trace -/// IDs, and generates a metadata string that groups the requests by their trace -/// IDs and method names. The format of the output string is as follows: -/// -/// `trace_id:method_name(index1,index2,...),method_name(index1,index2,... -/// )|trace_id:...` -/// -/// Each trace ID is followed by a colon and a list of method names. Each method -/// name is followed by a list of indices (representing the position of the -/// request in the original vector) enclosed in parentheses. Different method -/// names are separated by commas. If there are multiple trace IDs, their -/// entries are separated by a pipe character. -/// -/// If a trace ID is `None`, it is represented as "null" in the output string. -/// All requests with absent trace IDs are grouped together under "null". 
-/// -/// # Arguments -/// -/// * `requests` - A vector of tuples, where each tuple contains a request ID -/// and a `Call` object representing the RPC request. -/// * `trace_ids` - A vector of optional strings representing the trace IDs of -/// the requests. The trace IDs correspond to the requests in the same -/// position in the `requests` vector. -/// -/// # Returns -/// -/// This function returns a string representing the metadata header. -fn build_rpc_metadata( - requests: &[(RequestId, Call)], - trace_ids: &[Option], -) -> anyhow::Result { - // Group the requests by trace ID(sorted) and method name(sorted) where values - // are sorted indices. - let mut grouped_metadata: BTreeMap>> = BTreeMap::new(); - for (idx, ((_, call), trace_id)) in requests.iter().zip(trace_ids).enumerate() { - if let Call::MethodCall(call) = call { - let trace_id = trace_id.clone().unwrap_or("null".to_string()); - grouped_metadata - .entry(trace_id) - .or_default() - .entry(call.method.clone()) - .or_default() - .insert(idx); - } - } - - let mut metadata_str = String::new(); - - let mut grouped_metadata_iter = grouped_metadata.into_iter().peekable(); - while let Some((trace_id, methods)) = grouped_metadata_iter.next() { - // New entry starts with the trace_id - write!(metadata_str, "{trace_id}:")?; - - // Followed by the method names and their indices - let mut methods_iter = methods.into_iter().peekable(); - while let Some((method, indices)) = methods_iter.next() { - write!(metadata_str, "{method}(")?; - - let indices_str = format_indices_as_ranges(indices)?; - write!(metadata_str, "{indices_str}")?; - - write!(metadata_str, ")")?; - - if methods_iter.peek().is_some() { - write!(metadata_str, ",")?; - } - } - - if grouped_metadata_iter.peek().is_some() { - write!(metadata_str, "|")?; - } - } - - Ok(metadata_str) -} - -/// Formats a set of indices as a string of ranges. -/// -/// This function takes a set of indices and formats them as a string where -/// consecutive indices are represented as ranges. For example, the set -/// `{1, 2, 3, 5, 6, 8}` would be formatted as the string `"1..3,5..6,8"`. -/// -/// # Arguments -/// -/// * `indices` - A set of indices to format. The indices should be unique and -/// sorted in ascending order. -/// -/// # Returns -/// -/// This function returns a string representing the indices as ranges. Each -/// range is formatted as `start..end`, and ranges are separated by commas. -/// Single indices (i.e., indices that are not part of a range) are represented -/// as themselves. -fn format_indices_as_ranges(indices: BTreeSet) -> anyhow::Result { - let mut result = String::new(); - let mut indices = indices.into_iter(); - // Initialize the start and last variables with the first index. - let mut start = match indices.next() { - Some(index) => index, - None => return Ok(result), - }; - let mut last = start; - - // Iterate over the rest of the indices - for index in indices { - // If the current index is the next consecutive number, update last index. - if index == last + 1 { - last = index; - // Otherwise, there is no need to accumulate the range anymore. Append - // the range to the result string. - } else { - append_sequence(&mut result, start, last)?; - write!(result, ",")?; - // Reset the start and last indices with the current value. - start = index; - last = index; - } - } - - // Append the remaining data. 
- append_sequence(&mut result, start, last)?; - - Ok(result) -} - -/// This function formats a range of integers into a condensed string -/// representation and appends it to the given buffer. The format varies based -/// on the relationship between `start` and `last`: -/// -/// - If `start` is equal to `last`, it indicates a single value, which is -/// appended as such. -/// - If `start` is one less than `last` (i.e., they are consecutive), both -/// numbers are appended separated by a comma. -/// - Otherwise, the numbers between `start` and `last` (inclusive) are -/// represented as a range using two dots (e.g., "start..last"). -fn append_sequence(buffer: &mut String, start: usize, last: usize) -> core::fmt::Result { - if start == last { - write!(buffer, "{start}") - } else if start == last - 1 { - write!(buffer, "{start},{last}") - } else { - write!(buffer, "{start}..{last}") - } -} - -#[cfg(test)] -mod tests { - use { - super::*, - crate::mock::MockTransport, - ethcontract::{ - U256, - Web3, - jsonrpc::{Id, MethodCall, Params}, - }, - mockall::predicate, - serde_json::json, - }; - - #[tokio::test] - async fn batches_calls_when_joining() { - let transport = MockTransport::new(); - transport - .mock() - .expect_execute_batch() - .with(predicate::eq(vec![ - ("foo".to_owned(), vec![json!(true), json!("stuff")]), - ("bar".to_owned(), vec![json!(42), json!("answer")]), - ])) - .returning(|_| Ok(vec![Ok(json!("hello")), Ok(json!("world"))])); - - let transport = BufferedTransport::new(transport); - - let (foo, bar) = futures::join!( - transport.execute("foo", vec![json!(true), json!("stuff")]), - transport.execute("bar", vec![json!(42), json!("answer")]), - ); - assert_eq!(foo.unwrap(), json!("hello")); - assert_eq!(bar.unwrap(), json!("world")); - } - - #[tokio::test] - async fn no_batching_with_only_one_request() { - let transport = MockTransport::new(); - transport - .mock() - .expect_execute() - .with( - predicate::eq("single".to_owned()), - predicate::eq(vec![json!("request")]), - ) - .returning(|_, _| Ok(json!(42))); - - let transport = BufferedTransport::new(transport); - - let response = transport - .execute("single", vec![json!("request")]) - .await - .unwrap(); - assert_eq!(response, json!(42)); - } - - #[tokio::test] - async fn batches_separate_web3_instances() { - let transport = MockTransport::new(); - transport - .mock() - .expect_execute_batch() - .with(predicate::eq(vec![ - ("eth_chainId".to_owned(), vec![]), - ("eth_chainId".to_owned(), vec![]), - ("eth_chainId".to_owned(), vec![]), - ])) - .returning(|_| { - Ok(vec![ - Ok(json!("0x2a")), - Ok(json!("0x2a")), - Ok(json!("0x2a")), - ]) - }); - - let web3 = Web3::new(BufferedTransport::new(transport)); - - let chain_ids = future::try_join_all(vec![ - web3.clone().eth().chain_id(), - web3.clone().eth().chain_id(), - web3.clone().eth().chain_id(), - ]) - .await - .unwrap(); - - assert_eq!(chain_ids, vec![U256::from(42); 3]); - } - - #[tokio::test] - async fn resolves_call_after_dropping_transport() { - let transport = MockTransport::new(); - transport - .mock() - .expect_execute() - .with(predicate::eq("used".to_owned()), predicate::eq(vec![])) - .returning(|_, _| Ok(json!(1337))); - - let transport = BufferedTransport::new(transport); - - let unused = transport.execute("unused", vec![]); - let unpolled = transport.execute("unpolled", vec![]); - let used = transport.execute("used", vec![]); - drop((unused, transport)); - - assert_eq!(used.await.unwrap(), json!(1337)); - drop(unpolled); - } - - #[test] - fn 
test_format_indices_as_ranges() { - // empty string - let indices = BTreeSet::new(); - assert_eq!(format_indices_as_ranges(indices).unwrap(), ""); - - // a single value - let indices = vec![2].into_iter().collect(); - assert_eq!(format_indices_as_ranges(indices).unwrap(), "2"); - - // only a range - let indices = vec![1, 2, 3, 4, 5].into_iter().collect(); - assert_eq!(format_indices_as_ranges(indices).unwrap(), "1..5"); - - // 2 subsequent values range - let indices = vec![2, 3].into_iter().collect(); - assert_eq!(format_indices_as_ranges(indices).unwrap(), "2,3"); - - // no ranges - let indices = vec![1, 3, 5, 7].into_iter().collect(); - assert_eq!(format_indices_as_ranges(indices).unwrap(), "1,3,5,7"); - - // ends with a non-range value - let indices = vec![1, 2, 3, 5, 7, 8, 9, 10, 20].into_iter().collect(); - assert_eq!( - format_indices_as_ranges(indices).unwrap(), - "1..3,5,7..10,20" - ); - - // ends with a range value - let indices = vec![1, 2, 3, 5, 6, 7, 8, 10, 11, 12].into_iter().collect(); - assert_eq!( - format_indices_as_ranges(indices).unwrap(), - "1..3,5..8,10..12" - ); - } - - fn method_call(method: &str) -> Call { - Call::MethodCall(MethodCall { - jsonrpc: None, - method: method.to_string(), - params: Params::None, - id: Id::Null, - }) - } - - #[test] - fn test_build_rpc_metadata_header() { - let requests = vec![ - (1001, method_call("eth_sendTransaction")), // 0 - (1001, method_call("eth_call")), // 1 - (1001, method_call("eth_sendTransaction")), // 2 - (1002, method_call("eth_call")), // 3 - (9999, method_call("eth_call")), // 4 - (1001, method_call("eth_sendTransaction")), // 5 - (1002, method_call("eth_call")), // 6 - (1002, method_call("eth_call")), // 7 - (1001, method_call("eth_sendTransaction")), // 8 - (9999, method_call("eth_sendTransaction")), // 9 - (9999, method_call("eth_sendTransaction")), // 10 - (9999, method_call("eth_sendTransaction")), // 11 - ]; - let trace_ids = vec![ - Some("1001".to_string()), // 0 - Some("1001".to_string()), // 1 - Some("1001".to_string()), // 2 - Some("1002".to_string()), // 3 - None, // 4 - Some("1001".to_string()), // 5 - Some("1002".to_string()), // 6 - Some("1002".to_string()), // 7 - Some("1001".to_string()), // 8 - None, // 9 - None, // 10 - None, // 11 - ]; - let metadata_header = build_rpc_metadata(&requests, &trace_ids).unwrap(); - assert_eq!( - metadata_header, - "1001:eth_call(1),eth_sendTransaction(0,2,5,8)|1002:eth_call(3,6,7)|null:eth_call(4),\ - eth_sendTransaction(9..11)" - ); - } -} diff --git a/crates/ethrpc/src/http.rs b/crates/ethrpc/src/http.rs deleted file mode 100644 index 2f1d89fd54..0000000000 --- a/crates/ethrpc/src/http.rs +++ /dev/null @@ -1,305 +0,0 @@ -use { - futures::{FutureExt, future::BoxFuture}, - jsonrpc_core::types::{Call, Output, Request, Value}, - observe::tracing::tracing_headers, - reqwest::{Client, Url, header}, - serde::{Deserialize, Serialize, de::DeserializeOwned}, - std::{ - collections::HashMap, - fmt::{Debug, Formatter}, - sync::{ - Arc, - atomic::{AtomicUsize, Ordering}, - }, - }, - web3::{ - BatchTransport, - RequestId, - Transport, - error::{Error as Web3Error, TransportError}, - helpers, - }, -}; - -#[derive(Clone)] -pub struct HttpTransport { - client: Client, - inner: Arc, -} - -struct Inner { - url: Url, - id: AtomicUsize, - /// Name of the transport used in logs to distinguish different transports. 
- name: String, -} - -impl HttpTransport { - pub fn new(client: Client, url: Url, name: String) -> Self { - Self { - client, - inner: Arc::new(Inner { - url, - id: AtomicUsize::new(0), - name, - }), - } - } - - fn next_id(&self) -> RequestId { - self.inner.id.fetch_add(1, Ordering::SeqCst) - } - - fn new_request(&self) -> (Client, Arc) { - (self.client.clone(), self.inner.clone()) - } -} - -impl Debug for HttpTransport { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - f.debug_struct("HttpTransport") - .field("url", &self.inner.url) - .finish() - } -} - -// Id is only used for logging. -async fn execute_rpc( - client: Client, - inner: Arc, - id: RequestId, - request: &Request, -) -> Result { - let body = serde_json::to_string(&request)?; - - let request_id = observe::distributed_tracing::request_id::from_current_span(); - - tracing::trace!(name = %inner.name, %id, %body, "executing request"); - let mut request_builder = client - .post(inner.url.clone()) - .header(header::CONTENT_TYPE, "application/json") - .header("X-RPC-REQUEST-ID", id.to_string()) - .headers(tracing_headers()) - .body(body); - match request { - Request::Single(Call::MethodCall(call)) => { - if let Some(metadata) = &request_id { - request_builder = request_builder.header("X-REQUEST-ID", metadata); - } - request_builder = request_builder.header("X-RPC-METHOD", call.method.clone()); - } - Request::Batch(_) => { - if let Some(ref metadata) = request_id { - request_builder = request_builder.header("X-RPC-BATCH-METADATA", metadata); - } - } - _ => {} - } - let response = request_builder - .send() - .await - .map_err(|err: reqwest::Error| { - tracing::warn!( - name = %inner.name, - rpc_request_id = %id, - request_id, - error = %err, - "failed to send request" - ); - Web3Error::Transport(TransportError::Message(err.to_string())) - })?; - let status = response.status(); - let text = response.text().await.map_err(|err: reqwest::Error| { - tracing::warn!( - name = %inner.name, - rpc_request_id = %id, - request_id, - error = %err, - "failed to get response body" - ); - Web3Error::Transport(TransportError::Message(err.to_string())) - })?; - // Log the raw text before decoding to get more information on responses that - // aren't valid json. Debug encoding so we don't get control characters like - // newlines in the output. 
- tracing::trace!(name = %inner.name, %id, body = %text.trim(), "received response"); - if !status.is_success() { - let error_msg = format!("HTTP error {status}"); - - return Err(Web3Error::Transport(TransportError::Message(error_msg))); - } - - let result = jsonrpc_core::serde_from_str(&text).map_err(|err| { - tracing::warn!( - name = %inner.name, - rpc_request_id = %id, - request_id, - error = %err, - raw_response = %text.trim(), - "failed to decode JSON response" - ); - Web3Error::Decoder(format!( - "{:?}, raw response: {}, rpc_request_id: {}, request_id: {:?}, {}", - err, - inner.name, - id, - request_id, - text.trim() - )) - })?; - Ok(result) -} - -type RpcResult = Result; - -impl Transport for HttpTransport { - type Out = BoxFuture<'static, RpcResult>; - - fn prepare(&self, method: &str, params: Vec) -> (RequestId, Call) { - let id = self.next_id(); - let request = helpers::build_request(id, method, params); - (id, request) - } - - fn send(&self, id: RequestId, call: Call) -> Self::Out { - let (client, inner) = self.new_request(); - - async move { - let output = execute_rpc(client, inner, id, &Request::Single(call)).await?; - helpers::to_result_from_output(output) - } - .boxed() - } -} - -impl BatchTransport for HttpTransport { - type Batch = BoxFuture<'static, Result, Web3Error>>; - - fn send_batch(&self, requests: T) -> Self::Batch - where - T: IntoIterator, - { - // Batch calls don't need an id but it helps associate the response log to the - // request log. - let id = self.next_id(); - let (client, inner) = self.new_request(); - let (ids, calls): (Vec<_>, Vec<_>) = requests.into_iter().unzip(); - - async move { - let outputs = execute_rpc(client, inner, id, &Request::Batch(calls)).await?; - handle_batch_response(&ids, outputs) - } - .boxed() - } -} - -/// Workaround for Erigon nodes, which encode each element of the Batch Response -/// as a String rather than a deserializable JSON object -#[derive(Debug, PartialEq, Clone, Deserialize, Serialize)] -#[serde(untagged)] -enum OutputOrString { - String(String), - Output(Output), -} - -impl OutputOrString { - fn try_into_output(self) -> Result { - Ok(match self { - OutputOrString::String(string) => jsonrpc_core::serde_from_str(&string)?, - OutputOrString::Output(output) => output, - }) - } -} - -fn handle_batch_response( - ids: &[RequestId], - outputs: Vec, -) -> Result, Web3Error> { - if ids.len() != outputs.len() { - return Err(Web3Error::InvalidResponse( - "unexpected number of responses".to_string(), - )); - } - let mut outputs = outputs - .into_iter() - .map(|output_or_string| { - let output = output_or_string.try_into_output()?; - Ok(( - id_of_output(&output)?, - helpers::to_result_from_output(output), - )) - }) - .collect::, Web3Error>>()?; - ids.iter() - .map(|id| { - outputs.remove(id).ok_or_else(|| { - Web3Error::InvalidResponse(format!("batch response is missing id {id}")) - }) - }) - .collect() -} - -fn id_of_output(output: &Output) -> Result { - let id = match output { - Output::Success(success) => &success.id, - Output::Failure(failure) => &failure.id, - }; - match id { - jsonrpc_core::Id::Num(num) => Ok(*num as RequestId), - _ => Err(Web3Error::InvalidResponse( - "response id is not u64".to_string(), - )), - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn handles_batch_response_being_in_different_order_than_input() { - let ids = vec![0, 1, 2]; - // This order is different from the ids. 
- let outputs = [1u64, 0, 2] - .iter() - .map(|&id| { - OutputOrString::Output(Output::Success(jsonrpc_core::Success { - jsonrpc: None, - result: id.into(), - id: jsonrpc_core::Id::Num(id), - })) - }) - .collect(); - let results = handle_batch_response(&ids, outputs) - .unwrap() - .into_iter() - .map(|result| result.unwrap().as_u64().unwrap() as usize) - .collect::>(); - // The order of the ids should have been restored. - assert_eq!(ids, results); - } - - #[test] - fn handles_batch_items_that_are_strings() { - let result = handle_batch_response( - &[1], - vec![OutputOrString::String("{\"result\": 1, \"id\": 1}".into())], - ) - .unwrap() - .into_iter() - .map(|result| result.unwrap().as_u64().unwrap() as usize) - .collect::>(); - assert_eq!(vec![1], result); - } - - #[test] - fn errors_on_invalid_string_batch_responses() { - assert!( - handle_batch_response( - &[1], - vec![OutputOrString::String("there is no spoon".into())], - ) - .is_err() - ); - } -} diff --git a/crates/ethrpc/src/instrumented.rs b/crates/ethrpc/src/instrumented.rs deleted file mode 100644 index 0cb9bf9ffb..0000000000 --- a/crates/ethrpc/src/instrumented.rs +++ /dev/null @@ -1,151 +0,0 @@ -use { - crate::alloy::ProviderLabelingExt, - ethcontract::{ - jsonrpc::types::{Call, Value}, - transport::DynTransport, - }, - futures::{FutureExt, future::BoxFuture}, - std::sync::Arc, - web3::{BatchTransport, RequestId, Transport, error::Error as Web3Error}, -}; - -#[derive(prometheus_metric_storage::MetricStorage, Clone, Debug)] -#[metric(subsystem = "rpc")] -struct Metrics { - /// Number of inflight RPC requests for ethereum node. - #[metric(labels("component", "method"))] - requests_inflight: prometheus::IntGaugeVec, - - /// Number of completed RPC requests for ethereum node. - #[metric(labels("component", "method"))] - requests_complete: prometheus::IntCounterVec, - - /// Execution time for each RPC request (batches are counted as one - /// request). - #[metric(labels("component", "method"))] - requests_duration_seconds: prometheus::HistogramVec, - - /// Number of RPC requests initiated within a batch request - #[metric(labels("component", "method"))] - inner_batch_requests_initiated: prometheus::IntCounterVec, -} - -impl Metrics { - #[must_use] - fn on_request_start(&self, label: &str, method: &str) -> impl Drop + use<> { - let requests_inflight = self.requests_inflight.with_label_values(&[label, method]); - let requests_complete = self.requests_complete.with_label_values(&[label, method]); - let requests_duration_seconds = self - .requests_duration_seconds - .with_label_values(&[label, method]); - - requests_inflight.inc(); - let timer = requests_duration_seconds.start_timer(); - - scopeguard::guard(timer, move |timer| { - requests_inflight.dec(); - requests_complete.inc(); - timer.stop_and_record(); - }) - } -} - -#[derive(Debug, Clone)] -pub struct InstrumentedTransport(Arc); - -impl InstrumentedTransport { - pub fn new(label: String, transport: DynTransport) -> Self { - Self(Arc::new(Inner { - metrics: Metrics::instance(observe::metrics::get_storage_registry()).unwrap(), - transport, - label, - })) - } - - pub fn with_additional_label(&self, label: String) -> Self { - Self(Arc::new(Inner { - label: format!("{}_{label}", self.0.label), - transport: self.0.transport.clone(), - metrics: self.0.metrics, - })) - } -} - -/// Adds metrics for RPC requests using the provided label. 
-pub fn instrument_with_label(web3: &crate::Web3, label: String) -> crate::Web3 { - let transport = web3.transport().clone(); - let instrumented = match transport.downcast::() { - Some(instrumented) => instrumented.with_additional_label(label.clone()), - _ => InstrumentedTransport::new(label.clone(), transport), - }; - crate::Web3 { - legacy: web3::Web3::new(DynTransport::new(instrumented)), - alloy: web3.alloy.labeled(label), - wallet: web3.wallet.clone(), - } -} - -#[derive(Debug)] -struct Inner { - metrics: &'static Metrics, - transport: DynTransport, - label: String, -} - -type RpcResult = Result; - -impl Transport for InstrumentedTransport { - type Out = BoxFuture<'static, RpcResult>; - - fn prepare(&self, method: &str, params: Vec) -> (RequestId, Call) { - self.0.transport.prepare(method, params) - } - - fn send(&self, id: RequestId, call: Call) -> Self::Out { - let inner = self.0.clone(); - - async move { - let _guard = inner - .metrics - .on_request_start(&inner.label, method_name(&call)); - inner.transport.send(id, call).await - } - .boxed() - } -} - -impl BatchTransport for InstrumentedTransport { - type Batch = BoxFuture<'static, Result, Web3Error>>; - - fn send_batch(&self, requests: R) -> Self::Batch - where - R: IntoIterator, - { - let inner = self.0.clone(); - let requests: Vec<_> = requests.into_iter().collect(); - - async move { - let _guard = inner.metrics.on_request_start(&inner.label, "batch"); - let metrics = inner.metrics; - let label = &inner.label; - - let requests = requests.into_iter().inspect(move |(_id, call)| { - metrics - .inner_batch_requests_initiated - .with_label_values(&[label, method_name(call)]) - .inc() - }); - - inner.transport.send_batch(requests).await - } - .boxed() - } -} - -fn method_name(call: &Call) -> &str { - match call { - Call::MethodCall(method) => &method.method, - Call::Notification(notification) => ¬ification.method, - Call::Invalid { .. } => "invalid", - } -} diff --git a/crates/ethrpc/src/lib.rs b/crates/ethrpc/src/lib.rs index 27e353e67f..b84d8a7fb4 100644 --- a/crates/ethrpc/src/lib.rs +++ b/crates/ethrpc/src/lib.rs @@ -1,24 +1,12 @@ pub mod alloy; pub mod block_stream; -pub mod buffered; -pub mod http; -pub mod instrumented; #[cfg(any(test, feature = "test-util"))] pub mod mock; -use { - self::{buffered::BufferedTransport, http::HttpTransport}, - crate::alloy::MutWallet, - ::alloy::providers::DynProvider, - ethcontract::transport::DynTransport, - reqwest::{Client, Url}, - std::{num::NonZeroUsize, time::Duration}, - web3::Transport, -}; +use {crate::alloy::MutWallet, ::alloy::providers::DynProvider, reqwest::Url, std::time::Duration}; pub const MAX_BATCH_SIZE: usize = 100; -pub type Web3Transport = DynTransport; pub type AlloyProvider = DynProvider; /// This is just a thin wrapper around providers (clients communicating @@ -28,21 +16,12 @@ pub type AlloyProvider = DynProvider; /// to convert each call site to use the new provider bit by bit instead of /// having to everything at once. 
#[derive(Debug, Clone)] -pub struct Web3 { - pub legacy: web3::Web3, +pub struct Web3 { pub alloy: AlloyProvider, pub wallet: MutWallet, } -impl std::ops::Deref for Web3 { - type Target = web3::Web3; - - fn deref(&self) -> &Self::Target { - &self.legacy - } -} - -impl Web3 { +impl Web3 { // for tests pub fn new_from_env() -> Self { let url = &std::env::var("NODE_URL").unwrap(); @@ -50,14 +29,8 @@ impl Web3 { } pub fn new_from_url(url: &str) -> Self { - let legacy_transport = create_test_transport(url); - let web3 = web3::Web3::new(legacy_transport); - let (alloy, wallet) = crate::alloy::provider(url); - Self { - legacy: web3, - alloy, - wallet, - } + let (alloy, wallet) = crate::alloy::provider(url, Default::default(), None); + Self { alloy, wallet } } } @@ -76,76 +49,25 @@ pub struct Config { pub ethrpc_batch_delay: Duration, } -impl Config { - /// Returns the buffered transport configuration or `None` if batching is - /// disabled. - fn into_buffered_configuration(self) -> Option { - match ( - self.ethrpc_max_batch_size, - self.ethrpc_max_concurrent_requests, - ) { - (0 | 1, 0) => None, - _ => Some(buffered::Configuration { - max_concurrent_requests: NonZeroUsize::new(self.ethrpc_max_concurrent_requests), - max_batch_len: self.ethrpc_max_batch_size.max(1), - batch_delay: self.ethrpc_batch_delay, - }), - } - } -} - impl Default for Config { fn default() -> Self { Self { ethrpc_max_batch_size: 20, ethrpc_max_concurrent_requests: 10, - ethrpc_batch_delay: Duration::from_millis(5), + ethrpc_batch_delay: Duration::ZERO, } } } -/// Create a Web3 instance. -pub fn web3( - args: Config, - http_factory: reqwest::ClientBuilder, - url: &Url, - name: impl ToString, -) -> Web3 { - let http = http_factory.cookie_store(true).build().unwrap(); - let http = HttpTransport::new(http, url.clone(), name.to_string()); - let buffered_config = args.into_buffered_configuration(); - let (legacy, alloy, wallet) = match buffered_config { - Some(config) => { - let legacy = Web3Transport::new(BufferedTransport::with_config(http, config)); - let (alloy, wallet) = alloy::provider(url.as_str()); - (legacy, alloy, wallet) - } - None => { - let legacy = Web3Transport::new(http); - let (alloy, wallet) = alloy::unbuffered_provider(url.as_str()); - (legacy, alloy, wallet) - } +/// Create a Web3 instance with an optional label for observability. +pub fn web3(args: Config, url: &Url, label: Option<&str>) -> Web3 { + let (alloy, wallet) = match ( + args.ethrpc_max_batch_size, + args.ethrpc_max_concurrent_requests, + ) { + (0 | 1, 0) => alloy::unbuffered_provider(url.as_str(), label), + _ => alloy::provider(url.as_str(), args, label), }; - let instrumented = instrumented::InstrumentedTransport::new(name.to_string(), legacy); - - Web3 { - legacy: web3::Web3::new(Web3Transport::new(instrumented)), - alloy, - wallet, - } -} -/// Convenience method to create a transport from a URL. 
-pub fn create_test_transport(url: &str) -> Web3Transport { - let http_transport = HttpTransport::new( - Client::builder() - .timeout(Duration::from_secs(10)) - .build() - .unwrap(), - url.try_into().unwrap(), - "test".into(), - ); - let dyn_transport = Web3Transport::new(http_transport); - let instrumented = instrumented::InstrumentedTransport::new("test".into(), dyn_transport); - Web3Transport::new(instrumented) + Web3 { alloy, wallet } } diff --git a/crates/ethrpc/src/mock.rs b/crates/ethrpc/src/mock.rs index ef5dd5b5b8..2ed9f586cb 100644 --- a/crates/ethrpc/src/mock.rs +++ b/crates/ethrpc/src/mock.rs @@ -3,28 +3,11 @@ use { crate::{Web3, alloy::MutWallet}, alloy::providers::{Provider, ProviderBuilder, mock::Asserter}, - ethcontract::{ - dyns::DynTransport, - futures::future::{self, Ready}, - jsonrpc::{Call, Id, MethodCall, Params}, - web3::{self, BatchTransport, RequestId, Transport}, - }, - serde_json::Value, - std::{ - fmt::{self, Debug, Formatter}, - sync::{ - Arc, - Mutex, - MutexGuard, - atomic::{AtomicUsize, Ordering}, - }, - }, }; -impl Web3 { +impl Web3 { pub fn with_asserter(asserter: Asserter) -> Self { Web3 { - legacy: web3::Web3::new(MockTransport::new()), // this will not behave like the original mock transport but it's only used // in one place so let's keep this for now and fix it when we switch to // alloy in the 1 place that uses the mock provider. @@ -34,174 +17,13 @@ impl Web3 { wallet: MutWallet::default(), } } - - // HACK(jmg-duarte): used to convert a MockTransport -> DynTransport so we can - // remove ethcontract imports from shared should be fixed in a follow up PR, - // removing web3 from ethrpc - pub fn erased(self) -> Web3 { - Web3 { - legacy: web3::Web3::new(DynTransport::new(self.legacy.transport().clone())), - alloy: self.alloy, - wallet: self.wallet, - } - } } -pub fn web3() -> Web3 { +pub fn web3() -> Web3 { Web3 { - legacy: web3::Web3::new(MockTransport::new()), - // this will not behave like the original mock transport but it's only used - // in one place so let's keep this for now and fix it when we switch to - // alloy in the 1 place that uses the mock provider. alloy: ProviderBuilder::new() .connect_mocked_client(Asserter::new()) .erased(), wallet: MutWallet::default(), } } - -/// An intermediate trait used for `mockall` to automatically generate a mock -/// transport for us. 
-#[mockall::automock] -pub trait MockableTransport { - fn execute(&self, method: String, params: Vec) -> web3::Result; - fn execute_batch( - &self, - requests: Vec<(String, Vec)>, - ) -> web3::Result>>; -} - -#[derive(Clone, Default)] -pub struct MockTransport(Arc); - -#[derive(Default)] -pub struct Inner { - inner: Mutex, - current_id: AtomicUsize, -} - -impl MockTransport { - pub fn new() -> Self { - Self::default() - } - - pub fn mock(&self) -> MutexGuard<'_, MockMockableTransport> { - self.0.inner.lock().unwrap() - } -} - -impl Debug for MockTransport { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - f.debug_struct("MockTransport").finish() - } -} - -impl Transport for MockTransport { - type Out = Ready>; - - fn prepare(&self, method: &str, params: Vec) -> (RequestId, Call) { - let id = self.0.current_id.fetch_add(1, Ordering::SeqCst); - ( - id, - Call::MethodCall(MethodCall { - jsonrpc: None, - method: method.to_owned(), - params: Params::Array(params), - id: Id::Null, - }), - ) - } - - fn send(&self, _: RequestId, call: Call) -> Self::Out { - let (method, params) = extract_call(call); - let response = self.mock().execute(method, params); - future::ready(response) - } -} - -impl BatchTransport for MockTransport { - type Batch = Ready>>>; - - fn send_batch(&self, requests: T) -> Self::Batch - where - T: IntoIterator, - { - let batch = requests - .into_iter() - .map(|(_, call)| extract_call(call)) - .collect(); - let responses = self.mock().execute_batch(batch); - future::ready(responses) - } -} - -fn extract_call(call: Call) -> (String, Vec) { - match call { - Call::MethodCall(MethodCall { - method, - params: Params::Array(params), - .. - }) => (method, params), - _ => panic!("unexpected call {call:?}"), - } -} - -#[cfg(test)] -mod tests { - use {super::*, mockall::predicate::*, serde_json::json}; - - #[tokio::test] - async fn can_mock_single_requests() { - let transport = MockTransport::new(); - transport - .mock() - .expect_execute() - .with( - eq("foo_bar".to_owned()), - eq(vec![json!(true), json!("stuff")]), - ) - .returning(|_, _| Ok(json!("hello"))); - - assert_eq!( - transport - .execute("foo_bar", vec![json!(true), json!("stuff")]) - .await - .unwrap(), - json!("hello") - ); - } - - #[tokio::test] - async fn can_mock_batch_requests() { - let transport = MockTransport::new(); - transport - .mock() - .expect_execute_batch() - .with(eq(vec![ - ("foo_bar".to_owned(), vec![json!(true), json!("stuff")]), - ("do_thing".to_owned(), vec![]), - ("fail_thing".to_owned(), vec![json!(42)]), - ])) - .returning(|_| { - Ok(vec![ - Ok(json!("hello")), - Ok(json!("world")), - Err(web3::Error::Transport( - web3::error::TransportError::Message("bad".to_string()), - )), - ]) - }); - - let responses = transport - .send_batch(vec![ - transport.prepare("foo_bar", vec![json!(true), json!("stuff")]), - transport.prepare("do_thing", vec![]), - transport.prepare("fail_thing", vec![json!(42)]), - ]) - .await - .unwrap(); - assert_eq!(responses[0].as_ref().unwrap(), &json!("hello")); - assert_eq!(responses[1].as_ref().unwrap(), &json!("world")); - assert!(responses[2].is_err()); - } -} diff --git a/crates/orderbook/src/run.rs b/crates/orderbook/src/run.rs index 04e65e5fd3..1271d956e6 100644 --- a/crates/orderbook/src/run.rs +++ b/crates/orderbook/src/run.rs @@ -78,15 +78,12 @@ pub async fn start(args: impl Iterator) { pub async fn run(args: Arguments) { let http_factory = HttpClientFactory::new(&args.http_client); - let web3 = shared::ethrpc::web3( - &args.shared.ethrpc, - &http_factory, - 
&args.shared.node_url, - "base", - ); - let simulation_web3 = args.shared.simulation_node_url.as_ref().map(|node_url| { - shared::ethrpc::web3(&args.shared.ethrpc, &http_factory, node_url, "simulation") - }); + let web3 = shared::ethrpc::web3(&args.shared.ethrpc, &args.shared.node_url, "base"); + let simulation_web3 = args + .shared + .simulation_node_url + .as_ref() + .map(|node_url| shared::ethrpc::web3(&args.shared.ethrpc, node_url, "simulation")); let chain_id = web3 .alloy @@ -259,12 +256,7 @@ pub async fn run(args: Arguments) { let trace_call_detector = args.tracing_node_url.as_ref().map(|tracing_node_url| { CachingDetector::new( Box::new(TraceCallDetector::new( - shared::ethrpc::web3( - &args.shared.ethrpc, - &http_factory, - tracing_node_url, - "trace", - ), + shared::ethrpc::web3(&args.shared.ethrpc, tracing_node_url, "trace"), *settlement_contract.address(), finder, )), diff --git a/crates/refunder/src/lib.rs b/crates/refunder/src/lib.rs index 4fc9a59675..c5a13365d4 100644 --- a/crates/refunder/src/lib.rs +++ b/crates/refunder/src/lib.rs @@ -11,7 +11,6 @@ use { contracts::alloy::CoWSwapEthFlow, observe::metrics::LivenessChecking, refund_service::RefundService, - shared::http_client::HttpClientFactory, sqlx::postgres::PgPoolOptions, std::{ sync::{Arc, RwLock}, @@ -40,8 +39,7 @@ pub async fn start(args: impl Iterator) { } pub async fn run(args: arguments::Arguments) { - let http_factory = HttpClientFactory::new(&args.http_client); - let web3 = shared::ethrpc::web3(&args.ethrpc, &http_factory, &args.node_url, "base"); + let web3 = shared::ethrpc::web3(&args.ethrpc, &args.node_url, "base"); if let Some(expected_chain_id) = args.chain_id { let chain_id = web3 .alloy diff --git a/crates/shared/Cargo.toml b/crates/shared/Cargo.toml index dc991e85c7..14538595f1 100644 --- a/crates/shared/Cargo.toml +++ b/crates/shared/Cargo.toml @@ -36,7 +36,6 @@ model = { workspace = true } num = { workspace = true } number = { workspace = true } order-validation = { workspace = true } -primitive-types = { workspace = true } prometheus = { workspace = true } prometheus-metric-storage = { workspace = true } rand = { workspace = true } @@ -52,7 +51,6 @@ tokio = { workspace = true, features = ["macros", "time", "signal"] } tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter", "fmt", "time"] } url = { workspace = true } -web3 = { workspace = true } mockall = { workspace = true, optional = true } diff --git a/crates/shared/src/account_balances/mod.rs b/crates/shared/src/account_balances/mod.rs index 17b7463d53..03b853d41e 100644 --- a/crates/shared/src/account_balances/mod.rs +++ b/crates/shared/src/account_balances/mod.rs @@ -191,7 +191,7 @@ impl BalanceSimulator { )>::abi_decode(&response.0) .map_err(|err| { tracing::error!(?err, "failed to decode balance response"); - web3::error::Error::Decoder("failed to decode balance response".to_string()) + alloy::contract::Error::AbiError(alloy::dyn_abi::Error::SolTypes(err)) })?; let simulation = Simulation { @@ -226,6 +226,4 @@ pub struct Simulation { pub enum SimulationError { #[error("method error: {0:?}")] Method(#[from] alloy::contract::Error), - #[error("web3 error: {0:?}")] - Web3(#[from] web3::error::Error), } diff --git a/crates/shared/src/account_balances/simulation.rs b/crates/shared/src/account_balances/simulation.rs index 7f5a70a6cf..b497fae6c5 100644 --- a/crates/shared/src/account_balances/simulation.rs +++ b/crates/shared/src/account_balances/simulation.rs @@ -8,7 +8,7 @@ use { alloy::primitives::{Address, 
U256}, anyhow::Result, contracts::alloy::{BalancerV2Vault::BalancerV2Vault, ERC20}, - ethrpc::Web3, + ethrpc::{Web3, alloy::ProviderLabelingExt}, futures::future, model::order::SellTokenSource, tracing::instrument, @@ -28,7 +28,7 @@ impl Balances { // contracts exist at addresses that get called. This allows us to // properly check if the `source` is not supported for the deployment // work without additional code paths :tada:! - let web3 = ethrpc::instrumented::instrument_with_label(web3, "balanceFetching".into()); + let web3 = web3.labeled("balanceFetching"); Self { web3, diff --git a/crates/shared/src/bad_token/token_owner_finder/mod.rs b/crates/shared/src/bad_token/token_owner_finder/mod.rs index 2e1fd183c5..e12b41de61 100644 --- a/crates/shared/src/bad_token/token_owner_finder/mod.rs +++ b/crates/shared/src/bad_token/token_owner_finder/mod.rs @@ -33,7 +33,7 @@ use { anyhow::{Context, Result}, chain::Chain, contracts::alloy::{BalancerV2Vault, ERC20, IUniswapV3Factory}, - ethrpc::alloy::errors::ContractErrorExt, + ethrpc::alloy::{ProviderLabelingExt, errors::ContractErrorExt}, futures::{Stream, StreamExt as _}, rate_limit::Strategy, reqwest::Url, @@ -294,7 +294,7 @@ pub async fn init( base_tokens: &BaseTokens, settlement_contract: Address, ) -> Result> { - let web3 = ethrpc::instrumented::instrument_with_label(&web3, "tokenOwners".into()); + let web3 = web3.labeled("tokenOwners"); let finders = args .token_owner_finders .as_deref() diff --git a/crates/shared/src/ethrpc.rs b/crates/shared/src/ethrpc.rs index d855f0c7af..51e52eb495 100644 --- a/crates/shared/src/ethrpc.rs +++ b/crates/shared/src/ethrpc.rs @@ -1,11 +1,10 @@ -pub use ethrpc::{Web3, Web3Transport}; +pub use ethrpc::Web3; use { - crate::http_client::HttpClientFactory, - reqwest::Url, std::{ fmt::{self, Display, Formatter}, time::Duration, }, + url::Url, }; pub const MAX_BATCH_SIZE: usize = 100; @@ -59,13 +58,8 @@ impl Arguments { } } -/// Create a Web3 instance. -pub fn web3( - args: &Arguments, - http_factory: &HttpClientFactory, - url: &Url, - name: impl ToString, -) -> Web3 { - let http_builder = http_factory.builder(); - ethrpc::web3(args.ethrpc(), http_builder, url, name) +/// Create a Web3 instance with a label for observability. +pub fn web3(args: &Arguments, url: &Url, name: impl ToString) -> Web3 { + let label = name.to_string(); + ethrpc::web3(args.ethrpc(), url, Some(&label)) } diff --git a/crates/shared/src/order_validation.rs b/crates/shared/src/order_validation.rs index 9db1e46594..8460a501b3 100644 --- a/crates/shared/src/order_validation.rs +++ b/crates/shared/src/order_validation.rs @@ -23,7 +23,6 @@ use { app_data::{AppDataHash, Hook, Hooks, ValidatedAppData, Validator}, async_trait::async_trait, contracts::alloy::{HooksTrampoline, WETH9}, - ethrpc::alloy::conversions::{IntoAlloy, IntoLegacy}, model::{ DomainSeparator, interaction::InteractionData, @@ -608,10 +607,9 @@ impl OrderValidating for OrderValidator { // Happens before signature verification because a miscalculated app data hash // by the API user would lead to being unable to validate the signature below. 
let app_data = self.validate_app_data(&order.app_data, &full_app_data_override)?; - let app_data_signer = app_data.inner.protocol.signer.map(IntoLegacy::into_legacy); + let app_data_signer = app_data.inner.protocol.signer; - let owner = - order.verify_owner(domain_separator, app_data_signer.map(IntoAlloy::into_alloy))?; + let owner = order.verify_owner(domain_separator, app_data_signer)?; tracing::debug!(?owner, "recovered owner from order and signature"); let signing_scheme = order.signature.scheme(); let data = OrderData { diff --git a/crates/shared/src/price_estimation/competition/quote.rs b/crates/shared/src/price_estimation/competition/quote.rs index 8c6be7c544..2caa30c786 100644 --- a/crates/shared/src/price_estimation/competition/quote.rs +++ b/crates/shared/src/price_estimation/competition/quote.rs @@ -8,11 +8,10 @@ use { Query, QuoteVerificationMode, }, + alloy::primitives::{Address, U256}, anyhow::Context, - ethrpc::alloy::conversions::{IntoAlloy, IntoLegacy}, futures::future::{BoxFuture, FutureExt, TryFutureExt}, model::order::OrderKind, - primitive_types::{H160, U256}, std::{cmp::Ordering, sync::Arc, time::Duration}, tracing::instrument, }; @@ -27,9 +26,7 @@ impl PriceEstimating for CompetitionEstimator> { OrderKind::Buy => query.sell_token, OrderKind::Sell => query.buy_token, }; - let get_context = self - .ranking - .provide_context(out_token.into_legacy(), query.timeout); + let get_context = self.ranking.provide_context(out_token, query.timeout); // Filter out obviously wrong estimates: // - 0 gas cost would lead to us paying huge subsidies @@ -100,7 +97,7 @@ fn compare_quote(query: &Query, a: &Estimate, b: &Estimate, context: &RankingCon impl PriceRanking { async fn provide_context( &self, - token: H160, + token: Address, timeout: Duration, ) -> Result { match self { @@ -114,10 +111,8 @@ impl PriceRanking { let gas = gas .effective_gas_price() .map_err(PriceEstimationError::ProtocolInternal); - let (native_price, gas_price) = futures::try_join!( - native.estimate_native_price(token.into_alloy(), timeout), - gas - )?; + let (native_price, gas_price) = + futures::try_join!(native.estimate_native_price(token, timeout), gas)?; Ok(RankingContext { native_price, @@ -148,8 +143,16 @@ impl RankingContext { // High fees mean paying more `sell_token` for your buy order. 
OrderKind::Buy => eth_out + fees, }; - // converts `NaN` and `(-∞, 0]` to `0` - U256::from_f64_lossy(effective_eth_out) + match effective_eth_out { + // converts `NaN` and `(-∞, 0]` to `0` + v if v.is_sign_negative() || v.is_nan() => U256::ZERO, + // Previous case already covered negative infinity + v if v.is_infinite() => U256::MAX, + // Note on truncation: previously we used primitive_types::U256::from_f64_lossy which + // truncated the floating point, while alloy is slightly more faithful to the original + // value and rounds to closest integer: [0, 0.5) => 0, [0.5, 1] => 1 + v => U256::from(v.trunc()), + } } } diff --git a/crates/shared/src/price_estimation/factory.rs b/crates/shared/src/price_estimation/factory.rs index 0893f4cd68..305ecc11e8 100644 --- a/crates/shared/src/price_estimation/factory.rs +++ b/crates/shared/src/price_estimation/factory.rs @@ -31,7 +31,7 @@ use { alloy::primitives::Address, anyhow::{Context as _, Result}, contracts::alloy::WETH9, - ethrpc::block_stream::CurrentBlockWatcher, + ethrpc::{alloy::ProviderLabelingExt, block_stream::CurrentBlockWatcher}, number::nonzero::NonZeroU256, rate_limit::RateLimiter, reqwest::Url, @@ -99,7 +99,7 @@ impl<'a> PriceEstimatorFactory<'a> { let Some(web3) = network.simulation_web3.clone() else { return Ok(None); }; - let web3 = ethrpc::instrumented::instrument_with_label(&web3, "simulator".into()); + let web3 = web3.labeled("simulator"); let tenderly = shared_args .tenderly diff --git a/crates/shared/src/price_estimation/trade_verifier/balance_overrides/mod.rs b/crates/shared/src/price_estimation/trade_verifier/balance_overrides/mod.rs index 44668f70b9..3462f18802 100644 --- a/crates/shared/src/price_estimation/trade_verifier/balance_overrides/mod.rs +++ b/crates/shared/src/price_estimation/trade_verifier/balance_overrides/mod.rs @@ -542,7 +542,7 @@ mod tests { let balance_overrides = BalanceOverrides { hardcoded: Default::default(), detector: Some(( - Detector::new(mock_web3.erased(), 60), + Detector::new(mock_web3, 60), Mutex::new(SizedCache::with_size(100)), )), }; @@ -590,7 +590,7 @@ mod tests { let balance_overrides = BalanceOverrides { hardcoded: Default::default(), detector: Some(( - Detector::new(mock_web3.erased(), 60), + Detector::new(mock_web3, 60), Mutex::new(SizedCache::with_size(100)), )), }; diff --git a/crates/shared/src/signature_validator/simulation.rs b/crates/shared/src/signature_validator/simulation.rs index 4f2a6e755f..d7d3adfa99 100644 --- a/crates/shared/src/signature_validator/simulation.rs +++ b/crates/shared/src/signature_validator/simulation.rs @@ -8,7 +8,7 @@ use { crate::price_estimation::trade_verifier::balance_overrides::BalanceOverriding, alloy::{ dyn_abi::SolType, - primitives::Address, + primitives::{Address, U256}, rpc::types::state::StateOverride, sol_types::{SolCall, sol_data}, transports::RpcError, @@ -19,8 +19,7 @@ use { GPv2Settlement, support::Signatures, }, - ethrpc::{Web3, alloy::conversions::IntoLegacy}, - primitive_types::U256, + ethrpc::{Web3, alloy::ProviderLabelingExt}, std::sync::Arc, tracing::instrument, }; @@ -44,7 +43,7 @@ impl Validator { vault_relayer: Address, balance_overrider: Arc, ) -> Self { - let web3 = ethrpc::instrumented::instrument_with_label(web3, "signatureValidation".into()); + let web3 = web3.labeled("signatureValidation"); Self { signatures_address, settlement, @@ -148,14 +147,12 @@ impl Validator { }) .map_err(|_| SignatureValidationError::Invalid)?; - let gas_used = >::abi_decode(&response_bytes.0) - .with_context(|| { - format!( - "could not decode 
signature check result: {}", - const_hex::encode(&response_bytes.0) - ) - })? - .into_legacy(); + let gas_used = >::abi_decode(&response_bytes.0).with_context(|| { + format!( + "could not decode signature check result: {}", + const_hex::encode(&response_bytes.0) + ) + })?; Ok(Simulation { gas_used }) } diff --git a/crates/shared/src/sources/balancer_v2/pool_fetching/mod.rs b/crates/shared/src/sources/balancer_v2/pool_fetching/mod.rs index 3d9c027643..5cb30bfc92 100644 --- a/crates/shared/src/sources/balancer_v2/pool_fetching/mod.rs +++ b/crates/shared/src/sources/balancer_v2/pool_fetching/mod.rs @@ -51,7 +51,10 @@ use { BalancerV2WeightedPoolFactoryV3, BalancerV2WeightedPoolFactoryV4, }, - ethrpc::block_stream::{BlockRetrieving, CurrentBlockWatcher}, + ethrpc::{ + alloy::ProviderLabelingExt, + block_stream::{BlockRetrieving, CurrentBlockWatcher}, + }, model::TokenPair, reqwest::{Client, Url}, std::{ @@ -247,7 +250,7 @@ impl BalancerPoolFetcher { deny_listed_pool_ids: Vec, ) -> Result { let pool_initializer = BalancerSubgraphClient::from_subgraph_url(subgraph_url, client)?; - let web3 = ethrpc::instrumented::instrument_with_label(&web3, "balancerV2".into()); + let web3 = web3.labeled("balancerV2"); let fetcher = Arc::new(Cache::new( create_aggregate_pool_fetcher( web3, diff --git a/crates/shared/src/sources/balancer_v2/pools/weighted.rs b/crates/shared/src/sources/balancer_v2/pools/weighted.rs index 3e60249538..56a7e16734 100644 --- a/crates/shared/src/sources/balancer_v2/pools/weighted.rs +++ b/crates/shared/src/sources/balancer_v2/pools/weighted.rs @@ -151,7 +151,7 @@ mod tests { providers::{Provider, ProviderBuilder, mock::Asserter}, sol_types::SolCall, }, - ethrpc::{Web3, mock::MockTransport}, + ethrpc::Web3, futures::future, maplit::btreemap, }; @@ -279,7 +279,7 @@ mod tests { let asserter = Asserter::new(); asserter.push_success(&10); - let web3 = Web3::::with_asserter(asserter); + let web3 = Web3::with_asserter(asserter); let factory = BalancerV2WeightedPoolFactory::Instance::new(Address::default(), web3.alloy.clone()); diff --git a/crates/shared/src/sources/uniswap_v2/mod.rs b/crates/shared/src/sources/uniswap_v2/mod.rs index 8c07616489..d23a4d1d6f 100644 --- a/crates/shared/src/sources/uniswap_v2/mod.rs +++ b/crates/shared/src/sources/uniswap_v2/mod.rs @@ -16,6 +16,7 @@ use { alloy::primitives::{Address, B256}, anyhow::{Context, Result}, contracts::alloy::IUniswapLikeRouter, + ethrpc::alloy::ProviderLabelingExt, hex_literal::hex, std::{fmt::Display, str::FromStr, sync::Arc}, }; @@ -102,7 +103,7 @@ impl UniV2BaselineSourceParameters { } pub async fn into_source(&self, web3: &Web3) -> Result { - let web3 = ethrpc::instrumented::instrument_with_label(web3, "uniswapV2".into()); + let web3 = web3.labeled("uniswapV2"); let router = contracts::alloy::IUniswapLikeRouter::Instance::new(self.router, web3.alloy.clone()); let factory = router.factory().call().await.context("factory")?; diff --git a/crates/shared/src/sources/uniswap_v3/pool_fetching.rs b/crates/shared/src/sources/uniswap_v3/pool_fetching.rs index 9eab04c7ae..f839e2107a 100644 --- a/crates/shared/src/sources/uniswap_v3/pool_fetching.rs +++ b/crates/shared/src/sources/uniswap_v3/pool_fetching.rs @@ -20,6 +20,7 @@ use { }, ethrpc::{ Web3, + alloy::ProviderLabelingExt, block_stream::{BlockRetrieving, RangeInclusive}, }, itertools::{Either, Itertools}, @@ -287,7 +288,7 @@ impl UniswapV3PoolFetcher { max_pools_to_initialize: usize, max_pools_per_tick_query: usize, ) -> Result { - let web3 = 
ethrpc::instrumented::instrument_with_label(&web3, "uniswapV3".into()); + let web3 = web3.labeled("uniswapV3"); let checkpoint = PoolsCheckpointHandler::new( subgraph_url, client, diff --git a/crates/solvers/src/domain/solver.rs b/crates/solvers/src/domain/solver.rs index fc58155fe8..ac1463eaab 100644 --- a/crates/solvers/src/domain/solver.rs +++ b/crates/solvers/src/domain/solver.rs @@ -79,7 +79,7 @@ impl Solver { pub async fn new(config: Config) -> Self { let uni_v3_quoter_v2 = match config.uni_v3_node_url { Some(url) => { - let web3 = ethrpc::web3(Default::default(), Default::default(), &url, "baseline"); + let web3 = ethrpc::web3(Default::default(), &url, Some("baseline")); contracts::alloy::UniswapV3QuoterV2::Instance::deployed(&web3.alloy) .await .map(Arc::new) From 3760d319d0162cb826936c538ac7deeceead947a Mon Sep 17 00:00:00 2001 From: tilacog Date: Tue, 3 Feb 2026 11:16:23 -0300 Subject: [PATCH 028/219] =?UTF-8?q?Trait=E2=80=91Based=20Architecture=20fo?= =?UTF-8?q?r=20Refunder=20Service=20(#4051)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### Description This PR follows up on #4029 and introduces a **trait‑based architecture** for the `refunder` crate. By decoupling the `RefundService` from concrete database and blockchain implementations, we can now write unit tests without relying on (heavyweight) integration tests. ### Changes - Added a new `traits.rs` module that defines `DbRead`, `ChainRead`, and `ChainWrite` traits to abstract the two main boundaries of the system. - Created an `infra/` module housing the previous concrete implementations of those traits: - `AlloyChain` implements `ChainRead` - `Postgres` implements `DbRead` - Made `RefundService` generic over those traits, so we can use mocks for unit testing it (and thes real/production implementations) as needed. - Extracted `identify_uids_refunding_status` into its own function, to simplify testing. - Moved the `RefundStatus` enum into `traits.rs` so it lives alongside the related abstractions. - Reorganized the service construction inside `run()` for clearer flow. - Added a suite of unit tests that use mocks to cover a variety of scenarios. 
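### Trait sketch (illustrative)

The snippet below is a rough sketch of what the three boundary traits look like, inferred from the `infra` implementations and the mock-based tests in this patch; the authoritative definitions live in `crates/refunder/src/traits.rs` (added later in this diff) and may differ in detail. It assumes native `async fn` in traits and the types the refunder already uses (`EthOrderPlacement`, `EthFlowOrder::Data`, `OrderUid`); the receiver kinds (`&self` vs `&mut self`) and the `u32` timestamp are assumptions, not guarantees.

```rust
// Illustrative only: approximate shape of the refunder's boundary traits,
// reconstructed from the AlloyChain/Postgres implementations and the unit
// tests in this PR. The real definitions are in crates/refunder/src/traits.rs.
use {
    alloy::primitives::{Address, B256},
    anyhow::Result,
    contracts::alloy::CoWSwapEthFlow::EthFlowOrder,
    database::{OrderUid, ethflow_orders::EthOrderPlacement},
};

/// Refund eligibility of an EthFlow order (moved into traits.rs by this PR).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum RefundStatus {
    /// Order has already been refunded or cancelled.
    Refunded,
    /// Order is still active and eligible for refund, owned by this address.
    NotYetRefunded(Address),
    /// Order is invalid (never created, freed, or owner cannot receive ETH).
    Invalid,
}

/// Read-only access to the orderbook database.
pub trait DbRead {
    /// Expired ethflow orders that still qualify for a refund.
    async fn get_refundable_orders(
        &self,
        block_time: i64,
        min_validity_duration: i64,
        min_price_deviation: f64,
    ) -> Result<Vec<EthOrderPlacement>>;

    /// Order data needed to encode the refund transaction for `uid`.
    async fn get_ethflow_order_data(&self, uid: &OrderUid) -> Result<EthFlowOrder::Data>;
}

/// Read-only access to on-chain state.
pub trait ChainRead {
    /// Timestamp (in seconds) of the current block.
    async fn current_block_timestamp(&self) -> Result<u32>;
    /// Whether `address` can receive a plain ETH transfer (EOA or payable contract).
    async fn can_receive_eth(&self, address: Address) -> bool;
    /// EthFlow contracts the refunder is allowed to refund from.
    fn ethflow_addresses(&self) -> Vec<Address>;
    /// On-chain refund status of the order identified by `order_hash`.
    async fn get_order_status(
        &self,
        ethflow_address: Address,
        order_hash: B256,
    ) -> Result<RefundStatus>;
}

/// Ability to submit refund transactions.
pub trait ChainWrite {
    /// Submits one batched refund call to `ethflow_contract` for `uids`.
    async fn submit_batch(
        &mut self,
        uids: &[OrderUid],
        encoded_orders: Vec<EthFlowOrder::Data>,
        ethflow_contract: Address,
    ) -> Result<()>;
}
```

Because `RefundService<D, CR, CW>` only depends on these traits, the unit tests can drive it with the mockall-generated `MockDbRead`, `MockChainRead`, and `MockChainWrite` instead of a live Postgres instance and an RPC node.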
### How to test Run the unit tests with: ```bash cargo nextest run -p refunder ``` --- Cargo.lock | 111 +-- crates/e2e/tests/e2e/refunder.rs | 24 +- crates/refunder/Cargo.toml | 5 + crates/refunder/src/infra/chain.rs | 64 ++ crates/refunder/src/infra/database.rs | 89 ++ crates/refunder/src/infra/mod.rs | 6 + crates/refunder/src/lib.rs | 77 +- crates/refunder/src/refund_service.rs | 1229 +++++++++++++++++++------ crates/refunder/src/submitter.rs | 112 +-- crates/refunder/src/traits.rs | 142 +++ 10 files changed, 1440 insertions(+), 419 deletions(-) create mode 100644 crates/refunder/src/infra/chain.rs create mode 100644 crates/refunder/src/infra/database.rs create mode 100644 crates/refunder/src/infra/mod.rs create mode 100644 crates/refunder/src/traits.rs diff --git a/Cargo.lock b/Cargo.lock index f94928ee4f..ffdab7f40b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -47,9 +47,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05c97aa0031055a663e364890f2bc15879d6ec38dae9fbeece68fcc82d9cdb81" +checksum = "3cb837e538ce3eac04e357ef47b8acead0b14c83ec6bcafedd167e6a60c40876" dependencies = [ "alloy-consensus", "alloy-contract", @@ -84,9 +84,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e30ab0d3e3c32976f67fc1a96179989e45a69594af42003a6663332f9b0bb9d" +checksum = "12870ab65b131f609257436935047eec3cfabee8809732f6bf5a69fe2a18cf2e" dependencies = [ "alloy-eips", "alloy-primitives", @@ -111,9 +111,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20736b1f9d927d875d8777ef0c2250d4c57ea828529a9dbfa2c628db57b911e" +checksum = "47c66b14d2187de0c4efe4ef678aaa57a6a34cccdbea3a0773627fac9bd128f4" dependencies = [ "alloy-consensus", "alloy-eips", @@ -125,9 +125,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "008aba161fce2a0d94956ae09d7d7a09f8fbdf18acbef921809ef126d6cdaf97" +checksum = "e9bf6afe8c25b63c98927c6f76d90cf8dc443cc4980a7d824151c84a6e568934" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -216,9 +216,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15b85157b7be31fc4adf6acfefcb0d4308cba5dbd7a8d8e62bcc02ff37d6131a" +checksum = "f076d25ddfcd2f1cbcc234e072baf97567d1df0e3fccdc1f8af8cc8b18dc6299" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -251,9 +251,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60f045b69b5e80b8944b25afe74ae6b974f3044d84b4a7a113da04745b2524cc" +checksum = "250dbd8496f04eabe997e6e4c5186a0630b8bc3dbe7552e1fd917d491ef811e9" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -266,9 +266,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b314ed5bdc7f449c53853125af2db5ac4d3954a9f4b205e7d694f02fc1932d1" +checksum = 
"fd45cdac957d1fa1d0c18f54f262350eb72f1adc38dd1f8b15f33f0747c6a60c" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -292,9 +292,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e9762ac5cca67b0f6ab614f7f8314942eead1c8eeef61511ea43a6ff048dbe0" +checksum = "fba5c43e055effb5bd33dbc74b1ab7fe0f367d8801a25af9e7c716b3ef5e440b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -334,9 +334,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea8f7ca47514e7f552aa9f3f141ab17351332c6637e3bf00462d8e7c5f10f51f" +checksum = "9e87a90cacc27dffd91fa6440145934a782227d31b9876444c5924d3607084ea" dependencies = [ "alloy-chains", "alloy-consensus", @@ -379,9 +379,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4082778c908aa801a1f9fdc85d758812842ab4b2aaba58e9dbe7626d708ab7e1" +checksum = "c24a102935aa9d5a8b8fc8c47f39a0823672c33f0b27b5806292cb80988e6345" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -423,9 +423,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26dd083153d2cb73cce1516f5a3f9c3af74764a2761d901581a355777468bd8f" +checksum = "57a65bb9060e43e9738bbd7c30d742ed962d609f2123a665bbdab7e6e0f13fd3" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -448,9 +448,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c998214325cfee1fbe61e5abaed3a435f4ca746ac7399b46feb57c364552452" +checksum = "98bfd40f4e36cb29015ec744bc764629edbe823ec6b95aceef2684090c142976" dependencies = [ "alloy-primitives", "alloy-rpc-types-anvil", @@ -464,9 +464,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2b03d65fcf579fbf17d3aac32271f99e2b562be04097436cd6e766b3e06613b" +checksum = "1ac7d0dbb62e807028554e34c2b5724a1f57132792684107c32009e84fcf4044" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -476,9 +476,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b4a6f49d161ef83354d5ba3c8bc83c8ee464cb90182b215551d5c4b846579be" +checksum = "8faa6f22068857f58579271b15e042f4725ad35cdce2ed4778ba32ffd3102b92" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -487,9 +487,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "467025b916f32645f322a085d0017f2996d0200ac89dd82a4fc2bf0f17b9afa3" +checksum = "ccb37a9eee8e7a19bb07b5cd55d33457884e44b212588b7429c5d318d2b90295" dependencies = [ "alloy-primitives", "derive_more 2.1.1", @@ -499,9 +499,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11920b16ab7c86052f990dcb4d25312fb2889faf506c4ee13dc946b450536989" +checksum = 
"1ec734cce11f7fe889950b36b51589397528b26beb6f890834a2131ee9f174d7" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -520,9 +520,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "498375e6a13b6edd04422a13d2b1a6187183e5a3aa14c5907b4c566551248bab" +checksum = "7fe64cd4af2e68b2154ac02a7908249a448fbd3d1d05890786a5af93686083cc" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -534,9 +534,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d9123d321ecd70925646eb2c60b1d9b7a965f860fbd717643e2c20fcf85d48d" +checksum = "9504c0f00a72883e640abc4681a5691a57dec693bc28d4aa80257c8e1e9e6e1f" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -546,9 +546,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1a0d2d5c64881f3723232eaaf6c2d9f4f88b061c63e87194b2db785ff3aa31f" +checksum = "27f076bfd74fccc63d50546e1765359736357a953de2eb778b7b6191571735e6" dependencies = [ "alloy-primitives", "serde", @@ -557,9 +557,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ea4ac9765e5a7582877ca53688e041fe184880fe75f16edf0945b24a319c710" +checksum = "d80748c209a68421ab6f737828ce6ede7543569a5cad099c1ec16fc1baa05620" dependencies = [ "alloy-dyn-abi", "alloy-primitives", @@ -574,9 +574,9 @@ dependencies = [ [[package]] name = "alloy-signer-aws" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d4cf9b92d8e2a467942397b8b07c75bf05f32d0cbe290959a75518f18141ae8" +checksum = "a2a0b80a0e21c1b8d7321d0a88bd115ad1182c293ca7e3dd0217c156f98b5b1e" dependencies = [ "alloy-consensus", "alloy-network", @@ -593,9 +593,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c9d85b9f7105ab5ce7dae7b0da33cd9d977601a48f759e1c82958978dd1a905" +checksum = "17eb1eb39351b4bf20bb0710d8d3a91eb7918d3f3de2f3835f556842e33865cb" dependencies = [ "alloy-consensus", "alloy-network", @@ -685,9 +685,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e72f5c4ba505ebead6a71144d72f21a70beadfb2d84e0a560a985491ecb71de" +checksum = "4a0c1a0288cdff6ee2b2c2c98ab42889d221ca8a9ee4120ede59b5449e0dcb20" dependencies = [ "alloy-json-rpc", "auto_impl", @@ -708,9 +708,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "400dc298aaabdbd48be05448c4a19eaa38416c446043f3e54561249149269c32" +checksum = "36dfa207caf6b528b9466c714626f5b2dfd5e8d4595a74631d5670672dac102b" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -723,9 +723,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c38b4472f2bbd96a27f393de9e2f12adca0dc1075fb4d0f7c8f3557c5c600392" +checksum = 
"91620efb46f8d011e37f74fac53a643e830a7bb24982143094b887003cbfb6be" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -756,9 +756,9 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2183706e24173309b0ab0e34d3e53cf3163b71a419803b2b3b0c1fb7ff7a941" +checksum = "bb0d567f4830dea921868c7680004ae0c7f221b05e6477db6c077c7953698f56" dependencies = [ "darling", "proc-macro2", @@ -5395,10 +5395,13 @@ dependencies = [ "futures", "humantime", "mimalloc", + "mockall", "number", "observe", "prometheus", "prometheus-metric-storage", + "rand 0.8.5", + "rstest", "shared", "sqlx", "tikv-jemallocator", @@ -5658,9 +5661,9 @@ checksum = "48fd7bd8a6377e15ad9d42a8ec25371b94ddc67abe7c8b9127bec79bebaaae18" [[package]] name = "rust_decimal" -version = "1.39.0" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35affe401787a9bd846712274d97654355d21b2a2c092a3139aabe31e9022282" +checksum = "61f703d19852dbf87cbc513643fa81428361eb6940f1ac14fd58155d295a3eb0" dependencies = [ "arrayvec", "num-traits", diff --git a/crates/e2e/tests/e2e/refunder.rs b/crates/e2e/tests/e2e/refunder.rs index d33ade45aa..01ace9ad94 100644 --- a/crates/e2e/tests/e2e/refunder.rs +++ b/crates/e2e/tests/e2e/refunder.rs @@ -292,13 +292,13 @@ async fn run_refunder_threshold_test( advance_time_past_expiration(&web3, valid_to).await; - let mut refund_service = RefundService::new( + let mut refund_service = RefundService::from_components( services.db().clone(), web3.clone(), - vec![ethflow_contract.clone()], + vec![*ethflow_contract.address()], validity.enforced, slippage.enforced, - Box::new(refunder_account.signer.clone()), + refunder_account.signer.clone(), MAX_GAS_PRICE, START_PRIORITY_FEE_TIP, None, @@ -419,13 +419,13 @@ async fn refunder_skips_invalidated_orders(web3: Web3) { advance_time_past_expiration(&web3, valid_to).await; - let mut refund_service = RefundService::new( + let mut refund_service = RefundService::from_components( services.db().clone(), web3.clone(), - vec![ethflow_contract.clone()], + vec![*ethflow_contract.address()], 0, // min_validity_duration = 0 (permissive) 0, // min_price_deviation_bps = 0 (permissive) - Box::new(refunder_account.signer.clone()), + refunder_account.signer.clone(), MAX_GAS_PRICE, START_PRIORITY_FEE_TIP, None, @@ -527,13 +527,13 @@ async fn refunder_skips_settled_orders(web3: Web3) { "Settled order should not be invalidated on-chain" ); - let mut refund_service = RefundService::new( + let mut refund_service = RefundService::from_components( services.db().clone(), web3.clone(), - vec![ethflow_contract.clone()], + vec![*ethflow_contract.address()], 0, // min_validity_duration = 0 (permissive) 0, // min_price_deviation_bps = 0 (permissive) - Box::new(refunder_account.signer), + refunder_account.signer, MAX_GAS_PRICE, START_PRIORITY_FEE_TIP, None, @@ -624,13 +624,13 @@ async fn refunder_multiple_ethflow_contracts(web3: Web3) { advance_time_past_expiration(&web3, valid_to).await; - let mut refund_service = RefundService::new( + let mut refund_service = RefundService::from_components( services.db().clone(), web3, - vec![ethflow_contract.clone(), ethflow_contract_2.clone()], + vec![*ethflow_contract.address(), *ethflow_contract_2.address()], validity_duration as i64 / 2, 10, - Box::new(refunder.signer), + refunder.signer, MAX_GAS_PRICE, START_PRIORITY_FEE_TIP, None, diff --git a/crates/refunder/Cargo.toml b/crates/refunder/Cargo.toml index 
e81abd9417..f6d1d7bac3 100644 --- a/crates/refunder/Cargo.toml +++ b/crates/refunder/Cargo.toml @@ -28,6 +28,11 @@ tokio = { workspace = true, features = ["macros", "time", "rt-multi-thread"] } tracing = { workspace = true } url = { workspace = true } +[dev-dependencies] +mockall = { workspace = true } +rand = { workspace = true } +rstest = { workspace = true } + [lints] workspace = true diff --git a/crates/refunder/src/infra/chain.rs b/crates/refunder/src/infra/chain.rs new file mode 100644 index 0000000000..f7b660a5f9 --- /dev/null +++ b/crates/refunder/src/infra/chain.rs @@ -0,0 +1,64 @@ +//! Blockchain access via Alloy. + +use { + crate::traits::{ChainRead, RefundStatus}, + alloy::{primitives::Address, providers::Provider, rpc::types::TransactionRequest}, + anyhow::{Result, anyhow}, + contracts::alloy::CoWSwapEthFlow, + ethrpc::{AlloyProvider, block_stream::timestamp_of_current_block_in_seconds}, + std::collections::HashMap, +}; + +/// [`ChainRead`] implementation using Alloy. +pub struct AlloyChain { + provider: AlloyProvider, + ethflow_contracts: HashMap, +} + +impl AlloyChain { + pub fn new(provider: AlloyProvider, ethflow_addresses: Vec
) -> Self { + let ethflow_contracts = ethflow_addresses + .into_iter() + .map(|addr| { + let instance = CoWSwapEthFlow::Instance::new(addr, provider.clone()); + (addr, instance) + }) + .collect(); + Self { + provider, + ethflow_contracts, + } + } +} + +impl ChainRead for AlloyChain { + async fn current_block_timestamp(&self) -> Result { + timestamp_of_current_block_in_seconds(&self.provider).await + } + + async fn can_receive_eth(&self, address: Address) -> bool { + let tx = TransactionRequest::default() + .to(address) + .value(alloy::primitives::U256::from(1)); + + self.provider.estimate_gas(tx).await.is_ok() + } + + fn ethflow_addresses(&self) -> Vec
{ + self.ethflow_contracts.keys().copied().collect() + } + + async fn get_order_status( + &self, + ethflow_address: Address, + order_hash: alloy::primitives::B256, + ) -> Result { + let contract = self + .ethflow_contracts + .get(ðflow_address) + .ok_or_else(|| anyhow!("Unknown EthFlow contract: {ethflow_address}"))?; + + let order = contract.orders(order_hash).call().await?; + Ok(order.into()) + } +} diff --git a/crates/refunder/src/infra/database.rs b/crates/refunder/src/infra/database.rs new file mode 100644 index 0000000000..2bbbf246d2 --- /dev/null +++ b/crates/refunder/src/infra/database.rs @@ -0,0 +1,89 @@ +//! Database access via PostgreSQL. + +use { + crate::traits::DbRead, + alloy::primitives::Address, + anyhow::{Context, Result}, + contracts::alloy::CoWSwapEthFlow::EthFlowOrder, + database::{ + OrderUid, + ethflow_orders::{EthOrderPlacement, read_order, refundable_orders}, + orders::read_order as read_db_order, + }, + number::conversions::big_decimal_to_u256, + sqlx::PgPool, + std::time::Duration, +}; + +/// [`DbRead`] implementation using PostgreSQL. +pub struct Postgres { + pool: PgPool, + lookback_time: Option, +} + +impl Postgres { + pub fn new(pool: PgPool, lookback_time: Option) -> Self { + Self { + pool, + lookback_time, + } + } +} + +impl DbRead for Postgres { + async fn get_refundable_orders( + &self, + block_time: i64, + min_validity_duration: i64, + min_price_deviation: f64, + ) -> Result> { + let mut ex = self.pool.acquire().await?; + refundable_orders( + &mut ex, + block_time, + min_validity_duration, + min_price_deviation, + self.lookback_time, + ) + .await + .context("Error while retrieving the refundable ethflow orders from db") + } + + async fn get_ethflow_order_data(&self, uid: &OrderUid) -> Result { + let mut ex = self + .pool + .acquire() + .await + .with_context(|| format!("acquire connection for uid {uid:?}"))?; + let order = read_db_order(&mut ex, uid) + .await + .with_context(|| format!("read order {uid:?}"))? + .with_context(|| format!("missing order {uid:?}"))?; + let ethflow_order = read_order(&mut ex, uid) + .await + .with_context(|| format!("read ethflow order {uid:?}"))? + .with_context(|| format!("missing ethflow order {uid:?}"))?; + + let receiver = order + .receiver + .with_context(|| format!("order {uid:?} missing receiver"))?; + let sell_amount = big_decimal_to_u256(&order.sell_amount) + .with_context(|| format!("order {uid:?} invalid sell_amount"))?; + let buy_amount = big_decimal_to_u256(&order.buy_amount) + .with_context(|| format!("order {uid:?} invalid buy_amount"))?; + let fee_amount = big_decimal_to_u256(&order.fee_amount) + .with_context(|| format!("order {uid:?} invalid fee_amount"))?; + + Ok(EthFlowOrder::Data { + buyToken: Address::from(order.buy_token.0), + receiver: Address::from(receiver.0), + sellAmount: sell_amount, + buyAmount: buy_amount, + appData: order.app_data.0.into(), + feeAmount: fee_amount, + validTo: ethflow_order.valid_to as u32, + partiallyFillable: order.partially_fillable, + quoteId: 0i64, // quoteId is not important for refunding and will be ignored + }) + } +} diff --git a/crates/refunder/src/infra/mod.rs b/crates/refunder/src/infra/mod.rs new file mode 100644 index 0000000000..fc1819c3cf --- /dev/null +++ b/crates/refunder/src/infra/mod.rs @@ -0,0 +1,6 @@ +//! Production implementations of `DbRead`, `ChainRead`, and `ChainWrite`. 
+ +mod chain; +mod database; + +pub use {chain::AlloyChain, database::Postgres}; diff --git a/crates/refunder/src/lib.rs b/crates/refunder/src/lib.rs index c5a13365d4..92815ec760 100644 --- a/crates/refunder/src/lib.rs +++ b/crates/refunder/src/lib.rs @@ -1,14 +1,24 @@ +//! Automated refunder for expired EthFlow orders. +//! +//! Monitors EthFlow orders and returns ETH to users whose orders expired +//! without filling. Runs every 30 seconds: queries database, validates on-chain +//! status, submits batch refunds. +//! +//! Shares PostgreSQL with orderbook (read-only). Refunds tracked via on-chain +//! events. + pub mod arguments; +pub mod infra; pub mod refund_service; pub mod submitter; +pub mod traits; // Re-export commonly used types for external consumers (e.g., e2e tests) -pub use refund_service::RefundStatus; +pub use traits::RefundStatus; use { crate::arguments::Arguments, alloy::{providers::Provider, signers::local::PrivateKeySigner}, clap::Parser, - contracts::alloy::CoWSwapEthFlow, observe::metrics::LivenessChecking, refund_service::RefundService, sqlx::postgres::PgPoolOptions, @@ -39,7 +49,26 @@ pub async fn start(args: impl Iterator) { } pub async fn run(args: arguments::Arguments) { + // Observability setup + let liveness = Arc::new(Liveness { + last_successful_loop: RwLock::new(Instant::now()), + }); + observe::metrics::serve_metrics( + liveness.clone(), + ([0, 0, 0, 0], args.metrics_port).into(), + Default::default(), + Default::default(), + ); + + // Database initialization + let pg_pool = PgPoolOptions::new() + .max_connections(args.database_pool.db_max_connections.get()) + .connect_lazy(args.db_url.as_str()) + .expect("failed to create database"); + + // Blockchain/RPC setup let web3 = shared::ethrpc::web3(&args.ethrpc, &args.node_url, "base"); + if let Some(expected_chain_id) = args.chain_id { let chain_id = web3 .alloy @@ -52,45 +81,31 @@ pub async fn run(args: arguments::Arguments) { ); } - let pg_pool = PgPoolOptions::new() - .max_connections(args.database_pool.db_max_connections.get()) - .connect_lazy(args.db_url.as_str()) - .expect("failed to create database"); + // Signer configuration + let signer = args + .refunder_pk + .parse::() + .expect("couldn't parse refunder private key"); - let liveness = Arc::new(Liveness { - // Program will be healthy at the start even if no loop was ran yet. 
- last_successful_loop: RwLock::new(Instant::now()), - }); - observe::metrics::serve_metrics( - liveness.clone(), - ([0, 0, 0, 0], args.metrics_port).into(), - Default::default(), - Default::default(), - ); + // Service construction + let min_validity_duration = + i64::try_from(args.min_validity_duration.as_secs()).unwrap_or(i64::MAX); - let ethflow_contracts = args - .ethflow_contracts - .iter() - .map(|contract| CoWSwapEthFlow::Instance::new(*contract, web3.alloy.clone())) - .collect(); - let refunder_account = Box::new( - args.refunder_pk - .parse::() - .expect("couldn't parse refunder private key"), - ); - let mut refunder = RefundService::new( + let mut refunder = RefundService::from_components( pg_pool, web3, - ethflow_contracts, - i64::try_from(args.min_validity_duration.as_secs()).unwrap_or(i64::MAX), + args.ethflow_contracts, + min_validity_duration, args.min_price_deviation_bps, - refunder_account, + signer, args.max_gas_price, args.start_priority_fee_tip, Some(args.lookback_time), ); + + // Main loop loop { - tracing::info!("Staring a new refunding loop"); + tracing::info!("Starting a new refunding loop"); match refunder.try_to_refund_all_eligible_orders().await { Ok(_) => { track_refunding_loop_result("success"); diff --git a/crates/refunder/src/refund_service.rs b/crates/refunder/src/refund_service.rs index 2c7924d6da..ca1e087589 100644 --- a/crates/refunder/src/refund_service.rs +++ b/crates/refunder/src/refund_service.rs @@ -1,271 +1,194 @@ +//! Refund eligibility checks and batch submission. + use { - crate::submitter::Submitter, - alloy::{ - network::TxSigner, - primitives::{Address, B256, Signature}, - providers::Provider, - rpc::types::TransactionRequest, - }, - anyhow::{Context, Result, anyhow}, - contracts::alloy::CoWSwapEthFlow, - database::{ - OrderUid, - ethflow_orders::{EthOrderPlacement, read_order, refundable_orders}, - orders::read_order as read_db_order, + crate::{ + infra::{AlloyChain, Postgres}, + submitter::Submitter, + traits::{ChainRead, ChainWrite, DbRead, RefundStatus}, }, - ethrpc::{Web3, block_stream::timestamp_of_current_block_in_seconds}, + alloy::primitives::{Address, B256}, + anyhow::Result, + database::{OrderUid, ethflow_orders::EthOrderPlacement}, futures::{StreamExt, stream}, - number::conversions::big_decimal_to_u256, - shared::gas_price_estimation::eth_node::NodeGasPriceEstimator, - sqlx::PgPool, - std::{collections::HashMap, time::Duration}, + std::collections::HashMap, }; -pub const NO_OWNER: Address = Address::ZERO; -pub const INVALIDATED_OWNER: Address = Address::repeat_byte(0xff); const MAX_NUMBER_OF_UIDS_PER_REFUND_TX: usize = 30; +const BASIS_POINT_DENOMINATOR: f64 = 10_000.0; type CoWSwapEthFlowAddress = Address; -pub struct RefundService { - pub db: PgPool, - pub web3: Web3, - pub ethflow_contracts: Vec, - pub min_validity_duration: i64, - pub min_price_deviation: f64, - pub submitter: Submitter, - pub max_gas_price: u64, - pub start_priority_fee_tip: u64, - pub lookback_time: Option, +/// Extracts the EthFlow contract address from an order if it's in the +/// allowlist. +/// +/// Returns `None` (with a warning log) if the contract is not enabled for +/// refunds. +fn extract_known_ethflow_address( + eth_order_placement: &EthOrderPlacement, + known_ethflow_addresses: &[Address], +) -> Option
{ + let ethflow_contract_address = Address::from_slice(ð_order_placement.uid.0[32..52]); + if known_ethflow_addresses.contains(ðflow_contract_address) { + Some(ethflow_contract_address) + } else { + tracing::warn!( + uid = const_hex::encode_prefixed(eth_order_placement.uid.0), + ethflow = ?ethflow_contract_address, + "refunding orders from specific contract is not enabled", + ); + None + } } -/// Status of an EthFlow order refund eligibility. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum RefundStatus { - /// Order has already been refunded or cancelled. - Refunded, - /// Order is still active and eligible for refund, with the given owner - /// address. - NotYetRefunded(Address), - /// Order is invalid (never created, already freed, or owner cannot receive - /// ETH). - Invalid, +/// Queries on-chain refund status for a single order. +/// +/// Returns `None` if the status query fails. Marks orders as `Invalid` if +/// the owner cannot receive ETH. +async fn query_order_refund_status( + chain: &C, + eth_order_placement: &EthOrderPlacement, + ethflow_contract_address: Address, +) -> Option<(OrderUid, RefundStatus, Address)> { + use RefundStatus::*; + let order_hash: [u8; 32] = eth_order_placement.uid.0[0..32] + .try_into() + .expect("order_uid slice with incorrect length"); + + let status = match chain + .get_order_status(ethflow_contract_address, B256::from(order_hash)) + .await + { + Err(err) => { + tracing::error!( + uid =? B256::from(order_hash), + ?err, + "Error while getting the current onchain status of the order" + ); + return None; + } + Ok(NotYetRefunded(owner)) if !chain.can_receive_eth(owner).await => { + tracing::warn!( + uid = const_hex::encode_prefixed(eth_order_placement.uid.0), + ?owner, + "Order owner cannot receive ETH - marking as invalid" + ); + Invalid + } + Ok(other) => other, + }; + + Some((eth_order_placement.uid, status, ethflow_contract_address)) } -impl From for RefundStatus { - fn from(value: CoWSwapEthFlow::CoWSwapEthFlow::ordersReturn) -> Self { - match value.owner { - NO_OWNER => Self::Invalid, - INVALIDATED_OWNER => Self::Refunded, - owner => Self::NotYetRefunded(owner), +/// Filters `refundable_order_uids` by on-chain status, returning only orders +/// that need refund, grouped by EthFlow contract. +/// +/// Excludes orders from unknown contracts, failed status queries, already +/// refunded orders, and owners that can't receive ETH. +async fn identify_uids_refunding_status( + chain: &C, + refundable_order_uids: &[EthOrderPlacement], +) -> HashMap> { + let known_ethflow_addresses = chain.ethflow_addresses(); + + let futures = refundable_order_uids + .iter() + .filter_map(|eth_order_placement| { + let address = + extract_known_ethflow_address(eth_order_placement, &known_ethflow_addresses)?; + Some((eth_order_placement, address)) + }) + .map(|(eth_order_placement, ethflow_contract_address)| { + query_order_refund_status(chain, eth_order_placement, ethflow_contract_address) + }); + + let uid_with_latest_refundablility = futures::future::join_all(futures).await; + let mut to_be_refunded_uids = HashMap::<_, Vec<_>>::new(); + let mut invalid_uids = Vec::new(); + for (uid, status, contract_address) in uid_with_latest_refundablility.into_iter().flatten() { + match status { + RefundStatus::Refunded => (), + RefundStatus::Invalid => invalid_uids.push(uid), + RefundStatus::NotYetRefunded(_) => { + to_be_refunded_uids + .entry(contract_address) + .or_default() + .push(uid); + } } } + if !invalid_uids.is_empty() { + // In exceptional cases, e.g. 
if the refunder tries to refund orders from a + // previous contract, the order_owners could be zero, or the owner cannot + // receive ETH (e.g. EOF contracts or contracts with restrictive receive logic) + tracing::warn!( + "Skipping invalid orders (not created in current contract or owner cannot receive \ + ETH). Uids: {:?}", + invalid_uids + ); + } + to_be_refunded_uids } -impl RefundService { - #[allow(clippy::too_many_arguments)] +pub struct RefundService +where + D: DbRead, + CR: ChainRead, + CW: ChainWrite, +{ + pub database: D, + pub chain: CR, + pub submitter: CW, + pub min_validity_duration: i64, + pub min_price_deviation_bps: i64, +} + +impl RefundService +where + D: DbRead, + CR: ChainRead, + CW: ChainWrite, +{ pub fn new( - db: PgPool, - web3: Web3, - ethflow_contracts: Vec, + database: D, + chain: CR, + submitter: CW, min_validity_duration: i64, min_price_deviation_bps: i64, - signer: Box + Send + Sync + 'static>, - max_gas_price: u64, - start_priority_fee_tip: u64, - lookback_time: Option, ) -> Self { - let signer_address = signer.address(); - let gas_estimator = Box::new(NodeGasPriceEstimator::new(web3.alloy.clone())); - web3.wallet.register_signer(signer); RefundService { - db, - web3: web3.clone(), - ethflow_contracts, + database, + chain, + submitter, min_validity_duration, - min_price_deviation: min_price_deviation_bps as f64 / 10000f64, - max_gas_price, - start_priority_fee_tip, - submitter: Submitter { - web3, - signer_address, - gas_estimator, - gas_parameters_of_last_tx: None, - nonce_of_last_submission: None, - max_gas_price, - start_priority_fee_tip, - }, - lookback_time, + min_price_deviation_bps, } } + /// Fetches refundable orders from DB, validates on-chain, and submits batch + /// refunds. Individual failures are logged and skipped. pub async fn try_to_refund_all_eligible_orders(&mut self) -> Result<()> { let refundable_order_uids = self.get_refundable_ethflow_orders_from_db().await?; - let to_be_refunded_uids = self - .identify_uids_refunding_status_via_web3_calls(refundable_order_uids) - .await; + let to_be_refunded_uids = + identify_uids_refunding_status(&self.chain, &refundable_order_uids).await; self.send_out_refunding_tx(to_be_refunded_uids).await?; Ok(()) } + /// Fetches expired EthFlow orders that haven't been refunded, invalidated, + /// or filled. pub async fn get_refundable_ethflow_orders_from_db(&self) -> Result> { - let block_time = timestamp_of_current_block_in_seconds(&self.web3.alloy).await? as i64; - - let mut ex = self.db.acquire().await?; - refundable_orders( - &mut ex, - block_time, - self.min_validity_duration, - self.min_price_deviation, - self.lookback_time, - ) - .await - .map_err(|err| { - anyhow!( - "Error while retrieving the refundable ethflow orders from db: {:?}", - err - ) - }) - } + let block_time = self.chain.current_block_timestamp().await? as i64; - /// Checks if an address can receive ETH by simulating a small transfer. - /// Returns true for EOAs and contracts with working receive/fallback - /// functions. 
- async fn can_receive_eth(&self, address: Address) -> bool { - // Try to estimate gas for sending a minimal amount of ETH - let tx = TransactionRequest::default() - .to(address) - .value(alloy::primitives::U256::from(1)); - - self.web3 - .alloy - .estimate_gas(tx) - .await - .inspect_err(|err| { - tracing::warn!( - ?address, - ?err, - "Address cannot receive ETH - will skip refund" - ); - }) - .is_ok() - } - - async fn identify_uids_refunding_status_via_web3_calls( - &self, - refundable_order_uids: Vec, - ) -> HashMap> { - let futures = refundable_order_uids - .into_iter() - .filter_map(|eth_order_placement| { - // Owner of the ethflow order is always the ethflow contract itself - let ethflow_contract_address = - Address::from_slice(ð_order_placement.uid.0[32..52]); - let ethflow_contract = self - .ethflow_contracts - .iter() - .find(|contract| *contract.address() == ethflow_contract_address); - if ethflow_contract.is_none() { - tracing::warn!( - uid = const_hex::encode_prefixed(eth_order_placement.uid.0), - ethflow = ?ethflow_contract_address, - "refunding orders from specific contract is not enabled", - ); - } - ethflow_contract.map(|contract| (eth_order_placement, contract)) - }) - .map(|(eth_order_placement, ethflow_contract)| async move { - let order_hash: [u8; 32] = eth_order_placement.uid.0[0..32] - .try_into() - .expect("order_uid slice with incorrect length"); - let order = ethflow_contract.orders(order_hash.into()).call().await; - let order_owner = match order { - Ok(order) => order.owner, - Err(err) => { - tracing::error!( - uid =? B256::from(order_hash), - ?err, - "Error while getting the current onchain status ot the order" - ); - return None; - } - }; - let refund_status = if order_owner == INVALIDATED_OWNER { - RefundStatus::Refunded - } else if order_owner == NO_OWNER { - RefundStatus::Invalid - } else if !self.can_receive_eth(order_owner).await { - tracing::warn!( - uid = const_hex::encode_prefixed(eth_order_placement.uid.0), - owner = ?order_owner, - "Order owner cannot receive ETH - marking as invalid" - ); - RefundStatus::Invalid - } else { - RefundStatus::NotYetRefunded(order_owner) - }; - - Some((eth_order_placement.uid, refund_status, ethflow_contract)) - }); - - let uid_with_latest_refundablility = futures::future::join_all(futures).await; - let mut to_be_refunded_uids = HashMap::<_, Vec<_>>::new(); - let mut invalid_uids = Vec::new(); - for (uid, refund_status, ethflow_contract) in - uid_with_latest_refundablility.into_iter().flatten() - { - match refund_status { - RefundStatus::Refunded => (), - RefundStatus::Invalid => invalid_uids.push(uid), - RefundStatus::NotYetRefunded(_) => { - to_be_refunded_uids - .entry(*ethflow_contract.address()) - .or_default() - .push(uid); - } - } - } - if !invalid_uids.is_empty() { - // In exceptional cases, e.g. if the refunder tries to refund orders from a - // previous contract, the order_owners could be zero, or the owner cannot - // receive ETH (e.g. EOF contracts or contracts with restrictive receive logic) - tracing::warn!( - "Skipping invalid orders (not created in current contract or owner cannot receive \ - ETH). Uids: {:?}", - invalid_uids - ); - } - to_be_refunded_uids - } - - async fn get_ethflow_data_from_db( - &self, - uid: &OrderUid, - ) -> Result { - let mut ex = self.db.acquire().await.context("acquire")?; - let order = read_db_order(&mut ex, uid) - .await - .context("read order")? 
- .context("missing order")?; - let ethflow_order = read_order(&mut ex, uid) + self.database + .get_refundable_orders( + block_time, + self.min_validity_duration, + self.min_price_deviation_bps as f64 / BASIS_POINT_DENOMINATOR, + ) .await - .context("read ethflow order")? - .context("missing ethflow order")?; - - Ok(CoWSwapEthFlow::EthFlowOrder::Data { - buyToken: Address::from(order.buy_token.0), - // ethflow orders have always a receiver. It's enforced by the contract. - receiver: Address::from(order.receiver.unwrap().0), - sellAmount: big_decimal_to_u256(&order.sell_amount).unwrap(), - buyAmount: big_decimal_to_u256(&order.buy_amount).unwrap(), - appData: order.app_data.0.into(), - feeAmount: big_decimal_to_u256(&order.fee_amount).unwrap(), - validTo: ethflow_order.valid_to as u32, - partiallyFillable: order.partially_fillable, - quoteId: 0i64, // quoteId is not important for refunding and will be ignored - }) } async fn send_out_refunding_tx( @@ -285,13 +208,8 @@ impl RefundService { tracing::debug!("Trying to refund the following uids: {:?}", uids); let futures = uids.iter().map(|uid| { - let (uid, self_) = (*uid, &self); - async move { - self_ - .get_ethflow_data_from_db(&uid) - .await - .context(format!("uid {uid:?}")) - } + let (uid, database) = (*uid, &self.database); + async move { database.get_ethflow_order_data(&uid).await } }); let encoded_ethflow_orders: Vec<_> = stream::iter(futures) .buffer_unordered(10) @@ -303,7 +221,7 @@ impl RefundService { .collect() .await; self.submitter - .submit(uids, encoded_ethflow_orders, contract) + .submit_batch(&uids, encoded_ethflow_orders, contract) .await?; } @@ -311,37 +229,804 @@ impl RefundService { } } +impl RefundService { + /// Creates a `RefundService` from its (concrete) components. + #[allow(clippy::too_many_arguments)] + pub fn from_components( + db: sqlx::PgPool, + web3: ethrpc::Web3, + ethflow_addresses: Vec
, + min_validity_duration: i64, + min_price_deviation_bps: i64, + signer: alloy::signers::local::PrivateKeySigner, + max_gas_price: u64, + start_priority_fee_tip: u64, + lookback_time: Option, + ) -> Self { + // Database layer + let database = Postgres::new(db, lookback_time); + + // Chain reader + let chain = AlloyChain::new(web3.alloy.clone(), ethflow_addresses); + + // Signer/wallet configuration + let signer_address = signer.address(); + web3.wallet.register_signer(signer); + + // Transaction submitter + let gas_estimator = Box::new( + shared::gas_price_estimation::eth_node::NodeGasPriceEstimator::new(web3.alloy.clone()), + ); + let submitter = Submitter { + web3, + signer_address, + gas_estimator, + gas_parameters_of_last_tx: None, + nonce_of_last_submission: None, + max_gas_price, + start_priority_fee_tip, + }; + + Self::new( + database, + chain, + submitter, + min_validity_duration, + min_price_deviation_bps, + ) + } +} + #[cfg(test)] mod tests { use { super::*, - alloy::primitives::address, - shared::gas_price_estimation::eth_node::NodeGasPriceEstimator, + crate::traits::{ + MockChainRead, + MockChainWrite, + MockDbRead, + test::{MockChainReadExt, MockDbReadExt}, + }, + alloy::primitives::Address, + anyhow::anyhow, + contracts::alloy::CoWSwapEthFlow::EthFlowOrder, + database::{byte_array::ByteArray, ethflow_orders::EthOrderPlacement}, + rand::random, + rstest::rstest, + std::collections::{HashMap, HashSet}, }; - /// Creates a minimal RefundService for testing purposes. - fn new_test_service(web3: Web3) -> RefundService { - RefundService { - db: PgPool::connect_lazy("postgresql://").unwrap(), - web3: web3.clone(), - ethflow_contracts: vec![], - min_validity_duration: 0, - min_price_deviation: 0.0, - max_gas_price: 0, - start_priority_fee_tip: 0, - submitter: Submitter { - web3: web3.clone(), - signer_address: Address::ZERO, - gas_estimator: Box::new(NodeGasPriceEstimator::new(web3.alloy.clone())), - gas_parameters_of_last_tx: None, - nonce_of_last_submission: None, - max_gas_price: 0, - start_priority_fee_tip: 0, - }, - lookback_time: None, + /// Test addresses with semantic meaning for filtering logic. + pub const KNOWN_ETHFLOW: Address = Address::repeat_byte(0x11); + pub const KNOWN_ETHFLOW_2: Address = Address::repeat_byte(0x22); + pub const EOA_OWNER: Address = Address::repeat_byte(0x44); + pub const CONTRACT_REJECTING_ETH: Address = Address::repeat_byte(0x55); + pub const UNKNOWN_ETHFLOW: Address = Address::repeat_byte(0x66); + + /// Asserts the expected number of orders per contract in a grouped result. + /// + /// # Panics + /// Panics if the number of orders for the specified contract does not match + /// the expected count, or if the set of UID suffixes differs. 
+ #[track_caller] + fn assert_orders_by_contract( + result: &HashMap>, + contract: Address, + expected_uid_suffixes: &[u8], + ) { + // Retrieve the order list for the contract (empty slice if missing) + let orders = result.get(&contract).map(|v| v.as_slice()).unwrap_or(&[]); + + // Verify the count + let actual_count = orders.len(); + let expected_count = expected_uid_suffixes.len(); + assert_eq!( + actual_count, expected_count, + "Expected {expected_count} orders for contract {contract}, got {actual_count}" + ); + + // Verify the UID suffixes (order‑independent) + let actual_suffixes: HashSet = orders.iter().map(|uid| uid.0[31]).collect(); + let expected_suffixes: HashSet = expected_uid_suffixes.iter().copied().collect(); + + assert_eq!( + actual_suffixes, expected_suffixes, + "Order uid_suffixes mismatch for contract {contract}" + ); + } + + /// Builds an `EthOrderPlacement` with the given contract address embedded + /// in the UID. + /// + /// # UID Structure (56 bytes total) + /// + /// The CoW Protocol Order UID has the following layout: + /// - Bytes 0-31: Order hash (keccak256 of order data) + /// - Bytes 32-51: Contract address (EthFlow contract that created the + /// order) + /// - Bytes 52-55: Valid-to timestamp (big-endian u32) + /// + /// This function creates a test UID where: + /// - `uid_suffix` is placed at byte 31 (end of order hash) for easy + /// identification + /// - `contract_addr` occupies bytes 32-52 to simulate the EthFlow contract + /// address + /// + /// # Arguments + /// - `uid_suffix`: A byte value placed at position 31 to distinguish test + /// orders + /// - `contract_addr`: The EthFlow contract address to embed in bytes 32-52 + fn create_test_order_placement(uid_suffix: u8, contract_addr: Address) -> EthOrderPlacement { + let mut uid_bytes = [0u8; 56]; + uid_bytes[31] = uid_suffix; + uid_bytes[32..52].copy_from_slice(contract_addr.as_slice()); + EthOrderPlacement { + uid: ByteArray(uid_bytes), + valid_to: 1000, } } + // Tests + + /// Orders with owners that cannot receive ETH are filtered out. + #[rstest] + #[case::eoa_can_receive_eth(EOA_OWNER, true)] + #[case::contract_rejects_eth(CONTRACT_REJECTING_ETH, false)] + #[tokio::test] + async fn test_eth_receivability_filtering(#[case] owner: Address, #[case] can_receive: bool) { + let mut mock_chain = MockChainRead::new(); + + mock_chain + // Configure the known EthFlow contracts so orders from KNOWN_ETHFLOW pass the + // allowlist check + .with_ethflow_addresses(vec![KNOWN_ETHFLOW]) + // All orders report as "not yet refunded" with the parameterized owner address + // This sets up the precondition for testing the ETH receivability check + .with_order_status(RefundStatus::NotYetRefunded(owner)); + + // Return parameterized ETH receivability result to test both EOA (can receive) + // and contract-rejecting-ETH (cannot receive) scenarios + mock_chain + .expect_can_receive_eth() + .withf(move |addr| *addr == owner) + .returning(move |_| can_receive); + + let order = create_test_order_placement(1, KNOWN_ETHFLOW); + let result = identify_uids_refunding_status(&mock_chain, &[order]).await; + + let expected_orders: &[u8] = if can_receive { &[1] } else { &[] }; + assert_orders_by_contract(&result, KNOWN_ETHFLOW, expected_orders); + } + + /// Orders from unknown EthFlow contracts are filtered out. 
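+    /// Only orders whose UID embeds one of the configured EthFlow contract
+    /// addresses pass the allowlist check.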
+ #[rstest] + #[case::known_contract_included(KNOWN_ETHFLOW)] + #[case::unknown_contract_filtered(UNKNOWN_ETHFLOW)] + #[tokio::test] + async fn test_ethflow_contract_filtering(#[case] contract: Address) { + let mut mock_chain = MockChainRead::new(); + mock_chain + .with_ethflow_addresses(vec![KNOWN_ETHFLOW]) + .with_order_status(RefundStatus::NotYetRefunded(EOA_OWNER)) + .receiving_eth(); + + let order = create_test_order_placement(1, contract); + let result = identify_uids_refunding_status(&mock_chain, &[order]).await; + + let expected_suffixes: &[u8] = if contract == KNOWN_ETHFLOW { &[1] } else { &[] }; + assert_orders_by_contract(&result, contract, expected_suffixes); + } + + /// Orders with non-refundable status or status query errors are excluded. + #[rstest] + #[case::already_refunded(Some(RefundStatus::Refunded))] + #[case::invalid_order(Some(RefundStatus::Invalid))] + #[case::status_query_error(None)] + #[tokio::test] + async fn test_non_refundable_status_excludes_order(#[case] status: Option) { + let mut mock_chain = MockChainRead::new(); + + // Allow the order through the allowlist check + mock_chain.with_ethflow_addresses(vec![KNOWN_ETHFLOW]); + + // Return the parameterized status (Refunded, Invalid, or Error) to verify + // that orders with non-refundable statuses are excluded from the result + mock_chain + .expect_get_order_status() + .returning(move |_, _| status.ok_or(anyhow!("RPC error"))); + + let order = create_test_order_placement(1, KNOWN_ETHFLOW); + let result = identify_uids_refunding_status(&mock_chain, &[order]).await; + + assert!(result.is_empty()); + } + + /// When all orders are already refunded on-chain, the result is empty. + /// + /// This verifies the edge case where no orders need refund, which should + /// result in an empty map (and thus no submission). + #[tokio::test] + async fn test_all_orders_already_refunded() { + let mut mock_chain = MockChainRead::new(); + mock_chain + .with_ethflow_addresses(vec![KNOWN_ETHFLOW]) + .with_order_status(RefundStatus::Refunded); + + let orders: Vec<_> = (1..=5) + .map(|i| create_test_order_placement(i, KNOWN_ETHFLOW)) + .collect(); + + let result = identify_uids_refunding_status(&mock_chain, &orders).await; + + assert!( + result.is_empty(), + "All orders were refunded; result should be empty" + ); + } + + /// A single order is forwarded to the submitter with correct arguments. + #[tokio::test] + async fn test_send_out_refunding_tx_calls_submitter() { + let order = create_test_order_placement(1, KNOWN_ETHFLOW); + let uid = order.uid; + + let mut mock_db = MockDbRead::new(); + mock_db.with_default_ethflow_order_data(); + + let mock_chain = MockChainRead::new(); + + let mut mock_submitter = MockChainWrite::new(); + mock_submitter + .expect_submit_batch() + .times(1) + .withf(|uids, orders, contract| { + uids.len() == 1 && orders.len() == 1 && *contract == KNOWN_ETHFLOW + }) + .returning(|_, _, _| Ok(())); + + let mut service = RefundService::new(mock_db, mock_chain, mock_submitter, 3600, 100); + + let mut uids_by_contract = HashMap::new(); + uids_by_contract.insert(KNOWN_ETHFLOW, vec![uid]); + + let result = service.send_out_refunding_tx(uids_by_contract).await; + assert!(result.is_ok()); + } + + /// Empty order map does not trigger any submission. 
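+    /// The call returns `Ok(())` without touching the database, the chain, or
+    /// the submitter.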
+ #[tokio::test] + async fn test_send_out_refunding_tx_empty_map_skips_submission() { + // No expectations needed for DB or chain because empty input short-circuits + // before any DB and chain calls + let mock_db = MockDbRead::new(); + let mock_chain = MockChainRead::new(); + + // Submitter has no expectations: test will fail if submit is called, + // verifying that empty input correctly skips submission + let mock_submitter = MockChainWrite::new(); + + let mut service = RefundService::new(mock_db, mock_chain, mock_submitter, 3600, 100); + + let result = service.send_out_refunding_tx(HashMap::new()).await; + assert!(result.is_ok()); + } + + /// Orders are capped at `MAX_NUMBER_OF_UIDS_PER_REFUND_TX` per contract. + #[rstest] + #[case::below_max_no_truncation(29, 29)] + #[case::at_max_no_truncation( + MAX_NUMBER_OF_UIDS_PER_REFUND_TX, + MAX_NUMBER_OF_UIDS_PER_REFUND_TX + )] + #[case::one_above_max_truncates(31, MAX_NUMBER_OF_UIDS_PER_REFUND_TX)] + #[case::above_max_truncates(35, MAX_NUMBER_OF_UIDS_PER_REFUND_TX)] + #[case::double_max_truncates(60, MAX_NUMBER_OF_UIDS_PER_REFUND_TX)] + #[tokio::test] + async fn test_send_out_refunding_tx_order_count_boundary( + #[case] input_count: usize, + #[case] expected_count: usize, + ) { + let mut mock_db = MockDbRead::new(); + mock_db.with_default_ethflow_order_data(); + + let mock_chain = MockChainRead::new(); + + let mut mock_submitter = MockChainWrite::new(); + mock_submitter + .expect_submit_batch() + .times(1) + .withf(move |uids, orders, _| { + uids.len() == expected_count && orders.len() == expected_count + }) + .returning(|_, _, _| Ok(())); + + let mut service = RefundService::new(mock_db, mock_chain, mock_submitter, 3600, 100); + + let mut uids_by_contract = HashMap::new(); + let uids = (0..input_count as u8) + .map(|i| create_test_order_placement(i, KNOWN_ETHFLOW).uid) + .collect(); + uids_by_contract.insert(KNOWN_ETHFLOW, uids); + + let result = service.send_out_refunding_tx(uids_by_contract).await; + assert!(result.is_ok()); + } + + /// Orders from multiple contracts trigger separate submissions. + #[tokio::test] + async fn test_send_out_refunding_tx_multiple_contracts() { + let mut mock_db = MockDbRead::new(); + mock_db.with_default_ethflow_order_data(); + + let mock_chain = MockChainRead::new(); + + let mut mock_submitter = MockChainWrite::new(); + + // Expect exactly 2 submissions because orders are grouped by contract, + // and each contract gets its own refund transaction + mock_submitter + .expect_submit_batch() + .times(2) + .returning(|_, _, _| Ok(())); + + let mut service = RefundService::new(mock_db, mock_chain, mock_submitter, 3600, 100); + + let uid1 = create_test_order_placement(1, KNOWN_ETHFLOW).uid; + let uid2 = create_test_order_placement(2, KNOWN_ETHFLOW_2).uid; + let mut uids_by_contract = HashMap::new(); + uids_by_contract.insert(KNOWN_ETHFLOW, vec![uid1]); + uids_by_contract.insert(KNOWN_ETHFLOW_2, vec![uid2]); + + let result = service.send_out_refunding_tx(uids_by_contract).await; + assert!(result.is_ok()); + } + + /// DB errors for individual orders are skipped; other orders proceed. 
+ /// + /// # Current Behavior (documented, not necessarily ideal) + /// + /// When a DB lookup fails for an order: + /// - The error is logged and the order data is excluded from the submission + /// - However, the UID is still included in the submission + /// + /// This means `submit` receives: + /// - `uids`: ALL original UIDs (including those with failed lookups) + /// - `orders`: Only the order data for successful lookups + /// + /// This creates a mismatch between UIDs and order data. See the TODO in + /// `test_send_out_refunding_tx_all_db_calls_fail_still_submits` for + /// discussion of potential fixes. + #[tokio::test] + async fn test_send_out_refunding_tx_db_error_skips_order() { + let uid1 = create_test_order_placement(1, KNOWN_ETHFLOW).uid; + let uid2 = create_test_order_placement(2, KNOWN_ETHFLOW).uid; + + let mut mock_db = MockDbRead::new(); + + // First order (uid_suffix=1) fails DB lookup to test error handling + mock_db + .expect_get_ethflow_order_data() + .withf(|uid| uid.0[31] == 1) + .returning(|_| Err(anyhow!("DB error"))); + + // Second order (uid_suffix=2) succeeds to verify partial success behavior + mock_db + .expect_get_ethflow_order_data() + .withf(|uid| uid.0[31] == 2) + .returning(|_| Ok(EthFlowOrder::Data::default())); + + let mock_chain = MockChainRead::new(); + + let mut mock_submitter = MockChainWrite::new(); + + // Current behavior: ALL UIDs are passed, but only successful order data. + // - uids contains both uid1 (suffix=1) and uid2 (suffix=2) + // - orders contains only 1 entry (from uid2's successful lookup) + mock_submitter + .expect_submit_batch() + .times(1) + .withf(|uids, orders, _| { + let has_both_uids = uids.len() == 2 + && uids.iter().any(|uid| uid.0[31] == 1) + && uids.iter().any(|uid| uid.0[31] == 2); + let has_one_order = orders.len() == 1; + has_both_uids && has_one_order + }) + .returning(|_, _, _| Ok(())); + + let mut service = RefundService::new(mock_db, mock_chain, mock_submitter, 3600, 100); + + let mut uids_by_contract = HashMap::new(); + uids_by_contract.insert(KNOWN_ETHFLOW, vec![uid1, uid2]); + + let result = service.send_out_refunding_tx(uids_by_contract).await; + assert!(result.is_ok()); + } + + /// If every DB lookup fails, we still call the submitter with the original + /// UIDs but without any order data. + /// + /// What actually happens: + /// - Each failed order‑data fetch is logged and ignored (it doesn't stop + /// the whole batch). + /// - The submitter gets the same list of UIDs we started with, but the + /// `orders` slice may be empty (or contain fewer entries) because some or + /// all lookups failed. + /// + /// TODO: Is this the behavior we really want? Submitting a refund that + /// contains UIDs but no order details feels off. Possible fixes: + /// 1. Skip the submission entirely when `encoded_ethflow_orders` is empty. + /// 2. Return an error if *all* order‑data lookups fail. + /// 3. Filter the UID list so it only includes IDs with successful lookups. + /// + /// NOTE: This test complements + /// `test_send_out_refunding_tx_db_error_skips_order`. That test covers + /// partial DB failure (some lookups succeed); this one covers + /// total DB failure (all lookups fail). Together they verify that DB errors + /// are non-fatal and UIDs are always preserved regardless of lookup + /// success. 
+ #[tokio::test] + async fn test_send_out_refunding_tx_all_db_calls_fail_still_submits() { + let uid1 = create_test_order_placement(1, KNOWN_ETHFLOW).uid; + let uid2 = create_test_order_placement(2, KNOWN_ETHFLOW).uid; + + let mut mock_db = MockDbRead::new(); + + // All DB lookups fail to test edge case where no order data is available + mock_db + .expect_get_ethflow_order_data() + .returning(|_| Err(anyhow!("DB connection lost"))); + + let mock_chain = MockChainRead::new(); + + let mut mock_submitter = MockChainWrite::new(); + + // Verify submission still happens with original UIDs but empty orders list + // This documents current (possibly unintended) behavior where UIDs and orders + // mismatch + mock_submitter + .expect_submit_batch() + .times(1) + .withf(|uids, orders, contract| { + // UIDs are preserved, but orders is empty because all DB lookups failed + uids.len() == 2 && orders.is_empty() && *contract == KNOWN_ETHFLOW + }) + .returning(|_, _, _| Ok(())); + + let mut service = RefundService::new(mock_db, mock_chain, mock_submitter, 3600, 100); + + let mut uids_by_contract = HashMap::new(); + uids_by_contract.insert(KNOWN_ETHFLOW, vec![uid1, uid2]); + + let result = service.send_out_refunding_tx(uids_by_contract).await; + assert!(result.is_ok()); + } + + /// Submitter error on first contract short-circuits; remaining contracts + /// are not attempted. + /// + /// NOTE: HashMap iteration order is non-deterministic, so we cannot predict + /// which contract will be processed first. This test verifies that: + /// 1. The error propagates (result is Err) + /// 2. Only one submission is attempted (times(1)) + /// + /// The test remains valid regardless of iteration order because both + /// contracts would fail with the same error. + #[tokio::test] + async fn test_send_out_refunding_tx_error_short_circuits() { + let mut mock_db = MockDbRead::new(); + // Return order data successfully; the error will come from submission + mock_db.with_default_ethflow_order_data(); + + let mock_chain = MockChainRead::new(); + + let mut mock_submitter = MockChainWrite::new(); + + // Fail on first submission to verify error propagation stops processing + // Due to HashMap's non-deterministic iteration order, we cannot predict + // which contract will be attempted first, but we know only one will be tried + mock_submitter + .expect_submit_batch() + .times(1) + .returning(|_, _, _| Err(anyhow!("Submission failed"))); + + let mut service = RefundService::new(mock_db, mock_chain, mock_submitter, 3600, 100); + + let uid1 = create_test_order_placement(1, KNOWN_ETHFLOW).uid; + let uid2 = create_test_order_placement(2, KNOWN_ETHFLOW_2).uid; + let mut uids_by_contract = HashMap::new(); + uids_by_contract.insert(KNOWN_ETHFLOW, vec![uid1]); + uids_by_contract.insert(KNOWN_ETHFLOW_2, vec![uid2]); + + let result = service.send_out_refunding_tx(uids_by_contract).await; + assert!(result.is_err()); + } + + /// An eligible order is fetched, validated, and submitted for refund. 
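+    /// Exercises `try_to_refund_all_eligible_orders` end to end against mocks.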
+ #[tokio::test] + async fn test_try_to_refund_happy_path() { + let mut mock_db = MockDbRead::new(); + let order = vec![create_test_order_placement(1, KNOWN_ETHFLOW)]; + mock_db.with_refundable_orders(order); + + mock_db.with_default_ethflow_order_data(); + + let mut mock_chain = MockChainRead::new(); + mock_chain + .with_block_timestamp(1000) + .with_ethflow_addresses(vec![KNOWN_ETHFLOW]) + .with_order_status(RefundStatus::NotYetRefunded(EOA_OWNER)) + .receiving_eth(); + + let mut mock_submitter = MockChainWrite::new(); + mock_submitter + .expect_submit_batch() + .times(1) + .returning(|_, _, _| Ok(())); + + let mut service = RefundService::new(mock_db, mock_chain, mock_submitter, 3600, 100); + + let result = service.try_to_refund_all_eligible_orders().await; + assert!(result.is_ok()); + } + + /// Empty database result does not trigger any submission. + #[tokio::test] + async fn test_try_to_refund_empty_db_result() { + let mut mock_db = MockDbRead::new(); + mock_db.with_refundable_orders(vec![]); + + let mut mock_chain = MockChainRead::new(); + mock_chain + .with_block_timestamp(1000) + .with_ethflow_addresses(vec![KNOWN_ETHFLOW]); + + // Submitter has no expectations: test fails if submit is called + let mock_submitter = MockChainWrite::new(); + + let mut service = RefundService::new(mock_db, mock_chain, mock_submitter, 3600, 100); + + let result = service.try_to_refund_all_eligible_orders().await; + assert!(result.is_ok()); + } + + /// When some orders are already refunded on-chain, only pending orders are + /// submitted. + #[tokio::test] + async fn test_try_to_refund_mixed_orders() { + let order_valid = create_test_order_placement(1, KNOWN_ETHFLOW); + let order_refunded = create_test_order_placement(2, KNOWN_ETHFLOW); + + let mut mock_db = MockDbRead::new(); + + // Return two orders from DB: one still needs refund, one already refunded + // on-chain + mock_db.with_refundable_orders(vec![order_valid, order_refunded]); + + // Return order data for the order that passes on-chain validation + mock_db.with_default_ethflow_order_data(); + + let mut mock_chain = MockChainRead::new(); + + // Block timestamp and known contracts for DB query + mock_chain + .with_block_timestamp(1000) + .with_ethflow_addresses(vec![KNOWN_ETHFLOW]) + .receiving_eth(); + + // Order 1 (uid_suffix=1) is eligible for refund + mock_chain + .expect_get_order_status() + .withf(|_, order_hash| order_hash.0[31] == 1) + .returning(|_, _| Ok(RefundStatus::NotYetRefunded(EOA_OWNER))); + + // Order 2 (uid_suffix=2) was already refunded on-chain, should be filtered out + mock_chain + .expect_get_order_status() + .withf(|_, order_hash| order_hash.0[31] == 2) + .returning(|_, _| Ok(RefundStatus::Refunded)); + + let mut mock_submitter = MockChainWrite::new(); + + // Only 1 order should be submitted (order 2 is filtered out as already + // refunded) + mock_submitter + .expect_submit_batch() + .times(1) + .withf(|uids, _, _| uids.len() == 1) + .returning(|_, _, _| Ok(())); + + let mut service = RefundService::new(mock_db, mock_chain, mock_submitter, 3600, 100); + + let result = service.try_to_refund_all_eligible_orders().await; + assert!(result.is_ok()); + } + + /// Orders are grouped by their originating EthFlow contract. 
+ #[tokio::test] + async fn test_identify_groups_orders_by_contract() { + let order1 = create_test_order_placement(1, KNOWN_ETHFLOW); + let order2 = create_test_order_placement(2, KNOWN_ETHFLOW); + let order3 = create_test_order_placement(3, KNOWN_ETHFLOW_2); + + let mut mock_chain = MockChainRead::new(); + mock_chain + .with_ethflow_addresses(vec![KNOWN_ETHFLOW, KNOWN_ETHFLOW_2]) + .with_order_status(RefundStatus::NotYetRefunded(EOA_OWNER)) + .receiving_eth(); + + let result = identify_uids_refunding_status(&mock_chain, &[order1, order2, order3]).await; + + assert_eq!(result.len(), 2); + assert_orders_by_contract(&result, KNOWN_ETHFLOW, &[1, 2]); + assert_orders_by_contract(&result, KNOWN_ETHFLOW_2, &[3]); + } + + /// Empty input returns empty result. + #[tokio::test] + async fn test_identify_empty_input() { + let mut mock_chain = MockChainRead::new(); + mock_chain.with_ethflow_addresses(vec![KNOWN_ETHFLOW]); + + let result = identify_uids_refunding_status(&mock_chain, &[]).await; + + assert!(result.is_empty()); + } + + /// Verifies that the order hash is correctly extracted from UID bytes + /// [0..32]. + /// + /// The refund service extracts the order hash from the first 32 bytes of + /// the UID to query on-chain status. + #[tokio::test] + async fn test_order_hash_extraction_from_uid() { + let expected_hash: [u8; 32] = random(); + + let mut mock_chain = MockChainRead::new(); + mock_chain + .with_ethflow_addresses(vec![KNOWN_ETHFLOW]) + .receiving_eth(); + + // Capture the order_hash passed to get_order_status and verify it matches + mock_chain + .expect_get_order_status() + .withf(move |_, order_hash| order_hash.0 == expected_hash) + .returning(|_, _| Ok(RefundStatus::NotYetRefunded(EOA_OWNER))); + + // Build a UID with the expected hash in bytes [0..32] + let mut uid_bytes = [0u8; 56]; + uid_bytes[0..32].copy_from_slice(&expected_hash); + uid_bytes[32..52].copy_from_slice(KNOWN_ETHFLOW.as_slice()); + + let order = EthOrderPlacement { + uid: ByteArray(uid_bytes), + valid_to: 1000, + }; + + let result = identify_uids_refunding_status(&mock_chain, &[order]).await; + + // Verify the result contains our order with the expected hash + let uids = result + .get(&KNOWN_ETHFLOW) + .expect("should have orders for contract"); + assert_eq!(uids.len(), 1); + assert_eq!(&uids[0].0[0..32], &expected_hash); + } + + /// Verifies that the contract address is correctly extracted from UID bytes + /// [32..52]. + /// + /// The refund service extracts the EthFlow contract address from bytes + /// 32-52 to determine which contract the order belongs to. This test + /// ensures the extraction produces the correct Address value. 
+ #[rstest] + #[case::known_ethflow(KNOWN_ETHFLOW)] + #[case::known_ethflow_2(KNOWN_ETHFLOW_2)] + #[case::all_zeros(Address::ZERO)] + #[case::all_ones(Address::repeat_byte(0xFF))] + #[tokio::test] + async fn test_contract_address_extraction_from_uid(#[case] contract: Address) { + let mut mock_chain = MockChainRead::new(); + + // Only allow the specific contract being tested + mock_chain + .with_ethflow_addresses(vec![contract]) + .with_order_status(RefundStatus::NotYetRefunded(EOA_OWNER)) + .receiving_eth(); + + // Build a UID with the contract address in bytes [32..52] + let mut uid_bytes = [0u8; 56]; + uid_bytes[31] = 1; // uid_suffix for identification + uid_bytes[32..52].copy_from_slice(contract.as_slice()); + + let order = EthOrderPlacement { + uid: ByteArray(uid_bytes), + valid_to: 1000, + }; + + let result = identify_uids_refunding_status(&mock_chain, &[order]).await; + + // The order should be grouped under the correct contract + assert_orders_by_contract(&result, contract, &[1]); + } + + /// Multiple orders with different owners are filtered based on each owner's + /// ability to receive ETH. + /// + /// This test verifies that the ETH receivability check is performed + /// per-owner, not globally. Orders from owners that can receive ETH are + /// included; orders from owners that cannot receive ETH are excluded. + #[tokio::test] + async fn test_mixed_eth_receivability_multiple_owners() { + // Define owners with different ETH receivability + const OWNER_CAN_RECEIVE_1: Address = Address::repeat_byte(0xA1); + const OWNER_CAN_RECEIVE_2: Address = Address::repeat_byte(0xA2); + const OWNER_CANNOT_RECEIVE_1: Address = Address::repeat_byte(0xB1); + const OWNER_CANNOT_RECEIVE_2: Address = Address::repeat_byte(0xB2); + + let mut mock_chain = MockChainRead::new(); + mock_chain.with_ethflow_addresses(vec![KNOWN_ETHFLOW]); + + // Each order has a different owner + mock_chain + .expect_get_order_status() + .returning(|_, order_hash| { + let owner = match order_hash.0[31] { + 1 => OWNER_CAN_RECEIVE_1, + 2 => OWNER_CANNOT_RECEIVE_1, + 3 => OWNER_CAN_RECEIVE_2, + 4 => OWNER_CANNOT_RECEIVE_2, + _ => unreachable!(), + }; + Ok(RefundStatus::NotYetRefunded(owner)) + }); + + // ETH receivability depends on owner address + mock_chain + .expect_can_receive_eth() + .returning(|owner| [OWNER_CAN_RECEIVE_1, OWNER_CAN_RECEIVE_2].contains(&owner)); + + let orders: Vec<_> = (1..=4) + .map(|i| create_test_order_placement(i, KNOWN_ETHFLOW)) + .collect(); + + let result = identify_uids_refunding_status(&mock_chain, &orders).await; + + // Only orders 1 and 3 (owners that can receive ETH) should be included + assert_orders_by_contract(&result, KNOWN_ETHFLOW, &[1, 3]); + } + + /// When multiple status queries fail, all failed orders are excluded. 
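+    /// Failures are handled per order: a failed status query excludes only that
+    /// order and does not abort the rest of the batch.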
+ #[tokio::test] + async fn test_multiple_status_query_failures() { + // Orders 1 and 2 fail with RPC errors to test partial failure handling + // Order 3 succeeds to verify successful orders are still processed + let order1 = create_test_order_placement(1, KNOWN_ETHFLOW); + let order2 = create_test_order_placement(2, KNOWN_ETHFLOW); + let order3 = create_test_order_placement(3, KNOWN_ETHFLOW); + + let mut mock_chain = MockChainRead::new(); + + // All orders pass the allowlist check + mock_chain + .with_ethflow_addresses(vec![KNOWN_ETHFLOW]) + .receiving_eth() // Owner can receive ETH (only relevant for order 3 which passes status check) + + .expect_get_order_status() + .returning(|_, order_hash| { + match order_hash.0[31] { + 1 | 2 => Err(anyhow!("RPC timeout")), + 3 => Ok(RefundStatus::NotYetRefunded(EOA_OWNER)), + _ => unreachable!() + } + }); + + let result = identify_uids_refunding_status(&mock_chain, &[order1, order2, order3]).await; + + // Only order 3 should be included (orders 1 and 2 failed status check) + assert_orders_by_contract(&result, KNOWN_ETHFLOW, &[3]); + } + /// Verifies that `can_receive_eth()` correctly identifies addresses that /// cannot receive ETH transfers. Some smart contracts reject ETH transfers /// (e.g., EOF contracts or contracts without receive/fallback functions), @@ -352,24 +1037,34 @@ mod tests { #[tokio::test] #[ignore] // Run with: cargo test --package refunder --lib test_problematic_sepolia_address -- --ignored async fn test_problematic_sepolia_address() { - let web3 = Web3::new_from_url("https://ethereum-sepolia-rpc.publicnode.com"); - let service = new_test_service(web3); + use crate::infra::AlloyChain; + + let (provider, _wallet) = ethrpc::alloy::provider( + "https://ethereum-sepolia-rpc.publicnode.com", + ethrpc::Config::default(), + None, + ); + let chain = AlloyChain::new(provider, vec![]); // EOF contract that cannot receive ETH (0xef01... bytecode prefix) - let problematic = address!("0x66C9152339ce05EE0C8A8eff9EeF8230AbFe8350"); + let problematic: Address = "0x66C9152339ce05EE0C8A8eff9EeF8230AbFe8350" + .parse() + .unwrap(); // Normal EOA for comparison - let working = address!("0x5b485e4431853F82d89dba68220A422CC17cE024"); + let working: Address = "0x5b485e4431853F82d89dba68220A422CC17cE024" + .parse() + .unwrap(); // Test that can_receive_eth correctly identifies the problematic address assert!( - !service.can_receive_eth(problematic).await, + !chain.can_receive_eth(problematic).await, "EOF contract should be identified as unable to receive ETH" ); // Test that can_receive_eth correctly identifies a working address assert!( - service.can_receive_eth(working).await, + chain.can_receive_eth(working).await, "Normal EOA should be identified as able to receive ETH" ); } diff --git a/crates/refunder/src/submitter.rs b/crates/refunder/src/submitter.rs index 73d8a074af..6d6075d304 100644 --- a/crates/refunder/src/submitter.rs +++ b/crates/refunder/src/submitter.rs @@ -1,14 +1,12 @@ -// This submitter has the following logic: -// It tries to submit a tx - as EIP1559 - with a small tx tip, -// but a quite high max_fee_per_gas such that it's likely being mined quickly -// -// Then it waits for 5 blocks. If the tx is not mined, it will return an error -// and it needs to be called again. 
If the last submission was not successful, -// this submitter stores the last gas_price in order to submit the new tx with -// a higher gas price, in order to avoid: ErrReplaceUnderpriced erros -// In the re-newed attempt for submission the same nonce is used as before. +//! Refund transaction submitter. +//! +//! Submits EIP-1559 transactions with a small tip but high `max_fee_per_gas` +//! to get mined quickly. Waits 5 blocks for confirmation; on timeout, the next +//! call bumps gas price (using the same nonce) to avoid +//! `ErrReplaceUnderpriced`. use { + crate::traits::ChainWrite, alloy::{eips::eip1559::Eip1559Estimation, primitives::Address, providers::Provider}, anyhow::{Context, Result}, contracts::alloy::CoWSwapEthFlow::{self, EthFlowOrder}, @@ -30,6 +28,8 @@ const GAS_PRICE_BUFFER_PCT: u64 = 30; // max_fee_per_gas needs to be increased by at least 10 percent. const GAS_PRICE_BUMP_PERMIL: u64 = 125; +const TIMEOUT_5_BLOCKS: Duration = Duration::from_secs(60); + pub struct Submitter { pub web3: Web3, pub signer_address: Address, @@ -40,47 +40,31 @@ pub struct Submitter { pub start_priority_fee_tip: u64, } -impl Submitter { - async fn get_submission_nonce(&self) -> Result { - // this command returns the tx count ever mined at the latest block - // Mempool tx are not considered. - self.web3 - .alloy - .get_transaction_count(self.signer_address) - .await - .with_context(|| { - format!( - "could not get latest nonce for address {:?}", - self.signer_address - ) - }) - } - - pub async fn submit( +impl ChainWrite for Submitter { + async fn submit_batch( &mut self, - uids: Vec, + uids: &[OrderUid], encoded_ethflow_orders: Vec, ethflow_contract: Address, ) -> Result<()> { - const TIMEOUT_5_BLOCKS: Duration = Duration::from_secs(60); - - let gas_price_estimation = self.gas_estimator.estimate().await?; - let nonce = self.get_submission_nonce().await?; - let gas_price = calculate_submission_gas_price( - self.gas_parameters_of_last_tx, - gas_price_estimation, - nonce, - self.nonce_of_last_submission, - self.max_gas_price, - self.start_priority_fee_tip, - )?; - - self.gas_parameters_of_last_tx = Some(gas_price); - self.nonce_of_last_submission = Some(nonce); - - let ethflow_contract = - CoWSwapEthFlow::Instance::new(ethflow_contract, self.web3.alloy.clone()); - let tx_result = ethflow_contract + { + let gas_price_estimation = self.gas_estimator.estimate().await?; + let nonce = self.get_submission_nonce().await?; + let gas_price = calculate_submission_gas_price( + self.gas_parameters_of_last_tx, + gas_price_estimation, + nonce, + self.nonce_of_last_submission, + self.max_gas_price, + self.start_priority_fee_tip, + )?; + + self.gas_parameters_of_last_tx = Some(gas_price); + self.nonce_of_last_submission = Some(nonce); + + let ethflow_contract = + CoWSwapEthFlow::Instance::new(ethflow_contract, self.web3.alloy.clone()); + let tx_result = ethflow_contract .invalidateOrdersIgnoringNotAllowed(encoded_ethflow_orders) // Gas conversions are lossy but technically the should not have decimal points even though they're floats .max_priority_fee_per_gas(gas_price.max_priority_fee_per_gas) @@ -90,17 +74,35 @@ impl Submitter { .send() .await?.with_timeout(Some(TIMEOUT_5_BLOCKS)).get_receipt().await; - match tx_result { - Ok(receipt) => { - tracing::debug!( - "Tx to refund the orderuids {:?} yielded following result {:?}", - uids, - receipt - ); + match tx_result { + Ok(receipt) => { + tracing::debug!( + "Tx to refund the orderuids {:?} yielded following result {:?}", + uids, + receipt + ); + } + Err(err) => 
tracing::debug!("transaction failed with: {err}"), } - Err(err) => tracing::debug!("transaction failed with: {err}"), + Ok(()) } - Ok(()) + } +} + +impl Submitter { + async fn get_submission_nonce(&self) -> Result { + // this command returns the tx count ever mined at the latest block + // Mempool tx are not considered. + self.web3 + .alloy + .get_transaction_count(self.signer_address) + .await + .with_context(|| { + format!( + "could not get latest nonce for address {:?}", + self.signer_address + ) + }) } } diff --git a/crates/refunder/src/traits.rs b/crates/refunder/src/traits.rs new file mode 100644 index 0000000000..197e9837dc --- /dev/null +++ b/crates/refunder/src/traits.rs @@ -0,0 +1,142 @@ +//! Trait definitions for database and blockchain access. + +#![allow(async_fn_in_trait)] + +use { + alloy::primitives::{Address, B256}, + anyhow::Result, + contracts::alloy::CoWSwapEthFlow::{self, EthFlowOrder}, + database::{OrderUid, ethflow_orders::EthOrderPlacement}, +}; + +const NO_OWNER: Address = Address::ZERO; +const INVALIDATED_OWNER: Address = Address::repeat_byte(0xff); + +/// Status of an EthFlow order refund eligibility. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum RefundStatus { + /// Order has already been refunded or cancelled. + Refunded, + /// Order is still active and eligible for refund, with the given owner + /// address. + NotYetRefunded(Address), + /// Order is invalid (never created, already freed, or owner cannot receive + /// ETH). + Invalid, +} + +impl From for RefundStatus { + fn from(value: CoWSwapEthFlow::CoWSwapEthFlow::ordersReturn) -> Self { + match value.owner { + NO_OWNER => Self::Invalid, + INVALIDATED_OWNER => Self::Refunded, + owner => Self::NotYetRefunded(owner), + } + } +} + +/// Database read operations. +#[cfg_attr(test, mockall::automock)] +pub trait DbRead: Send + Sync { + /// Fetches orders eligible for refund (expired, not invalidated, not + /// filled, meets price deviation threshold). + async fn get_refundable_orders( + &self, + block_time: i64, + min_validity_duration: i64, + min_price_deviation: f64, + ) -> Result>; + + /// Fetches the EthFlow order data for `uid`. + async fn get_ethflow_order_data(&self, uid: &OrderUid) -> Result; +} + +/// Blockchain read operations. +#[cfg_attr(test, mockall::automock)] +pub trait ChainRead: Send + Sync { + /// Returns the current block's timestamp. + async fn current_block_timestamp(&self) -> Result; + + /// Returns `true` if `address` can receive ETH + async fn can_receive_eth(&self, address: Address) -> bool; + + /// Returns the configured EthFlow contract addresses. + fn ethflow_addresses(&self) -> Vec
; + + /// Queries the on-chain refund status of an order. + async fn get_order_status( + &self, + ethflow_address: Address, + order_hash: B256, + ) -> Result; +} + +/// Blockchain write operations. +#[cfg_attr(test, mockall::automock)] +pub trait ChainWrite: Send + Sync { + /// Submits a batch refund transaction. + async fn submit_batch( + &mut self, + uids: &[OrderUid], + encoded_ethflow_orders: Vec, + ethflow_contract: Address, + ) -> Result<()>; +} + +#[cfg(test)] +pub mod test { + use super::*; + + /// Extension trait for `MockChainRead` to reduce mock setup boilerplate. + pub trait MockChainReadExt { + fn with_block_timestamp(&mut self, timestamp: u32) -> &mut Self; + fn with_ethflow_addresses(&mut self, addresses: Vec
) -> &mut Self; + fn with_order_status(&mut self, status: RefundStatus) -> &mut Self; + fn receiving_eth(&mut self) -> &mut Self; + } + + impl MockChainReadExt for MockChainRead { + fn with_block_timestamp(&mut self, timestamp: u32) -> &mut Self { + self.expect_current_block_timestamp() + .returning(move || Ok(timestamp)); + self + } + + fn with_ethflow_addresses(&mut self, addresses: Vec
) -> &mut Self { + self.expect_ethflow_addresses() + .returning(move || addresses.clone()); + self + } + + fn with_order_status(&mut self, status: RefundStatus) -> &mut Self { + self.expect_get_order_status() + .returning(move |_, _| Ok(status)); + self + } + + fn receiving_eth(&mut self) -> &mut Self { + self.expect_can_receive_eth().returning(|_| true); + self + } + } + + /// Extension trait for `MockDbRead` to reduce mock setup boilerplate. + pub trait MockDbReadExt { + fn with_default_ethflow_order_data(&mut self) -> &mut Self; + fn with_refundable_orders(&mut self, orders: Vec) -> &mut Self; + } + + impl MockDbReadExt for MockDbRead { + fn with_default_ethflow_order_data(&mut self) -> &mut Self { + self.expect_get_ethflow_order_data() + .returning(|_| Ok(EthFlowOrder::Data::default())); + self + } + + fn with_refundable_orders(&mut self, orders: Vec) -> &mut Self { + self.expect_get_refundable_orders() + .returning(move |_, _, _| Ok(orders.clone())); + self + } + } +} From 838fafee62ea30c48de6d8c5e27156c611f66289 Mon Sep 17 00:00:00 2001 From: Anuj Bansal Date: Tue, 3 Feb 2026 20:35:40 +0530 Subject: [PATCH 029/219] Fix tini zombie reaping with shared process namespace (#4114) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary Adds the `-s` (subreaper) flag to tini in the Dockerfile entrypoint to fix zombie process reaping when `shareProcessNamespace: true` is set in Kubernetes deployments. ## Problem Our Kubernetes deployments use `shareProcessNamespace: true` to allow sidecar containers (like the memory monitor) to access `/proc` of the main process. However, this causes the following warning: ``` [WARN tini (82)] Tini is not running as PID 1 and isn't registered as a child subreaper. Zombie processes will not be re-parented to Tini, so zombie reaping won't work. To fix the problem, use the -s option or set the environment variable TINI_SUBREAPER to register Tini as a child subreaper, or run Tini as PID 1. ``` When `shareProcessNamespace` is enabled, Kubernetes' pause container becomes PID 1 instead of tini: ``` PID 1: pause (Kubernetes infrastructure) ├── tini -- autopilot │ └── autopilot └── /bin/sh -c (memory-monitor sidecar) ``` Without PID 1 status, tini cannot reap zombie (orphaned) child processes by default. ## Solution The `-s` flag tells tini to register as a **child subreaper** via the `PR_SET_CHILD_SUBREAPER` prctl. This Linux kernel feature allows a non-PID-1 process to adopt and reap orphaned descendant processes, restoring proper zombie cleanup. --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 96fc535b91..40a74e5626 100644 --- a/Dockerfile +++ b/Dockerfile @@ -61,4 +61,4 @@ COPY --from=cargo-build /orderbook /usr/local/bin/orderbook COPY --from=cargo-build /refunder /usr/local/bin/refunder COPY --from=cargo-build /solvers /usr/local/bin/solvers -ENTRYPOINT ["/usr/bin/tini", "--"] +ENTRYPOINT ["/usr/bin/tini", "-s", "--"] From e239bd38a12fc013d3bc8fd12f5067e9841c30a6 Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Tue, 3 Feb 2026 17:59:59 +0100 Subject: [PATCH 030/219] [TRIVIAL] Fix vulnerability by bumping bytes crate version (#4121) # Description The `cargo audit` action complained about the `bytes` crate being vulnerable. The recommended fix is to upgrade `bytes` to version `1.11.1` (patch version bump). 
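
For reference, a lockfile-only bump like this can usually be reproduced with standard cargo tooling (shown only as an illustration; the exact command is not part of this change):

```sh
# Update only the `bytes` entry in Cargo.lock to the patched release
cargo update --package bytes --precise 1.11.1
```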
## How to test `cargo audit` action --- Cargo.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ffdab7f40b..8dd5ec4ef7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1929,9 +1929,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3" +checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" dependencies = [ "serde", ] @@ -2538,7 +2538,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ab67060fc6b8ef687992d439ca0fa36e7ed17e9a0b16b25b601e8757df720de" dependencies = [ "data-encoding", - "syn 2.0.114", + "syn 1.0.109", ] [[package]] From be8ec55a3ff9ae54fb477bc85853042775b68dd2 Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Tue, 3 Feb 2026 18:00:15 +0100 Subject: [PATCH 031/219] [TRIVIAL] Fix verbose log (#4120) # Description There was a slight oversight in https://github.com/cowprotocol/services/pull/4084. Instead of printing only the solver name we now print all the internals which is quite a lot. # Changes * only log the solver name again * stop logging the weth address as well --- crates/driver/src/domain/competition/solution/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/driver/src/domain/competition/solution/mod.rs b/crates/driver/src/domain/competition/solution/mod.rs index 46404cc955..01bdac1626 100644 --- a/crates/driver/src/domain/competition/solution/mod.rs +++ b/crates/driver/src/domain/competition/solution/mod.rs @@ -62,7 +62,9 @@ pub struct Solution { interactions: Vec, #[debug(ignore)] post_interactions: Vec, + #[debug("{}", solver.name())] solver: Solver, + #[debug(ignore)] weth: eth::WethAddress, gas: Option, flashloans: HashMap, From 993e2d426f6ec1347757410b8cdb3453af2d63d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Duarte?= Date: Tue, 3 Feb 2026 17:56:56 +0000 Subject: [PATCH 032/219] [TRIVIAL] Remove unused error (#4124) # Description Removes an unused error, this could actually have made it in #4106 but VSCode looked like an Xmas tree, I wasn't expecting this to be this simple. 
# Changes - [ ] Removes unused error - [ ] Removes accompanying From --- crates/driver/src/infra/blockchain/contracts.rs | 7 ------- crates/driver/src/infra/blockchain/mod.rs | 8 -------- 2 files changed, 15 deletions(-) diff --git a/crates/driver/src/infra/blockchain/contracts.rs b/crates/driver/src/infra/blockchain/contracts.rs index 7ca0287e3f..3ef81c4b03 100644 --- a/crates/driver/src/infra/blockchain/contracts.rs +++ b/crates/driver/src/infra/blockchain/contracts.rs @@ -10,7 +10,6 @@ use { }, ethrpc::Web3, std::collections::HashMap, - thiserror::Error, }; #[derive(Debug, Clone)] @@ -164,9 +163,3 @@ impl Contracts { &self.cow_amm_helper_by_factory } } - -#[derive(Debug, Error)] -pub enum Error { - #[error("method error: {0:?}")] - Rpc(#[from] alloy::contract::Error), -} diff --git a/crates/driver/src/infra/blockchain/mod.rs b/crates/driver/src/infra/blockchain/mod.rs index 1e1bb52890..32abce8509 100644 --- a/crates/driver/src/infra/blockchain/mod.rs +++ b/crates/driver/src/infra/blockchain/mod.rs @@ -347,14 +347,6 @@ impl Error { } } -impl From for Error { - fn from(err: contracts::Error) -> Self { - match err { - contracts::Error::Rpc(err) => Self::ContractRpc(err), - } - } -} - impl From for Error { fn from(err: SimulationError) -> Self { match err { From fe6e19a124aa2036e937c6e7bca7206ce17f59e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Duarte?= Date: Tue, 3 Feb 2026 17:57:08 +0000 Subject: [PATCH 033/219] [TRIVIAL] Rename Web3's DynProvider alloy -> provider (#4123) # Description Renames alloy to provider. Done in a separate PR from the removal due to the number of changes # Changes - [ ] `alloy` -> `provider` --- .../ethflow_events/event_retriever.rs | 2 +- .../onchain_order_events/ethflow_events.rs | 6 +- .../onchain_order_events/event_retriever.rs | 2 +- .../src/database/onchain_order_events/mod.rs | 10 ++- .../src/infra/blockchain/contracts.rs | 14 ++-- crates/autopilot/src/infra/blockchain/mod.rs | 10 +-- crates/autopilot/src/run.rs | 18 ++-- crates/cow-amm/src/factory.rs | 2 +- crates/cow-amm/src/maintainers.rs | 2 +- crates/cow-amm/src/registry.rs | 4 +- .../src/boundary/liquidity/balancer/v2/mod.rs | 15 ++-- crates/driver/src/boundary/liquidity/mod.rs | 2 +- .../src/boundary/liquidity/uniswap/v2.rs | 2 +- .../driver/src/boundary/liquidity/zeroex.rs | 2 +- .../src/domain/competition/pre_processing.rs | 2 +- .../driver/src/infra/blockchain/contracts.rs | 12 +-- crates/driver/src/infra/blockchain/gas.rs | 4 +- crates/driver/src/infra/blockchain/mod.rs | 14 ++-- crates/driver/src/infra/blockchain/token.rs | 2 +- crates/driver/src/infra/mempool/mod.rs | 6 +- crates/driver/src/tests/cases/settle.rs | 2 +- crates/driver/src/tests/setup/blockchain.rs | 82 ++++++++++--------- crates/driver/src/tests/setup/mod.rs | 5 +- crates/driver/src/tests/setup/solver.rs | 2 +- crates/e2e/src/setup/deploy.rs | 70 ++++++++-------- .../e2e/src/setup/onchain_components/mod.rs | 25 +++--- crates/e2e/src/setup/services.rs | 2 +- crates/e2e/tests/e2e/app_data_signer.rs | 2 +- crates/e2e/tests/e2e/banned_users.rs | 8 +- crates/e2e/tests/e2e/cow_amm.rs | 44 +++++----- crates/e2e/tests/e2e/eth_integration.rs | 6 +- crates/e2e/tests/e2e/eth_safe.rs | 4 +- crates/e2e/tests/e2e/ethflow.rs | 12 +-- crates/e2e/tests/e2e/hooks.rs | 18 ++-- crates/e2e/tests/e2e/limit_orders.rs | 12 +-- crates/e2e/tests/e2e/liquidity.rs | 14 ++-- .../e2e/liquidity_source_notification.rs | 12 +-- .../e2e/tests/e2e/place_order_with_quote.rs | 4 +- crates/e2e/tests/e2e/quote_verification.rs | 12 +-- 
crates/e2e/tests/e2e/refunder.rs | 8 +- crates/e2e/tests/e2e/replace_order.rs | 4 +- crates/e2e/tests/e2e/smart_contract_orders.rs | 4 +- crates/e2e/tests/e2e/submission.rs | 24 +++--- crates/e2e/tests/e2e/wrapper.rs | 8 +- crates/ethrpc/src/alloy/instrumentation.rs | 6 +- crates/ethrpc/src/block_stream.rs | 8 +- crates/ethrpc/src/lib.rs | 12 ++- crates/ethrpc/src/mock.rs | 4 +- crates/orderbook/src/run.rs | 30 +++---- crates/refunder/src/lib.rs | 2 +- crates/refunder/src/refund_service.rs | 6 +- crates/refunder/src/submitter.rs | 4 +- .../shared/src/account_balances/simulation.rs | 10 +-- .../src/bad_token/token_owner_finder/mod.rs | 2 +- crates/shared/src/bad_token/trace_call.rs | 14 ++-- crates/shared/src/code_fetching.rs | 2 +- crates/shared/src/event_handling.rs | 46 ++++++----- crates/shared/src/gas_price_estimation/mod.rs | 8 +- crates/shared/src/order_validation.rs | 29 +++---- .../balance_overrides/detector.rs | 4 +- .../price_estimation/trade_verifier/mod.rs | 4 +- .../src/signature_validator/simulation.rs | 2 +- .../sources/balancer_v2/pool_fetching/mod.rs | 2 +- .../src/sources/balancer_v2/pools/common.rs | 4 +- .../src/sources/balancer_v2/pools/weighted.rs | 4 +- crates/shared/src/sources/swapr.rs | 5 +- crates/shared/src/sources/uniswap_v2/mod.rs | 14 ++-- .../src/sources/uniswap_v2/pool_fetching.rs | 6 +- .../src/sources/uniswap_v3/pool_fetching.rs | 2 +- crates/shared/src/token_info.rs | 2 +- crates/shared/src/trace_many.rs | 2 +- crates/solver/src/interactions/allowances.rs | 4 +- crates/solver/src/interactions/weth.rs | 14 ++-- crates/solver/src/liquidity/balancer_v2.rs | 2 +- crates/solver/src/liquidity/zeroex.rs | 4 +- .../src/settlement/settlement_encoder.rs | 10 +-- crates/solvers/src/domain/solver.rs | 2 +- 77 files changed, 413 insertions(+), 372 deletions(-) diff --git a/crates/autopilot/src/database/ethflow_events/event_retriever.rs b/crates/autopilot/src/database/ethflow_events/event_retriever.rs index aa7ae2e83a..b1ed267ad2 100644 --- a/crates/autopilot/src/database/ethflow_events/event_retriever.rs +++ b/crates/autopilot/src/database/ethflow_events/event_retriever.rs @@ -30,7 +30,7 @@ impl AlloyEventRetrieving for EthFlowRefundRetriever { type Event = CoWSwapEthFlow::CoWSwapEthFlowEvents; fn provider(&self) -> &alloy::providers::DynProvider { - &self.web3.alloy + &self.web3.provider } fn filter(&self) -> alloy::rpc::types::Filter { diff --git a/crates/autopilot/src/database/onchain_order_events/ethflow_events.rs b/crates/autopilot/src/database/onchain_order_events/ethflow_events.rs index 9dd67203b7..ab68bb1d4f 100644 --- a/crates/autopilot/src/database/onchain_order_events/ethflow_events.rs +++ b/crates/autopilot/src/database/onchain_order_events/ethflow_events.rs @@ -262,7 +262,7 @@ async fn find_indexing_start_block( if last_indexed_block > 0 { return block_number_to_block_number_hash( - &web3.alloy, + &web3.provider, BlockNumberOrTag::Number(last_indexed_block), ) .await @@ -271,7 +271,7 @@ async fn find_indexing_start_block( } if let Some(start_block) = fallback_start_block { return block_number_to_block_number_hash( - &web3.alloy, + &web3.provider, BlockNumberOrTag::Number(start_block), ) .await @@ -279,7 +279,7 @@ async fn find_indexing_start_block( .context("failed to fetch fallback indexing start block"); } if let Some(chain_id) = settlement_fallback_chain_id { - return settlement_deployment_block_number_hash(&web3.alloy, chain_id) + return settlement_deployment_block_number_hash(&web3.provider, chain_id) .await .map(Some) .context("failed to fetch 
settlement deployment block"); diff --git a/crates/autopilot/src/database/onchain_order_events/event_retriever.rs b/crates/autopilot/src/database/onchain_order_events/event_retriever.rs index bde093e170..a56e10fe68 100644 --- a/crates/autopilot/src/database/onchain_order_events/event_retriever.rs +++ b/crates/autopilot/src/database/onchain_order_events/event_retriever.rs @@ -43,6 +43,6 @@ impl AlloyEventRetrieving for CoWSwapOnchainOrdersContract { } fn provider(&self) -> &alloy::providers::DynProvider { - &self.web3.alloy + &self.web3.provider } } diff --git a/crates/autopilot/src/database/onchain_order_events/mod.rs b/crates/autopilot/src/database/onchain_order_events/mod.rs index bf4d352aff..dfa02e5700 100644 --- a/crates/autopilot/src/database/onchain_order_events/mod.rs +++ b/crates/autopilot/src/database/onchain_order_events/mod.rs @@ -393,9 +393,11 @@ async fn get_block_numbers_of_events( let futures = event_block_numbers .into_iter() .map(|block_number| async move { - let timestamp = - timestamp_of_block_in_seconds(&web3.alloy, BlockNumberOrTag::Number(block_number)) - .await?; + let timestamp = timestamp_of_block_in_seconds( + &web3.provider, + BlockNumberOrTag::Number(block_number), + ) + .await?; Ok((block_number, timestamp)) }); let block_number_timestamp_pair: Vec> = @@ -1250,7 +1252,7 @@ mod test { pool: PgPool::connect_lazy("postgresql://").unwrap(), config: Default::default(), }, - trampoline: HooksTrampoline::Instance::deployed(&web3.alloy) + trampoline: HooksTrampoline::Instance::deployed(&web3.provider) .await .unwrap(), web3, diff --git a/crates/autopilot/src/infra/blockchain/contracts.rs b/crates/autopilot/src/infra/blockchain/contracts.rs index afbebbea14..3409a444d2 100644 --- a/crates/autopilot/src/infra/blockchain/contracts.rs +++ b/crates/autopilot/src/infra/blockchain/contracts.rs @@ -45,7 +45,7 @@ impl Contracts { .settlement .or_else(|| GPv2Settlement::deployment_address(&chain.id())) .unwrap(), - web3.alloy.clone(), + web3.provider.clone(), ); let signatures = contracts::alloy::support::Signatures::Instance::new( @@ -53,7 +53,7 @@ impl Contracts { .signatures .or_else(|| contracts::alloy::support::Signatures::deployment_address(&chain.id())) .unwrap(), - web3.alloy.clone(), + web3.provider.clone(), ); let weth = WETH9::Instance::new( @@ -61,7 +61,7 @@ impl Contracts { .weth .or_else(|| WETH9::deployment_address(&chain.id())) .unwrap(), - web3.alloy.clone(), + web3.provider.clone(), ); let balances = Balances::Instance::new( @@ -69,7 +69,7 @@ impl Contracts { .balances .or_else(|| Balances::deployment_address(&chain.id())) .unwrap(), - web3.alloy.clone(), + web3.provider.clone(), ); let trampoline = HooksTrampoline::Instance::new( @@ -77,10 +77,10 @@ impl Contracts { .trampoline .or_else(|| HooksTrampoline::deployment_address(&chain.id())) .unwrap(), - web3.alloy.clone(), + web3.provider.clone(), ); - let chainalysis_oracle = ChainalysisOracle::Instance::deployed(&web3.alloy) + let chainalysis_oracle = ChainalysisOracle::Instance::deployed(&web3.provider) .await .ok(); @@ -99,7 +99,7 @@ impl Contracts { .call() .await .expect("authenticator address"), - web3.alloy.clone(), + web3.provider.clone(), ); Self { diff --git a/crates/autopilot/src/infra/blockchain/mod.rs b/crates/autopilot/src/infra/blockchain/mod.rs index b771441d45..90248b9ac9 100644 --- a/crates/autopilot/src/infra/blockchain/mod.rs +++ b/crates/autopilot/src/infra/blockchain/mod.rs @@ -32,7 +32,7 @@ impl Rpc { ethrpc_args: &shared::ethrpc::Arguments, ) -> Result { let web3 = 
boundary::web3_client(url, ethrpc_args); - let chain = Chain::try_from(web3.alloy.get_chain_id().await?) + let chain = Chain::try_from(web3.provider.get_chain_id().await?) .map_err(|_| Error::UnsupportedChain)?; Ok(Self { @@ -87,7 +87,7 @@ impl Ethereum { Self { current_block: current_block_args - .stream(url, unbuffered_web3.alloy.clone()) + .stream(url, unbuffered_web3.provider.clone()) .await .expect("couldn't initialize current block stream"), web3, @@ -113,10 +113,10 @@ impl Ethereum { pub async fn transaction(&self, hash: eth::TxId) -> Result { let (receipt, traces): (Option, GethTrace) = tokio::try_join!( - self.web3.alloy.get_transaction_receipt(hash.0), + self.web3.provider.get_transaction_receipt(hash.0), // Use unbuffered transport for the Debug API since not all providers support // batched debug calls. - self.unbuffered_web3.alloy.debug_trace_transaction( + self.unbuffered_web3.provider.debug_trace_transaction( hash.0, GethDebugTracingOptions::new_tracer(GethDebugBuiltInTracerType::CallTracer), ) @@ -131,7 +131,7 @@ impl Ethereum { )))?; let block = self .web3 - .alloy + .provider .get_block_by_hash(block_hash) .await? .ok_or(Error::TransactionNotFound)?; diff --git a/crates/autopilot/src/run.rs b/crates/autopilot/src/run.rs index 2b0cad10d8..8833155918 100644 --- a/crates/autopilot/src/run.rs +++ b/crates/autopilot/src/run.rs @@ -186,7 +186,7 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { .map(|node_url| shared::ethrpc::web3(&args.shared.ethrpc, node_url, "simulation")); let chain_id = web3 - .alloy + .provider .get_chain_id() .instrument(info_span!("chain_id")) .await @@ -240,9 +240,9 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { addr }); let vault = - vault_address.map(|address| BalancerV2Vault::Instance::new(address, web3.alloy.clone())); + vault_address.map(|address| BalancerV2Vault::Instance::new(address, web3.provider.clone())); - let uniswapv3_factory = IUniswapV3Factory::Instance::deployed(&web3.alloy) + let uniswapv3_factory = IUniswapV3Factory::Instance::deployed(&web3.provider) .instrument(info_span!("uniswapv3_deployed")) .await .inspect_err(|err| tracing::warn!(%err, "error while fetching IUniswapV3Factory instance")) @@ -353,7 +353,7 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { let token_info_fetcher = Arc::new(CachedTokenInfoFetcher::new(Arc::new(TokenInfoFetcher { web3: web3.clone(), }))); - let block_retriever = Arc::new(web3.alloy.clone()); + let block_retriever = Arc::new(web3.provider.clone()); let code_fetcher = Arc::new(CachedCodeFetcher::new(Arc::new(web3.clone()))); @@ -414,7 +414,7 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { let skip_event_sync_start = if args.skip_event_sync { Some( - block_number_to_block_number_hash(&web3.alloy, BlockNumberOrTag::Latest) + block_number_to_block_number_hash(&web3.provider, BlockNumberOrTag::Latest) .await .expect("Failed to fetch latest block"), ) @@ -444,7 +444,7 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { }; let settlement_event_indexer = EventUpdater::new( boundary::events::settlement::GPv2SettlementContract::new( - web3.alloy.clone(), + web3.provider.clone(), *eth.contracts().settlement().address(), ), boundary::events::settlement::Indexer::new( @@ -727,13 +727,13 @@ async fn shadow_mode(args: Arguments) -> ! 
{ .collect(); let web3 = shared::ethrpc::web3(&args.shared.ethrpc, &args.shared.node_url, "base"); - let weth = WETH9::Instance::deployed(&web3.alloy) + let weth = WETH9::Instance::deployed(&web3.provider) .await .expect("couldn't find deployed WETH contract"); let trusted_tokens = { let chain_id = web3 - .alloy + .provider .get_chain_id() .await .expect("Could not get chainId"); @@ -765,7 +765,7 @@ async fn shadow_mode(args: Arguments) -> ! { let current_block = args .shared .current_block - .stream(args.shared.node_url, web3.alloy.clone()) + .stream(args.shared.node_url, web3.provider.clone()) .await .expect("couldn't initialize current block stream"); diff --git a/crates/cow-amm/src/factory.rs b/crates/cow-amm/src/factory.rs index 54f6d7984b..214a3a6f40 100644 --- a/crates/cow-amm/src/factory.rs +++ b/crates/cow-amm/src/factory.rs @@ -30,6 +30,6 @@ impl AlloyEventRetrieving for Factory { } fn provider(&self) -> &DynProvider { - &self.web3.alloy + &self.web3.provider } } diff --git a/crates/cow-amm/src/maintainers.rs b/crates/cow-amm/src/maintainers.rs index fe39199a65..b4c93a2f84 100644 --- a/crates/cow-amm/src/maintainers.rs +++ b/crates/cow-amm/src/maintainers.rs @@ -28,7 +28,7 @@ impl EmptyPoolRemoval { .traded_tokens() .iter() .map(move |token| async move { - ERC20::Instance::new(*token, self.web3.alloy.clone()) + ERC20::Instance::new(*token, self.web3.provider.clone()) .balanceOf(*amm_address) .call() .await diff --git a/crates/cow-amm/src/registry.rs b/crates/cow-amm/src/registry.rs index 864b6ec1cc..1a20c4b58f 100644 --- a/crates/cow-amm/src/registry.rs +++ b/crates/cow-amm/src/registry.rs @@ -44,7 +44,7 @@ impl Registry { ) { let storage = Storage::new( deployment_block, - CowAmmLegacyHelper::Instance::new(helper_contract, self.web3.alloy.clone()), + CowAmmLegacyHelper::Instance::new(helper_contract, self.web3.provider.clone()), factory, db, ) @@ -57,7 +57,7 @@ impl Registry { address: factory, }; let event_handler = - EventHandler::new(Arc::new(self.web3.alloy.clone()), indexer, storage, None); + EventHandler::new(Arc::new(self.web3.provider.clone()), indexer, storage, None); let token_balance_maintainer = EmptyPoolRemoval::new(self.storage.clone(), self.web3.clone()); diff --git a/crates/driver/src/boundary/liquidity/balancer/v2/mod.rs b/crates/driver/src/boundary/liquidity/balancer/v2/mod.rs index 0e61710b0f..72287f6eb2 100644 --- a/crates/driver/src/boundary/liquidity/balancer/v2/mod.rs +++ b/crates/driver/src/boundary/liquidity/balancer/v2/mod.rs @@ -104,7 +104,7 @@ async fn init_liquidity( ) -> Result> { let web3 = eth.web3().clone(); let contracts = BalancerContracts { - vault: BalancerV2Vault::Instance::new(config.vault.0, web3.alloy.clone()), + vault: BalancerV2Vault::Instance::new(config.vault.0, web3.provider.clone()), factories: [ config .weighted @@ -112,7 +112,7 @@ async fn init_liquidity( .map(|&factory| { BalancerFactoryInstance::Weighted(BalancerV2WeightedPoolFactory::Instance::new( factory, - web3.alloy.clone(), + web3.provider.clone(), )) }) .collect::>(), @@ -121,7 +121,10 @@ async fn init_liquidity( .iter() .map(|&factory| { BalancerFactoryInstance::WeightedV3( - BalancerV2WeightedPoolFactoryV3::Instance::new(factory, web3.alloy.clone()), + BalancerV2WeightedPoolFactoryV3::Instance::new( + factory, + web3.provider.clone(), + ), ) }) .collect::>(), @@ -131,7 +134,7 @@ async fn init_liquidity( .map(|&factory| { BalancerFactoryInstance::StableV2(BalancerV2StablePoolFactoryV2::Instance::new( factory, - web3.alloy.clone(), + web3.provider.clone(), )) }) 
.collect::>(), @@ -142,7 +145,7 @@ async fn init_liquidity( BalancerFactoryInstance::LiquidityBootstrapping( BalancerV2LiquidityBootstrappingPoolFactory::Instance::new( factory, - web3.alloy.clone(), + web3.provider.clone(), ), ) }) @@ -154,7 +157,7 @@ async fn init_liquidity( BalancerFactoryInstance::ComposableStable( BalancerV2ComposableStablePoolFactory::Instance::new( factory, - web3.alloy.clone(), + web3.provider.clone(), ), ) }) diff --git a/crates/driver/src/boundary/liquidity/mod.rs b/crates/driver/src/boundary/liquidity/mod.rs index f34ec43781..0c91256ae9 100644 --- a/crates/driver/src/boundary/liquidity/mod.rs +++ b/crates/driver/src/boundary/liquidity/mod.rs @@ -56,7 +56,7 @@ impl Fetcher { /// Creates a new fetcher for the specified configuration. pub async fn try_new(eth: &Ethereum, config: &infra::liquidity::Config) -> Result { let block_stream = eth.current_block(); - let block_retriever = Arc::new(eth.web3().alloy.clone()); + let block_retriever = Arc::new(eth.web3().provider.clone()); let uni_v2: Vec<_> = future::try_join_all( config diff --git a/crates/driver/src/boundary/liquidity/uniswap/v2.rs b/crates/driver/src/boundary/liquidity/uniswap/v2.rs index 6c2d33d9a6..4d51435a77 100644 --- a/crates/driver/src/boundary/liquidity/uniswap/v2.rs +++ b/crates/driver/src/boundary/liquidity/uniswap/v2.rs @@ -133,7 +133,7 @@ where R: PoolReading + Send + Sync + 'static, F: FnOnce(Web3, PairProvider) -> R, { - let router = IUniswapLikeRouter::Instance::new(config.router.0, eth.web3().alloy.clone()); + let router = IUniswapLikeRouter::Instance::new(config.router.0, eth.web3().provider.clone()); let settlement = eth.contracts().settlement().clone(); let pool_fetcher = { let factory = router.factory().call().await?; diff --git a/crates/driver/src/boundary/liquidity/zeroex.rs b/crates/driver/src/boundary/liquidity/zeroex.rs index 13c15d4c3b..fbc5b44a7e 100644 --- a/crates/driver/src/boundary/liquidity/zeroex.rs +++ b/crates/driver/src/boundary/liquidity/zeroex.rs @@ -87,7 +87,7 @@ pub async fn collector( let eth = eth.with_metric_label("zeroex".into()); let settlement = *eth.contracts().settlement().address(); let web3 = eth.web3().clone(); - let contract = contracts::alloy::IZeroex::Instance::deployed(&web3.alloy).await?; + let contract = contracts::alloy::IZeroex::Instance::deployed(&web3.provider).await?; let http_client_factory = &HttpClientFactory::new(&shared::http_client::Arguments { http_timeout: config.http_timeout, }); diff --git a/crates/driver/src/domain/competition/pre_processing.rs b/crates/driver/src/domain/competition/pre_processing.rs index 99bd4bb178..bc67effeb0 100644 --- a/crates/driver/src/domain/competition/pre_processing.rs +++ b/crates/driver/src/domain/competition/pre_processing.rs @@ -140,7 +140,7 @@ impl DataAggregator { .map(|(factory, helper)| (factory.0, helper.0)) .collect(); let cow_amm_cache = - cow_amm::Cache::new(eth.web3().alloy.clone(), cow_amm_helper_by_factory); + cow_amm::Cache::new(eth.web3().provider.clone(), cow_amm_helper_by_factory); Self { utilities: Arc::new(Utilities { diff --git a/crates/driver/src/infra/blockchain/contracts.rs b/crates/driver/src/infra/blockchain/contracts.rs index 3ef81c4b03..4b19cc1226 100644 --- a/crates/driver/src/infra/blockchain/contracts.rs +++ b/crates/driver/src/infra/blockchain/contracts.rs @@ -57,18 +57,18 @@ impl Contracts { .map(Into::into) .or_else(|| GPv2Settlement::deployment_address(&chain.id())) .unwrap(), - web3.alloy.clone(), + web3.provider.clone(), ); let vault_relayer = 
settlement.vaultRelayer().call().await?; let vault = - BalancerV2Vault::Instance::new(settlement.vault().call().await?, web3.alloy.clone()); + BalancerV2Vault::Instance::new(settlement.vault().call().await?, web3.provider.clone()); let balance_helper = Balances::Instance::new( addresses .balances .map(Into::into) .or_else(|| Balances::deployment_address(&chain.id())) .unwrap(), - web3.alloy.clone(), + web3.provider.clone(), ); let signatures = contracts::alloy::support::Signatures::Instance::new( addresses @@ -76,7 +76,7 @@ impl Contracts { .map(Into::into) .or_else(|| contracts::alloy::support::Signatures::deployment_address(&chain.id())) .unwrap(), - web3.alloy.clone(), + web3.provider.clone(), ); let weth = WETH9::Instance::new( @@ -85,7 +85,7 @@ impl Contracts { .map(Into::into) .or_else(|| WETH9::deployment_address(&chain.id())) .unwrap(), - web3.alloy.clone(), + web3.provider.clone(), ); let settlement_domain_separator = eth::DomainSeparator( @@ -101,7 +101,7 @@ impl Contracts { let flashloan_router = addresses .flashloan_router .or_else(|| FlashLoanRouter::deployment_address(&chain.id()).map(eth::ContractAddress)) - .map(|address| FlashLoanRouter::Instance::new(address.0, web3.alloy.clone())); + .map(|address| FlashLoanRouter::Instance::new(address.0, web3.provider.clone())); Ok(Self { settlement, diff --git a/crates/driver/src/infra/blockchain/gas.rs b/crates/driver/src/infra/blockchain/gas.rs index 8c8741fdd5..a4d9a48ce3 100644 --- a/crates/driver/src/infra/blockchain/gas.rs +++ b/crates/driver/src/infra/blockchain/gas.rs @@ -37,12 +37,12 @@ impl GasPriceEstimator { mempools: &[mempool::Config], ) -> Result { let gas: Arc = match gas_estimator_type { - GasEstimatorType::Web3 => Arc::new(NodeGasPriceEstimator::new(web3.alloy.clone())), + GasEstimatorType::Web3 => Arc::new(NodeGasPriceEstimator::new(web3.provider.clone())), GasEstimatorType::Alloy { past_blocks, reward_percentile, } => Arc::new(ConfigurableGasPriceEstimator::new( - web3.alloy.clone(), + web3.provider.clone(), EstimatorConfig { past_blocks: *past_blocks, reward_percentile: *reward_percentile, diff --git a/crates/driver/src/infra/blockchain/mod.rs b/crates/driver/src/infra/blockchain/mod.rs index 32abce8509..ecdb544e7a 100644 --- a/crates/driver/src/infra/blockchain/mod.rs +++ b/crates/driver/src/infra/blockchain/mod.rs @@ -54,7 +54,7 @@ impl Rpc { args.max_batch_size, args.max_concurrent_requests, ); - let chain = Chain::try_from(web3.alloy.get_chain_id().await?)?; + let chain = Chain::try_from(web3.provider.get_chain_id().await?)?; Ok(Self { web3, chain, args }) } @@ -112,7 +112,7 @@ impl Ethereum { let Rpc { web3, chain, args } = rpc; let current_block_stream = current_block_args - .stream(args.url.clone(), web3.alloy.clone()) + .stream(args.url.clone(), web3.provider.clone()) .await .expect("couldn't initialize current block stream"); @@ -170,7 +170,7 @@ impl Ethereum { /// Check if a smart contract is deployed to the given address. 
pub async fn is_contract(&self, address: eth::Address) -> Result { - let code = self.web3.alloy.get_code_at(address).await?; + let code = self.web3.provider.get_code_at(address).await?; Ok(!code.is_empty()) } @@ -197,7 +197,7 @@ impl Ethereum { _ => tx, }; - let access_list = self.web3.alloy.create_access_list(&tx).pending().await?; + let access_list = self.web3.provider.create_access_list(&tx).pending().await?; Ok(access_list .ensure_ok() @@ -222,7 +222,7 @@ impl Ethereum { let estimated_gas = self .web3 - .alloy + .provider .estimate_gas(tx) .pending() .await @@ -246,7 +246,7 @@ impl Ethereum { /// Returns the current [`eth::Ether`] balance of the specified account. pub async fn balance(&self, address: eth::Address) -> Result { self.web3 - .alloy + .provider .get_balance(address) .await .map(Into::into) @@ -261,7 +261,7 @@ impl Ethereum { /// Returns the transaction's on-chain inclusion status. pub async fn transaction_status(&self, tx_hash: ð::TxId) -> Result { self.web3 - .alloy + .provider .get_transaction_receipt(tx_hash.0) .await .map(|result| { diff --git a/crates/driver/src/infra/blockchain/token.rs b/crates/driver/src/infra/blockchain/token.rs index dfa27f223b..1f4aa4227c 100644 --- a/crates/driver/src/infra/blockchain/token.rs +++ b/crates/driver/src/infra/blockchain/token.rs @@ -14,7 +14,7 @@ pub struct Erc20 { impl Erc20 { pub(super) fn new(eth: &Ethereum, address: eth::TokenAddress) -> Self { Self { - token: contracts::alloy::ERC20::Instance::new(address.0.0, eth.web3.alloy.clone()), + token: contracts::alloy::ERC20::Instance::new(address.0.0, eth.web3.provider.clone()), } } diff --git a/crates/driver/src/infra/mempool/mod.rs b/crates/driver/src/infra/mempool/mod.rs index 1b120a7e5c..09ff772e3a 100644 --- a/crates/driver/src/infra/mempool/mod.rs +++ b/crates/driver/src/infra/mempool/mod.rs @@ -100,7 +100,7 @@ impl Mempool { /// specified block number. If no block number is provided in the config, /// uses the alloy's default behavior. pub async fn get_nonce(&self, address: eth::Address) -> Result { - let call = self.transport.alloy.get_transaction_count(address); + let call = self.transport.provider.get_transaction_count(address); match self.config.nonce_block_number { Some(BlockNumberOrTag::Latest) => call.latest(), Some(BlockNumberOrTag::Earliest) => call.earliest(), @@ -143,7 +143,7 @@ impl Mempool { let submission = self .transport - .alloy + .provider .send_transaction(tx_request) .await .map_err(anyhow::Error::from); @@ -188,7 +188,7 @@ impl Mempool { ) -> anyhow::Result> { let tx_pool_content = self .transport - .alloy + .provider .txpool_content_from(signer) .await .context("failed to query pending transactions")?; diff --git a/crates/driver/src/tests/cases/settle.rs b/crates/driver/src/tests/cases/settle.rs index 6a69e68c50..f184381c0e 100644 --- a/crates/driver/src/tests/cases/settle.rs +++ b/crates/driver/src/tests/cases/settle.rs @@ -118,7 +118,7 @@ async fn submits_huge_solution() { // half of the block gas limit, we want it to be submitted/settled as long as it // fits in the block. 
test.web3() - .alloy + .provider .raw_request::<_, bool>("evm_setBlockGasLimit".into(), (9_000_000,)) .await .unwrap(); diff --git a/crates/driver/src/tests/setup/blockchain.rs b/crates/driver/src/tests/setup/blockchain.rs index a5a1fe42d8..984e4ed58f 100644 --- a/crates/driver/src/tests/setup/blockchain.rs +++ b/crates/driver/src/tests/setup/blockchain.rs @@ -254,10 +254,10 @@ impl Blockchain { web3.wallet.register_signer(primary_account); // Use the primary account to fund the trader, cow amm and the solver with ETH. - let balance = web3.alloy.get_balance(primary_address).await.unwrap(); + let balance = web3.provider.get_balance(primary_address).await.unwrap(); wait_for( &web3, - web3.alloy.send_and_watch( + web3.provider.send_and_watch( TransactionRequest::default() .from(primary_address) .to(main_trader_address) @@ -267,15 +267,15 @@ impl Blockchain { .await .unwrap(); - let weth = contracts::alloy::WETH9::Instance::deploy_builder(web3.alloy.clone()) + let weth = contracts::alloy::WETH9::Instance::deploy_builder(web3.provider.clone()) .from(main_trader_address) .deploy() .await .unwrap(); - let weth = WETH9::WETH9::new(weth, web3.alloy.clone()); + let weth = WETH9::WETH9::new(weth, web3.provider.clone()); wait_for( &web3, - web3.alloy.send_and_watch( + web3.provider.send_and_watch( TransactionRequest::default() .from(primary_address) .to(*weth.address()) @@ -286,14 +286,16 @@ impl Blockchain { .unwrap(); // Set up the settlement contract and related contracts. - let vault_authorizer = - BalancerV2Authorizer::Instance::deploy_builder(web3.alloy.clone(), main_trader_address) - .from(main_trader_address) - .deploy() - .await - .unwrap(); + let vault_authorizer = BalancerV2Authorizer::Instance::deploy_builder( + web3.provider.clone(), + main_trader_address, + ) + .from(main_trader_address) + .deploy() + .await + .unwrap(); let vault = BalancerV2Vault::Instance::deploy_builder( - web3.alloy.clone(), + web3.provider.clone(), vault_authorizer, *weth.address(), alloy::primitives::U256::ZERO, @@ -303,11 +305,11 @@ impl Blockchain { .deploy() .await .unwrap(); - let authenticator = GPv2AllowListAuthentication::deploy(web3.alloy.clone()) + let authenticator = GPv2AllowListAuthentication::deploy(web3.provider.clone()) .await .unwrap(); let mut settlement = GPv2Settlement::GPv2Settlement::deploy( - web3.alloy.clone(), + web3.provider.clone(), *authenticator.address(), vault, ) @@ -319,7 +321,7 @@ impl Blockchain { // replace the vault relayer code to allow the settlement // contract at a specific address. 
let mut code = web3 - .alloy + .provider .get_code_at(vault_relayer) .await .unwrap() @@ -333,31 +335,35 @@ impl Blockchain { } code }; - web3.alloy + web3.provider .anvil_set_code(vault_relayer, vault_relayer_code.into()) .await .unwrap(); // Note: (settlement.address() == authenticator_address) != settlement_address - let settlement_code = web3.alloy.get_code_at(*settlement.address()).await.unwrap(); - web3.alloy + let settlement_code = web3 + .provider + .get_code_at(*settlement.address()) + .await + .unwrap(); + web3.provider .anvil_set_code(settlement_address, settlement_code) .await .unwrap(); settlement = - GPv2Settlement::GPv2Settlement::new(settlement_address, web3.alloy.clone()); + GPv2Settlement::GPv2Settlement::new(settlement_address, web3.provider.clone()); } let balances_address = match config.balances_address { Some(balances_address) => balances_address, - None => Balances::Instance::deploy_builder(web3.alloy.clone()) + None => Balances::Instance::deploy_builder(web3.provider.clone()) .from(main_trader_address) .deploy() .await .unwrap(), }; - let balances = Balances::Instance::new(balances_address, web3.alloy.clone()); + let balances = Balances::Instance::new(balances_address, web3.provider.clone()); authenticator .initializeManager(main_trader_address) @@ -369,22 +375,22 @@ impl Blockchain { let signatures_address = if let Some(signatures_address) = config.signatures_address { signatures_address } else { - Signatures::Instance::deploy_builder(web3.alloy.clone()) + Signatures::Instance::deploy_builder(web3.provider.clone()) .from(main_trader_address) .deploy() .await .unwrap() }; - let signatures = Signatures::Instance::new(signatures_address, web3.alloy.clone()); + let signatures = Signatures::Instance::new(signatures_address, web3.provider.clone()); let flashloan_router_address = - FlashLoanRouter::Instance::deploy_builder(web3.alloy.clone(), *settlement.address()) + FlashLoanRouter::Instance::deploy_builder(web3.provider.clone(), *settlement.address()) .from(main_trader_address) .deploy() .await .unwrap(); let flashloan_router = - FlashLoanRouter::Instance::new(flashloan_router_address, web3.alloy.clone()); + FlashLoanRouter::Instance::new(flashloan_router_address, web3.provider.clone()); let mut trader_addresses: Vec
= Vec::new(); for config in config.solvers { @@ -396,7 +402,7 @@ impl Blockchain { .unwrap(); wait_for( &web3, - web3.alloy.send_and_watch( + web3.provider.send_and_watch( TransactionRequest::default() .from(primary_address) .to(config.address()) @@ -419,13 +425,13 @@ impl Blockchain { let mut tokens = HashMap::new(); for pool in config.pools.iter() { if pool.reserve_a.token != "WETH" && !tokens.contains_key(pool.reserve_a.token) { - let token = ERC20Mintable::Instance::deploy(web3.alloy.clone()) + let token = ERC20Mintable::Instance::deploy(web3.provider.clone()) .await .unwrap(); tokens.insert(pool.reserve_a.token, token); } if pool.reserve_b.token != "WETH" && !tokens.contains_key(pool.reserve_b.token) { - let token = ERC20Mintable::Instance::deploy(web3.alloy.clone()) + let token = ERC20Mintable::Instance::deploy(web3.provider.clone()) .await .unwrap(); tokens.insert(pool.reserve_b.token, token); @@ -433,15 +439,17 @@ impl Blockchain { } // Create the uniswap factory. let contract_address = contracts::alloy::UniswapV2Factory::Instance::deploy_builder( - web3.alloy.clone(), + web3.provider.clone(), main_trader_address, ) .from(main_trader_address) .deploy() .await .unwrap(); - let uniswap_factory = - contracts::alloy::UniswapV2Factory::Instance::new(contract_address, web3.alloy.clone()); + let uniswap_factory = contracts::alloy::UniswapV2Factory::Instance::new( + contract_address, + web3.provider.clone(), + ); // Create and fund a uniswap pair for each pool. Fund the settlement contract // with the same liquidity as the pool, to allow for internalized interactions. let mut pairs = Vec::new(); @@ -471,7 +479,7 @@ impl Blockchain { .call() .await .unwrap(), - web3.alloy.clone(), + web3.provider.clone(), ); pairs.push(Pair { token_a: pool.reserve_a.token, @@ -729,11 +737,11 @@ impl Blockchain { // Find the pair to use for this order and calculate the buy and sell amounts. let sell_token = ERC20::Instance::new( self.get_token_wrapped(order.sell_token), - self.web3.alloy.clone(), + self.web3.provider.clone(), ); let buy_token = ERC20::Instance::new( self.get_token_wrapped(order.buy_token), - self.web3.alloy.clone(), + self.web3.provider.clone(), ); let pair = self.find_pair(order); let execution = self.execution(order); @@ -859,7 +867,7 @@ impl Blockchain { } pub async fn set_auto_mining(&self, enabled: bool) { - self.web3.alloy.evm_set_automine(enabled).await.unwrap(); + self.web3.provider.evm_set_automine(enabled).await.unwrap(); } } @@ -958,7 +966,7 @@ impl Drop for Node { /// wait for transactions to be confirmed before proceeding with the test. When /// switching from geth back to hardhat, this function can be removed. 
pub async fn wait_for(web3: &Web3, fut: impl Future) -> T { - let block = web3.alloy.get_block_number().await.unwrap(); + let block = web3.provider.get_block_number().await.unwrap(); let result = fut.await; wait_for_block(web3, block + 1).await; result @@ -968,7 +976,7 @@ pub async fn wait_for(web3: &Web3, fut: impl Future) -> T { pub async fn wait_for_block(web3: &Web3, block: u64) { tokio::time::timeout(std::time::Duration::from_secs(15), async { loop { - let next_block = web3.alloy.get_block_number().await.unwrap(); + let next_block = web3.provider.get_block_number().await.unwrap(); if next_block >= block { break; } diff --git a/crates/driver/src/tests/setup/mod.rs b/crates/driver/src/tests/setup/mod.rs index e4202ef77d..5d1af8abde 100644 --- a/crates/driver/src/tests/setup/mod.rs +++ b/crates/driver/src/tests/setup/mod.rs @@ -1131,7 +1131,8 @@ impl Test { pub async fn settle_with_solver(&self, solver_name: &str, solution_id: u64) -> Settle { let submission_deadline_latest_block: u64 = - self.web3().alloy.get_block_number().await.unwrap() + self.settle_submission_deadline; + self.web3().provider.get_block_number().await.unwrap() + + self.settle_submission_deadline; let old_balances = self.balances().await; let res = self .client @@ -1185,7 +1186,7 @@ impl Test { "ETH", self.blockchain .web3 - .alloy + .provider .get_balance(self.trader_address) .await .unwrap(), diff --git a/crates/driver/src/tests/setup/solver.rs b/crates/driver/src/tests/setup/solver.rs index 79f147657b..ca01351e46 100644 --- a/crates/driver/src/tests/setup/solver.rs +++ b/crates/driver/src/tests/setup/solver.rs @@ -412,7 +412,7 @@ impl Solver { .flat_map(|f| { let build_token = |token_name: String| async move { let token = config.blockchain.get_token_wrapped(token_name.as_str()); - let contract = ERC20::Instance::new(token, config.blockchain.web3.alloy.clone()); + let contract = ERC20::Instance::new(token, config.blockchain.web3.provider.clone()); let settlement = config.blockchain.settlement.address(); ( token.encode_hex_with_prefix(), diff --git a/crates/e2e/src/setup/deploy.rs b/crates/e2e/src/setup/deploy.rs index 217cbf234b..f6963d31fb 100644 --- a/crates/e2e/src/setup/deploy.rs +++ b/crates/e2e/src/setup/deploy.rs @@ -47,48 +47,50 @@ pub struct Contracts { impl Contracts { pub async fn deployed_with(web3: &Web3, deployed: DeployedContracts) -> Self { let network_id = web3 - .alloy + .provider .get_chain_id() .await .expect("get network ID failed") .to_string(); tracing::info!("connected to forked test network {}", network_id); - let gp_settlement = GPv2Settlement::Instance::deployed(&web3.alloy) + let gp_settlement = GPv2Settlement::Instance::deployed(&web3.provider) .await .unwrap(); let balances = match deployed.balances { - Some(address) => Balances::Instance::new(address, web3.alloy.clone()), - None => Balances::Instance::deployed(&web3.alloy) + Some(address) => Balances::Instance::new(address, web3.provider.clone()), + None => Balances::Instance::deployed(&web3.provider) .await .expect("failed to find balances contract"), }; let signatures = match deployed.signatures { - Some(address) => Signatures::Instance::new(address, web3.alloy.clone()), - None => Signatures::Instance::deployed(&web3.alloy) + Some(address) => Signatures::Instance::new(address, web3.provider.clone()), + None => Signatures::Instance::deployed(&web3.provider) .await .expect("failed to find signatures contract"), }; - let flashloan_router = FlashLoanRouter::Instance::deployed(&web3.alloy).await.ok(); + let flashloan_router = 
FlashLoanRouter::Instance::deployed(&web3.provider) + .await + .ok(); Self { chain_id: network_id .parse() .expect("Couldn't parse network ID to u64"), - balancer_vault: BalancerV2Vault::Instance::deployed(&web3.alloy) + balancer_vault: BalancerV2Vault::Instance::deployed(&web3.provider) .await .unwrap(), - gp_authenticator: GPv2AllowListAuthentication::Instance::deployed(&web3.alloy) + gp_authenticator: GPv2AllowListAuthentication::Instance::deployed(&web3.provider) .await .unwrap(), - uniswap_v2_factory: UniswapV2Factory::Instance::deployed(&web3.alloy) + uniswap_v2_factory: UniswapV2Factory::Instance::deployed(&web3.provider) .await .unwrap(), - uniswap_v2_router: UniswapV2Router02::Instance::deployed(&web3.alloy) + uniswap_v2_router: UniswapV2Router02::Instance::deployed(&web3.provider) .await .unwrap(), - weth: WETH9::Instance::deployed(&web3.alloy).await.unwrap(), + weth: WETH9::Instance::deployed(&web3.provider).await.unwrap(), allowance: gp_settlement .vaultRelayer() .call() @@ -103,11 +105,11 @@ impl Contracts { .0, ), ethflows: vec![ - CoWSwapEthFlow::Instance::deployed(&web3.alloy) + CoWSwapEthFlow::Instance::deployed(&web3.provider) .await .unwrap(), ], - hooks: HooksTrampoline::Instance::deployed(&web3.alloy) + hooks: HooksTrampoline::Instance::deployed(&web3.provider) .await .unwrap(), gp_settlement, @@ -119,7 +121,7 @@ impl Contracts { pub async fn deploy(web3: &Web3) -> Self { let network_id = web3 - .alloy + .provider .get_chain_id() .await .expect("get network ID failed") @@ -127,19 +129,22 @@ impl Contracts { tracing::info!("connected to test network {}", network_id); let accounts = web3 - .alloy + .provider .get_accounts() .await .expect("get accounts failed"); let admin = accounts[0]; - let weth = WETH9::Instance::deploy(web3.alloy.clone()).await.unwrap(); - - let balancer_authorizer = BalancerV2Authorizer::Instance::deploy(web3.alloy.clone(), admin) + let weth = WETH9::Instance::deploy(web3.provider.clone()) .await .unwrap(); + + let balancer_authorizer = + BalancerV2Authorizer::Instance::deploy(web3.provider.clone(), admin) + .await + .unwrap(); let balancer_vault = BalancerV2Vault::Instance::deploy( - web3.alloy.clone(), + web3.provider.clone(), *balancer_authorizer.address(), *weth.address(), U256::ZERO, @@ -148,18 +153,18 @@ impl Contracts { .await .unwrap(); - let uniswap_v2_factory = UniswapV2Factory::Instance::deploy(web3.alloy.clone(), admin) + let uniswap_v2_factory = UniswapV2Factory::Instance::deploy(web3.provider.clone(), admin) .await .unwrap(); let uniswap_v2_router = UniswapV2Router02::Instance::deploy( - web3.alloy.clone(), + web3.provider.clone(), *uniswap_v2_factory.address(), *weth.address(), ) .await .unwrap(); - let gp_authenticator = GPv2AllowListAuthentication::Instance::deploy(web3.alloy.clone()) + let gp_authenticator = GPv2AllowListAuthentication::Instance::deploy(web3.provider.clone()) .await .unwrap(); gp_authenticator @@ -168,16 +173,16 @@ impl Contracts { .await .expect("failed to initialize manager"); let gp_settlement = GPv2Settlement::Instance::deploy( - web3.alloy.clone(), + web3.provider.clone(), *gp_authenticator.address(), *balancer_vault.address(), ) .await .unwrap(); - let balances = Balances::Instance::deploy(web3.alloy.clone()) + let balances = Balances::Instance::deploy(web3.provider.clone()) .await .unwrap(); - let signatures = Signatures::Instance::deploy(web3.alloy.clone()) + let signatures = Signatures::Instance::deploy(web3.provider.clone()) .await .unwrap(); @@ -208,24 +213,25 @@ impl Contracts { ); let ethflow = 
CoWSwapEthFlow::Instance::deploy( - web3.alloy.clone(), + web3.provider.clone(), *gp_settlement.address(), *weth.address(), ) .await .unwrap(); let ethflow_secondary = CoWSwapEthFlow::Instance::deploy( - web3.alloy.clone(), + web3.provider.clone(), *gp_settlement.address(), *weth.address(), ) .await .unwrap(); - let hooks = HooksTrampoline::Instance::deploy(web3.alloy.clone(), *gp_settlement.address()) - .await - .unwrap(); + let hooks = + HooksTrampoline::Instance::deploy(web3.provider.clone(), *gp_settlement.address()) + .await + .unwrap(); let flashloan_router = - FlashLoanRouter::Instance::deploy(web3.alloy.clone(), *gp_settlement.address()) + FlashLoanRouter::Instance::deploy(web3.provider.clone(), *gp_settlement.address()) .await .unwrap(); diff --git a/crates/e2e/src/setup/onchain_components/mod.rs b/crates/e2e/src/setup/onchain_components/mod.rs index ecafbaee3e..1524b8ef59 100644 --- a/crates/e2e/src/setup/onchain_components/mod.rs +++ b/crates/e2e/src/setup/onchain_components/mod.rs @@ -54,7 +54,7 @@ impl TestAccount { } pub async fn nonce(&self, web3: &Web3) -> u64 { - web3.alloy + web3.provider .get_transaction_count(self.address()) .await .unwrap() @@ -293,7 +293,7 @@ impl OnchainComponents { for solver in &solvers { self.web3 - .alloy + .provider .anvil_send_impersonated_transaction_with_config( gpv2_auth .addSolver(solver.address()) @@ -313,7 +313,7 @@ impl OnchainComponents { if let Some(router) = &self.contracts.flashloan_router { self.web3 - .alloy + .provider .anvil_send_impersonated_transaction_with_config( gpv2_auth .addSolver(*router.address()) @@ -339,13 +339,14 @@ impl OnchainComponents { let mut res = Vec::with_capacity(N); for _ in 0..N { - let contract_address = ERC20Mintable::Instance::deploy_builder(self.web3.alloy.clone()) + let contract_address = ERC20Mintable::Instance::deploy_builder(self.web3.provider.clone()) // We can't escape the .from here because we need to ensure Minter permissions later on .from(minter) .deploy() .await .expect("ERC20Mintable deployment failed"); - let contract = ERC20Mintable::Instance::new(contract_address, self.web3.alloy.clone()); + let contract = + ERC20Mintable::Instance::new(contract_address, self.web3.provider.clone()); res.push(MintableToken { contract, minter }); } @@ -361,7 +362,7 @@ impl OnchainComponents { ) -> [MintableToken; N] { let minter = self .web3 - .alloy + .provider .get_accounts() .await .expect("getting accounts failed")[0]; @@ -497,7 +498,7 @@ impl OnchainComponents { .call() .await .expect("failed to get Uniswap V2 pair"), - self.web3.alloy.clone(), + self.web3.provider.clone(), ); assert!(!pair.address().is_zero(), "Uniswap V2 pair is not deployed"); @@ -524,7 +525,7 @@ impl OnchainComponents { pub async fn deploy_cow_token(&self, supply: U256) -> CowToken { let holder = NetworkWallet::::default_signer_address(&self.web3().wallet); let contract = CowProtocolToken::CowProtocolToken::deploy( - self.web3.alloy.clone(), + self.web3.provider.clone(), holder, holder, supply, @@ -591,9 +592,9 @@ impl OnchainComponents { } pub async fn send_wei(&self, to: Address, amount: U256) { - let balance_before = self.web3.alloy.get_balance(to).await.unwrap(); + let balance_before = self.web3.provider.get_balance(to).await.unwrap(); self.web3 - .alloy + .provider .send_transaction(TransactionRequest::default().with_to(to).with_value(amount)) .await .unwrap() @@ -606,13 +607,13 @@ impl OnchainComponents { // supposedly succeeds but the balances still don't get changed. 
// If you hit this assert try using a different block number for your // forked test. - let balance_after = self.web3.alloy.get_balance(to).await.unwrap(); + let balance_after = self.web3.provider.get_balance(to).await.unwrap(); assert_eq!(balance_after, balance_before + amount); } pub async fn mint_block(&self) { tracing::info!("mining block"); - self.web3.alloy.evm_mine(None).await.unwrap(); + self.web3.provider.evm_mine(None).await.unwrap(); } pub fn contracts(&self) -> &Contracts { diff --git a/crates/e2e/src/setup/services.rs b/crates/e2e/src/setup/services.rs index 023e36fe40..c018566960 100644 --- a/crates/e2e/src/setup/services.rs +++ b/crates/e2e/src/setup/services.rs @@ -924,7 +924,7 @@ impl<'a> Services<'a> { async fn mint_block(&self) { tracing::info!("mining block"); - self.web3.alloy.evm_mine(None).await.unwrap(); + self.web3.provider.evm_mine(None).await.unwrap(); } } diff --git a/crates/e2e/tests/e2e/app_data_signer.rs b/crates/e2e/tests/e2e/app_data_signer.rs index 4fe3fba81a..3630647dd2 100644 --- a/crates/e2e/tests/e2e/app_data_signer.rs +++ b/crates/e2e/tests/e2e/app_data_signer.rs @@ -94,7 +94,7 @@ async fn order_creation_checks_metadata_signer(web3: Web3) { // EIP-1271 - let safe = Safe::deploy(safe_owner.clone(), web3.alloy.clone()).await; + let safe = Safe::deploy(safe_owner.clone(), web3.provider.clone()).await; token_a.mint(safe.address(), 10u64.eth()).await; safe.exec_alloy_call( token_a diff --git a/crates/e2e/tests/e2e/banned_users.rs b/crates/e2e/tests/e2e/banned_users.rs index 5e4ab46dd3..4171cea0dd 100644 --- a/crates/e2e/tests/e2e/banned_users.rs +++ b/crates/e2e/tests/e2e/banned_users.rs @@ -35,15 +35,15 @@ async fn forked_mainnet_onchain_banned_user_test(web3: Web3) { let token_dai = ERC20::Instance::new( address!("6b175474e89094c44da98b954eedeac495271d0f"), - web3.alloy.clone(), + web3.provider.clone(), ); let token_usdt = ERC20::Instance::new( address!("dac17f958d2ee523a2206206994597c13d831ec7"), - web3.alloy.clone(), + web3.provider.clone(), ); - web3.alloy + web3.provider .anvil_send_impersonated_transaction_with_config( token_dai .transfer(BANNED_USER, 1000u64.eth()) @@ -61,7 +61,7 @@ async fn forked_mainnet_onchain_banned_user_test(web3: Web3) { .unwrap(); // Approve GPv2 for trading - web3.alloy + web3.provider .anvil_send_impersonated_transaction_with_config( token_dai .approve(onchain.contracts().allowance, 1000u64.eth()) diff --git a/crates/e2e/tests/e2e/cow_amm.rs b/crates/e2e/tests/e2e/cow_amm.rs index 78ab8afe62..0cb0cf730d 100644 --- a/crates/e2e/tests/e2e/cow_amm.rs +++ b/crates/e2e/tests/e2e/cow_amm.rs @@ -69,14 +69,15 @@ async fn cow_amm_jit(web3: Web3) { .await; // set up cow_amm - let oracle = - contracts::alloy::cow_amm::CowAmmUniswapV2PriceOracle::Instance::deploy(web3.alloy.clone()) - .await - .unwrap(); + let oracle = contracts::alloy::cow_amm::CowAmmUniswapV2PriceOracle::Instance::deploy( + web3.provider.clone(), + ) + .await + .unwrap(); let cow_amm_factory = contracts::alloy::cow_amm::CowAmmConstantProductFactory::Instance::deploy( - web3.alloy.clone(), + web3.provider.clone(), *onchain.contracts().gp_settlement.address(), ) .await @@ -146,7 +147,7 @@ async fn cow_amm_jit(web3: Web3) { .send_and_watch() .await .unwrap(); - let cow_amm = contracts::alloy::cow_amm::CowAmm::Instance::new(cow_amm, web3.alloy.clone()); + let cow_amm = contracts::alloy::cow_amm::CowAmm::Instance::new(cow_amm, web3.provider.clone()); // Start system with the regular baseline solver as a quoter but a mock solver // for the actual solver competition. 
That way we can handcraft a solution @@ -201,7 +202,7 @@ async fn cow_amm_jit(web3: Web3) { // a relatively small valid_to and we initialize the chain with a date in // the past so the computer's current time is way ahead of the blockchain. let block = web3 - .alloy + .provider .get_block(alloy::eips::BlockId::latest()) .await .unwrap() @@ -382,10 +383,10 @@ async fn cow_amm_driver_support(web3: Web3) { // since changing the forked number would result in very costly ~1 year of event // syncing, we deploy the following SCs let deployed_contracts = { - let balances = Balances::Instance::deploy(web3.alloy.clone()) + let balances = Balances::Instance::deploy(web3.provider.clone()) .await .unwrap(); - let signatures = Signatures::Instance::deploy(web3.alloy.clone()) + let signatures = Signatures::Instance::deploy(web3.provider.clone()) .await .unwrap(); DeployedContracts { @@ -404,12 +405,12 @@ async fn cow_amm_driver_support(web3: Web3) { // create necessary token instances let usdc = ERC20::Instance::new( address!("a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48"), - web3.alloy.clone(), + web3.provider.clone(), ); let usdt = ERC20::Instance::new( address!("dac17f958d2ee523a2206206994597c13d831ec7"), - web3.alloy.clone(), + web3.provider.clone(), ); // Unbalance the cow amm enough that baseline is able to rebalance @@ -452,7 +453,7 @@ async fn cow_amm_driver_support(web3: Web3) { // settle. // Give trader some USDC - web3.alloy + web3.provider .anvil_send_impersonated_transaction_with_config( usdc.transfer(trader.address(), 1000u64.matom()) .from(USDC_WHALE_MAINNET) @@ -479,14 +480,14 @@ async fn cow_amm_driver_support(web3: Web3) { const ZERO_BALANCE_AMM: Address = address!("b3bf81714f704720dcb0351ff0d42eca61b069fc"); let pendle_token = ERC20::Instance::new( address!("808507121b80c02388fad14726482e061b8da827"), - web3.alloy.clone(), + web3.provider.clone(), ); let balance = pendle_token .balanceOf(ZERO_BALANCE_AMM) .call() .await .unwrap(); - web3.alloy + web3.provider .anvil_send_impersonated_transaction_with_config( pendle_token .transfer( @@ -690,14 +691,15 @@ async fn cow_amm_opposite_direction(web3: Web3) { // the user order. // Set up the CoW AMM as before - let oracle = - contracts::alloy::cow_amm::CowAmmUniswapV2PriceOracle::Instance::deploy(web3.alloy.clone()) - .await - .unwrap(); + let oracle = contracts::alloy::cow_amm::CowAmmUniswapV2PriceOracle::Instance::deploy( + web3.provider.clone(), + ) + .await + .unwrap(); let cow_amm_factory = contracts::alloy::cow_amm::CowAmmConstantProductFactory::Instance::deploy( - web3.alloy.clone(), + web3.provider.clone(), *onchain.contracts().gp_settlement.address(), ) .await @@ -780,7 +782,7 @@ async fn cow_amm_opposite_direction(web3: Web3) { .await .unwrap(); let cow_amm = - contracts::alloy::cow_amm::CowAmm::Instance::new(cow_amm_address, web3.alloy.clone()); + contracts::alloy::cow_amm::CowAmm::Instance::new(cow_amm_address, web3.provider.clone()); // Start system with the mocked solver. Baseline is still required for the // native price estimation. 
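The next hunk reads the current block through the renamed `provider` handle to derive order validity from on-chain time rather than wall-clock time (the test chain is initialized with a timestamp in the past). A minimal sketch of that pattern, assuming the usual `header.timestamp` field on the returned block and an illustrative one-hour offset, neither of which is taken from this patch:

    // Sketch: derive a `valid_to` from on-chain time via the renamed `provider` field.
    async fn valid_to_one_hour_from_chain(web3: &Web3) -> anyhow::Result<u32> {
        let block = web3
            .provider
            .get_block(alloy::eips::BlockId::latest())
            .await?
            .expect("chain must have a latest block");
        // Block timestamps are seconds since the Unix epoch (assumed field layout).
        Ok(block.header.timestamp as u32 + 3600)
    }
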
@@ -831,7 +833,7 @@ async fn cow_amm_opposite_direction(web3: Web3) { // Get the current block timestamp let block = web3 - .alloy + .provider .get_block(alloy::eips::BlockId::latest()) .await .unwrap() diff --git a/crates/e2e/tests/e2e/eth_integration.rs b/crates/e2e/tests/e2e/eth_integration.rs index 51eaa0f4b2..1a0417ddb2 100644 --- a/crates/e2e/tests/e2e/eth_integration.rs +++ b/crates/e2e/tests/e2e/eth_integration.rs @@ -46,7 +46,7 @@ async fn eth_integration(web3: Web3) { .await .unwrap(); - let trader_a_eth_balance_before = web3.alloy.get_balance(trader_a.address()).await.unwrap(); + let trader_a_eth_balance_before = web3.provider.get_balance(trader_a.address()).await.unwrap(); let services = Services::new(&onchain).await; services.start_protocol(solver).await; @@ -109,8 +109,8 @@ async fn eth_integration(web3: Web3) { tracing::info!("Waiting for trade."); onchain.mint_block().await; let trade_happened = || async { - let balance_a = web3.alloy.get_balance(trader_a.address()).await.unwrap(); - let balance_b = web3.alloy.get_balance(trader_b.address()).await.unwrap(); + let balance_a = web3.provider.get_balance(trader_a.address()).await.unwrap(); + let balance_b = web3.provider.get_balance(trader_b.address()).await.unwrap(); let trader_a_eth_decreased = (balance_a - trader_a_eth_balance_before) == 49u64.eth(); let trader_b_eth_increased = balance_b >= 49u64.eth(); diff --git a/crates/e2e/tests/e2e/eth_safe.rs b/crates/e2e/tests/e2e/eth_safe.rs index cdba2f6bfd..3925e8f60f 100644 --- a/crates/e2e/tests/e2e/eth_safe.rs +++ b/crates/e2e/tests/e2e/eth_safe.rs @@ -22,7 +22,7 @@ async fn test(web3: Web3) { let [solver] = onchain.make_solvers(10u64.eth()).await; let [trader] = onchain.make_accounts(10u64.eth()).await; - let safe = Safe::deploy(trader.clone(), web3.alloy.clone()).await; + let safe = Safe::deploy(trader.clone(), web3.provider.clone()).await; let [token] = onchain .deploy_tokens_with_weth_uni_v2_pools(1000u64.eth(), 1000u64.eth()) .await; @@ -77,7 +77,7 @@ async fn test(web3: Web3) { tracing::info!("Waiting for trade."); let trade_happened = || async { - let safe_balance = web3.alloy.get_balance(safe.address()).await.unwrap(); + let safe_balance = web3.provider.get_balance(safe.address()).await.unwrap(); // the balance is slightly less because of the fee U256::from(3_899_000_000_000_000_000_u128) <= safe_balance && safe_balance <= U256::from(4_000_000_000_000_000_000_u128) diff --git a/crates/e2e/tests/e2e/ethflow.rs b/crates/e2e/tests/e2e/ethflow.rs index bf1025bd0a..eabbb5a6f8 100644 --- a/crates/e2e/tests/e2e/ethflow.rs +++ b/crates/e2e/tests/e2e/ethflow.rs @@ -152,7 +152,7 @@ async fn eth_flow_tx(web3: Web3) { let quote: OrderQuoteResponse = test_submit_quote(&services, "e_request).await; let valid_to = chrono::offset::Utc::now().timestamp() as u32 - + timestamp_of_current_block_in_seconds(&web3.alloy) + + timestamp_of_current_block_in_seconds(&web3.provider) .await .unwrap() + 3600; @@ -260,7 +260,7 @@ async fn eth_flow_without_quote(web3: Web3) { services.start_protocol(solver).await; let valid_to = chrono::offset::Utc::now().timestamp() as u32 - + timestamp_of_current_block_in_seconds(&web3.alloy) + + timestamp_of_current_block_in_seconds(&web3.provider) .await .unwrap() + 3600; @@ -311,7 +311,7 @@ async fn eth_flow_indexing_after_refund(web3: Web3) { services.start_protocol(solver).await; // Create an order that only exists to be cancelled. 
- let valid_to = timestamp_of_current_block_in_seconds(&web3.alloy) + let valid_to = timestamp_of_current_block_in_seconds(&web3.provider) .await .unwrap() + 60; @@ -348,7 +348,7 @@ async fn eth_flow_indexing_after_refund(web3: Web3) { let receiver = Address::repeat_byte(0x42); let sell_amount = 1u64.eth(); let valid_to = chrono::offset::Utc::now().timestamp() as u32 - + timestamp_of_current_block_in_seconds(&web3.alloy) + + timestamp_of_current_block_in_seconds(&web3.provider) .await .unwrap() + 60; @@ -498,7 +498,7 @@ async fn test_order_was_settled(ethflow_order: &ExtendedEthFlowOrder, onchain: & wait_for_condition(TIMEOUT, || async { onchain.mint_block().await; let buy_token = - ERC20Mintable::Instance::new(ethflow_order.0.buyToken, onchain.web3().alloy.clone()); + ERC20Mintable::Instance::new(ethflow_order.0.buyToken, onchain.web3().provider.clone()); let receiver_buy_token_balance = buy_token .balanceOf(ethflow_order.0.receiver) .call() @@ -836,7 +836,7 @@ async fn eth_flow_zero_buy_amount(web3: Web3) { let place_order = async |trader: TestAccount, buy_amount: u64| { let valid_to = chrono::offset::Utc::now().timestamp() as u32 - + timestamp_of_current_block_in_seconds(&web3.alloy) + + timestamp_of_current_block_in_seconds(&web3.provider) .await .unwrap() + 3600; diff --git a/crates/e2e/tests/e2e/hooks.rs b/crates/e2e/tests/e2e/hooks.rs index 2339f7e281..39696c2904 100644 --- a/crates/e2e/tests/e2e/hooks.rs +++ b/crates/e2e/tests/e2e/hooks.rs @@ -251,12 +251,12 @@ async fn allowance(web3: Web3) { async fn signature(web3: Web3) { let mut onchain = OnchainComponents::deploy(web3.clone()).await; - let chain_id = U256::from(web3.alloy.get_chain_id().await.unwrap()); + let chain_id = U256::from(web3.provider.get_chain_id().await.unwrap()); let [solver] = onchain.make_solvers(1u64.eth()).await; let [trader] = onchain.make_accounts(1u64.eth()).await; - let safe_infra = onchain_components::safe::Infrastructure::new(web3.alloy.clone()).await; + let safe_infra = onchain_components::safe::Infrastructure::new(web3.provider.clone()).await; // Prepare the Safe creation transaction, but don't execute it! This will // be executed as a pre-hook. @@ -287,7 +287,7 @@ async fn signature(web3: Web3) { let safe_address = safe_creation_builder.clone().call().await.unwrap(); let safe = Safe::deployed( chain_id, - contracts::alloy::GnosisSafe::Instance::new(safe_address, web3.alloy.clone()), + contracts::alloy::GnosisSafe::Instance::new(safe_address, web3.provider.clone()), trader.clone(), ); @@ -361,7 +361,7 @@ async fn signature(web3: Web3) { assert_eq!(balance, 5u64.eth()); // Check that the Safe really hasn't been deployed yet. 
- let code = web3.alloy.get_code_at(safe.address()).await.unwrap(); + let code = web3.provider.get_code_at(safe.address()).await.unwrap(); assert_eq!(code.0.len(), 0); tracing::info!("Waiting for trade."); @@ -387,7 +387,7 @@ async fn signature(web3: Web3) { assert!(balance >= order.buy_amount); // Check Safe was deployed - let code = web3.alloy.get_code_at(safe.address()).await.unwrap(); + let code = web3.provider.get_code_at(safe.address()).await.unwrap(); assert_ne!(code.0.len(), 0); tracing::info!("Waiting for auction to be cleared."); @@ -401,7 +401,7 @@ async fn partial_fills(web3: Web3) { let [solver] = onchain.make_solvers(1u64.eth()).await; let [trader] = onchain.make_accounts(3u64.eth()).await; - let counter = contracts::alloy::test::Counter::Instance::deploy(web3.alloy.clone()) + let counter = contracts::alloy::test::Counter::Instance::deploy(web3.provider.clone()) .await .unwrap(); @@ -523,12 +523,12 @@ async fn partial_fills(web3: Web3) { async fn quote_verification(web3: Web3) { let mut onchain = OnchainComponents::deploy(web3.clone()).await; - let chain_id = U256::from(web3.alloy.get_chain_id().await.unwrap()); + let chain_id = U256::from(web3.provider.get_chain_id().await.unwrap()); let [trader] = onchain.make_accounts(1u64.eth()).await; let [solver] = onchain.make_solvers(1u64.eth()).await; - let safe_infra = onchain_components::safe::Infrastructure::new(web3.alloy.clone()).await; + let safe_infra = onchain_components::safe::Infrastructure::new(web3.provider.clone()).await; // Prepare the Safe creation transaction, but don't execute it! This will // be executed as a pre-hook. @@ -554,7 +554,7 @@ async fn quote_verification(web3: Web3) { let safe = Safe::deployed( chain_id, - contracts::alloy::GnosisSafe::Instance::new(safe_address, web3.alloy.clone()), + contracts::alloy::GnosisSafe::Instance::new(safe_address, web3.provider.clone()), trader.clone(), ); diff --git a/crates/e2e/tests/e2e/limit_orders.rs b/crates/e2e/tests/e2e/limit_orders.rs index d4de694f72..6417277b19 100644 --- a/crates/e2e/tests/e2e/limit_orders.rs +++ b/crates/e2e/tests/e2e/limit_orders.rs @@ -863,16 +863,16 @@ async fn forked_mainnet_single_limit_order_test(web3: Web3) { let token_usdc = ERC20::Instance::new( address!("a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48"), - web3.alloy.clone(), + web3.provider.clone(), ); let token_usdt = ERC20::Instance::new( address!("dac17f958d2ee523a2206206994597c13d831ec7"), - web3.alloy.clone(), + web3.provider.clone(), ); // Give trader some USDC - web3.alloy + web3.provider .anvil_send_impersonated_transaction_with_config( token_usdc .transfer(trader.address(), 1000u64.matom()) @@ -963,16 +963,16 @@ async fn forked_gnosis_single_limit_order_test(web3: Web3) { let token_usdc = ERC20::Instance::new( address!("ddafbb505ad214d7b80b1f830fccc89b60fb7a83"), - web3.alloy.clone(), + web3.provider.clone(), ); let token_wxdai = ERC20::Instance::new( address!("e91d153e0b41518a2ce8dd3d7944fa863463a97d"), - web3.alloy.clone(), + web3.provider.clone(), ); // Give trader some USDC - web3.alloy + web3.provider .anvil_send_impersonated_transaction_with_config( token_usdc .transfer(trader.address(), 1000u64.matom()) diff --git a/crates/e2e/tests/e2e/liquidity.rs b/crates/e2e/tests/e2e/liquidity.rs index cba31e19ad..5e13e3baf2 100644 --- a/crates/e2e/tests/e2e/liquidity.rs +++ b/crates/e2e/tests/e2e/liquidity.rs @@ -54,21 +54,21 @@ async fn zero_ex_liquidity(web3: Web3) { let token_usdc = ERC20::Instance::new( address!("a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48"), - web3.alloy.clone(), + 
web3.provider.clone(), ); let token_usdt = ERC20::Instance::new( address!("dac17f958d2ee523a2206206994597c13d831ec7"), - web3.alloy.clone(), + web3.provider.clone(), ); web3.wallet.register_signer(solver.signer.clone()); - let zeroex = IZeroex::Instance::deployed(&web3.alloy).await.unwrap(); + let zeroex = IZeroex::Instance::deployed(&web3.provider).await.unwrap(); let amount = 500u64.matom(); // Give trader some USDC - web3.alloy + web3.provider .anvil_send_impersonated_transaction_with_config( token_usdc .transfer(trader.address(), amount) @@ -87,7 +87,7 @@ async fn zero_ex_liquidity(web3: Web3) { // Give 0x maker a bit more USDT // With a lower amount 0x contract shows much lower fillable amount - web3.alloy + web3.provider .anvil_send_impersonated_transaction_with_config( token_usdt .transfer( @@ -107,7 +107,7 @@ async fn zero_ex_liquidity(web3: Web3) { .await .unwrap(); // Required for the remaining fillable taker amount - web3.alloy + web3.provider .anvil_send_impersonated_transaction_with_config( token_usdc .transfer(solver.address(), amount) @@ -159,7 +159,7 @@ async fn zero_ex_liquidity(web3: Web3) { &trader.signer, ); - let chain_id = web3.alloy.get_chain_id().await.unwrap(); + let chain_id = web3.provider.get_chain_id().await.unwrap(); let zeroex_liquidity_orders = create_zeroex_liquidity_orders( order.clone(), zeroex_maker.clone(), diff --git a/crates/e2e/tests/e2e/liquidity_source_notification.rs b/crates/e2e/tests/e2e/liquidity_source_notification.rs index cef36e12b8..8a2cb42b1f 100644 --- a/crates/e2e/tests/e2e/liquidity_source_notification.rs +++ b/crates/e2e/tests/e2e/liquidity_source_notification.rs @@ -65,17 +65,17 @@ async fn liquidity_source_notification(web3: Web3) { // Access trade tokens contracts let token_usdc = ERC20::Instance::new( address!("a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48"), - web3.alloy.clone(), + web3.provider.clone(), ); let token_usdt = ERC20::Instance::new( address!("dac17f958d2ee523a2206206994597c13d831ec7"), - web3.alloy.clone(), + web3.provider.clone(), ); // CoW onchain setup // Fund trader - web3.alloy + web3.provider .anvil_send_impersonated_transaction_with_config( token_usdc .transfer(trader.address(), trade_amount) @@ -93,7 +93,7 @@ async fn liquidity_source_notification(web3: Web3) { .unwrap(); // Fund solver - web3.alloy + web3.provider .anvil_send_impersonated_transaction_with_config( token_usdc .transfer(solver.address(), trade_amount) @@ -122,7 +122,7 @@ async fn liquidity_source_notification(web3: Web3) { // Liquorice settlement contract through which we will trade with the // `liquorice_maker` - let liquorice_settlement = LiquoriceSettlement::Instance::deployed(&web3.alloy) + let liquorice_settlement = LiquoriceSettlement::Instance::deployed(&web3.provider) .await .unwrap(); @@ -133,7 +133,7 @@ async fn liquidity_source_notification(web3: Web3) { .expect("no balance manager found"); // Fund `liquorice_maker` - web3.alloy + web3.provider .anvil_send_impersonated_transaction_with_config( token_usdt .transfer(liquorice_maker.address(), trade_amount) diff --git a/crates/e2e/tests/e2e/place_order_with_quote.rs b/crates/e2e/tests/e2e/place_order_with_quote.rs index e1aeb18774..07bef2aa6c 100644 --- a/crates/e2e/tests/e2e/place_order_with_quote.rs +++ b/crates/e2e/tests/e2e/place_order_with_quote.rs @@ -57,7 +57,7 @@ async fn place_order_with_quote(web3: Web3) { services.start_protocol(solver.clone()).await; // Disable auto-mine so we don't accidentally mine a settlement - web3.alloy + web3.provider .evm_set_automine(false) .await 
.expect("Must be able to disable automine"); @@ -140,7 +140,7 @@ async fn disabled_same_sell_and_buy_token_order_feature(web3: Web3) { services.start_protocol(solver.clone()).await; // Disable auto-mine so we don't accidentally mine a settlement - web3.alloy + web3.provider .evm_set_automine(false) .await .expect("Must be able to disable automine"); diff --git a/crates/e2e/tests/e2e/quote_verification.rs b/crates/e2e/tests/e2e/quote_verification.rs index 295cb0c636..a288b082b4 100644 --- a/crates/e2e/tests/e2e/quote_verification.rs +++ b/crates/e2e/tests/e2e/quote_verification.rs @@ -143,7 +143,7 @@ async fn test_bypass_verification_for_rfq_quotes(web3: Web3) { "https" => url.set_scheme("wss").unwrap(), _ => unreachable!("unexpected scheme"), } - let block_stream = ethrpc::block_stream::current_block_ws_stream(web3.alloy.clone(), url) + let block_stream = ethrpc::block_stream::current_block_ws_stream(web3.provider.clone(), url) .await .unwrap(); let onchain = OnchainComponents::deployed(web3.clone()).await; @@ -421,7 +421,7 @@ async fn verified_quote_with_simulated_balance(web3: Web3) { assert!( onchain .web3() - .alloy + .provider .get_balance(trader.address()) .await .unwrap() @@ -567,7 +567,7 @@ async fn trace_based_balance_detection(web3: Web3) { // offset within a struct mapping, making it undetectable by standard slot // calculation methods let struct_offset_token = - contracts::alloy::test::NonStandardERC20Balances::Instance::deploy(web3.alloy.clone()) + contracts::alloy::test::NonStandardERC20Balances::Instance::deploy(web3.provider.clone()) .await .unwrap(); @@ -577,14 +577,14 @@ async fn trace_based_balance_detection(web3: Web3) { // calling another contract to get a balance--or calling another contract to // *not* get a balance) let local_storage_token = contracts::alloy::test::RemoteERC20Balances::Instance::deploy( - web3.alloy.clone(), + web3.provider.clone(), weth, true, ) .await .unwrap(); let delegated_storage_token = contracts::alloy::test::RemoteERC20Balances::Instance::deploy( - web3.alloy.clone(), + web3.provider.clone(), weth, false, ) @@ -697,7 +697,7 @@ async fn trace_based_balance_detection(web3: Web3) { let (override_token, state_override) = override_result.unwrap(); // Call balanceOf with the state override to verify it works - let token_contract = ERC20::Instance::new(token, web3.alloy.clone()); + let token_contract = ERC20::Instance::new(token, web3.provider.clone()); let balance = token_contract .balanceOf(test_account) .state(AddressMap::from_iter([( diff --git a/crates/e2e/tests/e2e/refunder.rs b/crates/e2e/tests/e2e/refunder.rs index 01ace9ad94..36f50f5a7b 100644 --- a/crates/e2e/tests/e2e/refunder.rs +++ b/crates/e2e/tests/e2e/refunder.rs @@ -31,11 +31,11 @@ async fn advance_time_past_expiration(web3: &Web3, valid_to: u32) { // Add 60 seconds buffer so the order is definitively expired, not just at the // boundary. 
let target_timestamp = valid_to as u64 + 60; - web3.alloy + web3.provider .evm_set_next_block_timestamp(target_timestamp) .await .expect("Must be able to set block timestamp"); - web3.alloy + web3.provider .evm_mine(None) .await .expect("Unable to mine next block"); @@ -277,7 +277,7 @@ async fn run_refunder_threshold_test( let ethflow_contract = onchain.contracts().ethflows.first().unwrap(); // Compute absolute valid_to timestamp from blockchain time + duration - let valid_to = timestamp_of_current_block_in_seconds(&web3.alloy) + let valid_to = timestamp_of_current_block_in_seconds(&web3.provider) .await .unwrap() + validity.order; @@ -596,7 +596,7 @@ async fn refunder_multiple_ethflow_contracts(web3: Web3) { let buy_token = *token.address(); let validity_duration = 600u32; - let valid_to = timestamp_of_current_block_in_seconds(&web3.alloy) + let valid_to = timestamp_of_current_block_in_seconds(&web3.provider) .await .unwrap() + validity_duration; diff --git a/crates/e2e/tests/e2e/replace_order.rs b/crates/e2e/tests/e2e/replace_order.rs index d2175f94e2..ef76ccdcc6 100644 --- a/crates/e2e/tests/e2e/replace_order.rs +++ b/crates/e2e/tests/e2e/replace_order.rs @@ -153,7 +153,7 @@ async fn try_replace_unreplaceable_order_test(web3: Web3) { .unwrap(); // disable auto mining to prevent order being immediately executed - web3.alloy.evm_set_automine(false).await.unwrap(); + web3.provider.evm_set_automine(false).await.unwrap(); // Place Orders let services = Services::new(&onchain).await; @@ -224,7 +224,7 @@ async fn try_replace_unreplaceable_order_test(web3: Web3) { ); // Continue automining so our order can be executed - web3.alloy + web3.provider .evm_set_automine(true) .await .expect("Must be able to disable auto-mining"); diff --git a/crates/e2e/tests/e2e/smart_contract_orders.rs b/crates/e2e/tests/e2e/smart_contract_orders.rs index 3c8e9e5a42..bc1ca03996 100644 --- a/crates/e2e/tests/e2e/smart_contract_orders.rs +++ b/crates/e2e/tests/e2e/smart_contract_orders.rs @@ -29,7 +29,7 @@ async fn smart_contract_orders(web3: Web3) { let [solver] = onchain.make_solvers(1u64.eth()).await; let [trader] = onchain.make_accounts(1u64.eth()).await; - let safe = Safe::deploy(trader, web3.alloy.clone()).await; + let safe = Safe::deploy(trader, web3.provider.clone()).await; let [token] = onchain .deploy_tokens_with_weth_uni_v2_pools(100_000u64.eth(), 100_000u64.eth()) @@ -155,7 +155,7 @@ async fn erc1271_gas_limit(web3: Web3) { let mut onchain = OnchainComponents::deploy(web3.clone()).await; let [solver] = onchain.make_solvers(1u64.eth()).await; - let trader = contracts::alloy::test::GasHog::Instance::deploy(web3.alloy.clone()) + let trader = contracts::alloy::test::GasHog::Instance::deploy(web3.provider.clone()) .await .unwrap(); diff --git a/crates/e2e/tests/e2e/submission.rs b/crates/e2e/tests/e2e/submission.rs index f45c449e98..562c7fd87a 100644 --- a/crates/e2e/tests/e2e/submission.rs +++ b/crates/e2e/tests/e2e/submission.rs @@ -71,7 +71,7 @@ async fn test_cancel_on_expiry(web3: Web3) { services.start_protocol(solver.clone()).await; // Disable auto-mine so we don't accidentally mine a settlement - web3.alloy + web3.provider .evm_set_automine(false) .await .expect("Must be able to disable automine"); @@ -98,7 +98,7 @@ async fn test_cancel_on_expiry(web3: Web3) { // Start tracking confirmed blocks so we can find the transaction later let block_stream = web3 - .alloy + .provider .watch_blocks() .await .expect("must be able to create blocks filter") @@ -112,11 +112,11 @@ async fn test_cancel_on_expiry(web3: 
Web3) { .unwrap(); // Restart mining, but with blocks that are too small to fit the settlement - web3.alloy + web3.provider .evm_set_block_gas_limit(100_000) .await .expect("Must be able to set block gas limit"); - web3.alloy + web3.provider .evm_set_interval_mining(1) .await .expect("Must be able to set mining interval"); @@ -167,7 +167,7 @@ async fn test_submit_same_sell_and_buy_token_order_without_quote(web3: Web3) { .await; // Disable auto-mine so we don't accidentally mine a settlement - web3.alloy + web3.provider .evm_set_automine(false) .await .expect("Must be able to disable automine"); @@ -196,7 +196,7 @@ async fn test_submit_same_sell_and_buy_token_order_without_quote(web3: Web3) { services.create_order(&order).await.unwrap(); // Start tracking confirmed blocks so we can find the transaction later let block_stream = web3 - .alloy + .provider .watch_blocks() .await .expect("must be able to create blocks filter") @@ -213,7 +213,7 @@ async fn test_submit_same_sell_and_buy_token_order_without_quote(web3: Web3) { .unwrap(); // Continue mining to confirm the settlement - web3.alloy + web3.provider .evm_set_automine(true) .await .expect("Must be able to enable automine"); @@ -278,7 +278,7 @@ async fn test_execute_same_sell_and_buy_token(web3: Web3) { .await; // Disable auto-mine so we don't accidentally mine a settlement - web3.alloy + web3.provider .evm_set_automine(false) .await .expect("Must be able to disable automine"); @@ -358,7 +358,7 @@ async fn test_execute_same_sell_and_buy_token(web3: Web3) { // Start tracking confirmed blocks so we can find the transaction later let block_stream = web3 - .alloy + .provider .watch_blocks() .await .expect("must be able to create blocks filter") @@ -375,7 +375,7 @@ async fn test_execute_same_sell_and_buy_token(web3: Web3) { .unwrap(); // Continue mining to confirm the settlement - web3.alloy + web3.provider .evm_set_automine(true) .await .expect("Must be able to enable automine"); @@ -411,7 +411,7 @@ async fn test_execute_same_sell_and_buy_token(web3: Web3) { async fn get_pending_tx(account: Address, web3: &Web3) -> Option { let txpool = web3 - .alloy + .provider .txpool_content() .await .expect("must be able to inspect mempool"); @@ -428,7 +428,7 @@ async fn get_confirmed_transaction( let block_hashes = block_hash_stream.next().await.unwrap(); for block_hash in block_hashes { let transaction_senders = web3 - .alloy + .provider .get_block_receipts(block_hash.into()) .await .unwrap() diff --git a/crates/e2e/tests/e2e/wrapper.rs b/crates/e2e/tests/e2e/wrapper.rs index f7653a2ed8..6bcf5c47f6 100644 --- a/crates/e2e/tests/e2e/wrapper.rs +++ b/crates/e2e/tests/e2e/wrapper.rs @@ -50,11 +50,11 @@ async fn forked_mainnet_wrapper_test(web3: Web3) { let token_weth = onchain.contracts().weth.clone(); let token_usdc = ERC20::Instance::new( address!("a0b86991c6218b36c1d19d4a2e9eb0ce3606eb48"), - web3.alloy.clone(), + web3.provider.clone(), ); // Authorize the empty wrapper as a solver - web3.alloy + web3.provider .anvil_send_impersonated_transaction_with_config( onchain .contracts() @@ -214,7 +214,7 @@ async fn forked_mainnet_wrapper_test(web3: Web3) { tracing::info!("Settlement transaction hash: {:?}", solve_tx_hash); let solve_tx = web3 - .alloy + .provider .get_transaction_by_hash(solve_tx_hash) .await .unwrap() @@ -245,7 +245,7 @@ async fn forked_mainnet_wrapper_test(web3: Web3) { }; let trace = web3 - .alloy + .provider .debug_trace_transaction(solve_tx_hash, tracing_options) .await .unwrap(); diff --git a/crates/ethrpc/src/alloy/instrumentation.rs 
b/crates/ethrpc/src/alloy/instrumentation.rs index d4bd4143b4..02dbfa5eaa 100644 --- a/crates/ethrpc/src/alloy/instrumentation.rs +++ b/crates/ethrpc/src/alloy/instrumentation.rs @@ -156,8 +156,8 @@ pub trait ProviderLabelingExt { impl ProviderLabelingExt for Web3 { fn labeled(&self, label: S) -> Self { - let is_local = self.alloy.client().is_local(); - let transport = self.alloy.client().transport().clone(); + let is_local = self.provider.client().is_local(); + let transport = self.provider.client().transport().clone(); let transport_with_label = LabelingLayer { label: label.to_string(), } @@ -171,7 +171,7 @@ impl ProviderLabelingExt for Web3 { .erased(); Self { - alloy, + provider: alloy, wallet: self.wallet.clone(), } } diff --git a/crates/ethrpc/src/block_stream.rs b/crates/ethrpc/src/block_stream.rs index eeff615e5d..cd1de4448b 100644 --- a/crates/ethrpc/src/block_stream.rs +++ b/crates/ethrpc/src/block_stream.rs @@ -526,7 +526,7 @@ mod tests { let alloy_provider = Web3::new_from_env(); let ws_node = std::env::var("NODE_WS_URL").unwrap().parse().unwrap(); - let receiver = current_block_ws_stream(alloy_provider.alloy, ws_node) + let receiver = current_block_ws_stream(alloy_provider.provider, ws_node) .await .unwrap(); let mut stream = into_stream(receiver); @@ -543,13 +543,13 @@ mod tests { // single block let range = RangeInclusive::try_new(5, 5).unwrap(); - let blocks = web3.alloy.blocks(range).await.unwrap(); + let blocks = web3.provider.blocks(range).await.unwrap(); assert_eq!(blocks.len(), 1); assert_eq!(blocks.last().unwrap().0, 5); // multiple blocks let range = RangeInclusive::try_new(5, 8).unwrap(); - let blocks = web3.alloy.blocks(range).await.unwrap(); + let blocks = web3.provider.blocks(range).await.unwrap(); assert_eq!(blocks.len(), 4); assert_eq!(blocks.last().unwrap().0, 8); assert_eq!(blocks.first().unwrap().0, 5); @@ -562,7 +562,7 @@ mod tests { current_block_number, ) .unwrap(); - let blocks = web3.alloy.blocks(range).await.unwrap(); + let blocks = web3.provider.blocks(range).await.unwrap(); assert_eq!(blocks.len(), 6); assert_eq!(blocks.last().unwrap().0, 5); assert_eq!(blocks.first().unwrap().0, 0); diff --git a/crates/ethrpc/src/lib.rs b/crates/ethrpc/src/lib.rs index b84d8a7fb4..c11eac9386 100644 --- a/crates/ethrpc/src/lib.rs +++ b/crates/ethrpc/src/lib.rs @@ -17,7 +17,7 @@ pub type AlloyProvider = DynProvider; /// having to everything at once. #[derive(Debug, Clone)] pub struct Web3 { - pub alloy: AlloyProvider, + pub provider: AlloyProvider, pub wallet: MutWallet, } @@ -30,7 +30,10 @@ impl Web3 { pub fn new_from_url(url: &str) -> Self { let (alloy, wallet) = crate::alloy::provider(url, Default::default(), None); - Self { alloy, wallet } + Self { + provider: alloy, + wallet, + } } } @@ -69,5 +72,8 @@ pub fn web3(args: Config, url: &Url, label: Option<&str>) -> Web3 { _ => alloy::provider(url.as_str(), args, label), }; - Web3 { alloy, wallet } + Web3 { + provider: alloy, + wallet, + } } diff --git a/crates/ethrpc/src/mock.rs b/crates/ethrpc/src/mock.rs index 2ed9f586cb..53dd3aeb3f 100644 --- a/crates/ethrpc/src/mock.rs +++ b/crates/ethrpc/src/mock.rs @@ -11,7 +11,7 @@ impl Web3 { // this will not behave like the original mock transport but it's only used // in one place so let's keep this for now and fix it when we switch to // alloy in the 1 place that uses the mock provider. 
- alloy: ProviderBuilder::new() + provider: ProviderBuilder::new() .connect_mocked_client(asserter) .erased(), wallet: MutWallet::default(), @@ -21,7 +21,7 @@ impl Web3 { pub fn web3() -> Web3 { Web3 { - alloy: ProviderBuilder::new() + provider: ProviderBuilder::new() .connect_mocked_client(Asserter::new()) .erased(), wallet: MutWallet::default(), diff --git a/crates/orderbook/src/run.rs b/crates/orderbook/src/run.rs index 1271d956e6..c17bcc5bfd 100644 --- a/crates/orderbook/src/run.rs +++ b/crates/orderbook/src/run.rs @@ -86,7 +86,7 @@ pub async fn run(args: Arguments) { .map(|node_url| shared::ethrpc::web3(&args.shared.ethrpc, node_url, "simulation")); let chain_id = web3 - .alloy + .provider .get_chain_id() .await .expect("Could not get chainId"); @@ -98,14 +98,14 @@ pub async fn run(args: Arguments) { } let settlement_contract = match args.shared.settlement_contract_address { - Some(address) => GPv2Settlement::Instance::new(address, web3.alloy.clone()), - None => GPv2Settlement::Instance::deployed(&web3.alloy) + Some(address) => GPv2Settlement::Instance::new(address, web3.provider.clone()), + None => GPv2Settlement::Instance::deployed(&web3.provider) .await .expect("load settlement contract"), }; let balances_contract = match args.shared.balances_contract_address { - Some(address) => Balances::Instance::new(address, web3.alloy.clone()), - None => Balances::Instance::deployed(&web3.alloy.clone()) + Some(address) => Balances::Instance::new(address, web3.provider.clone()), + None => Balances::Instance::deployed(&web3.provider.clone()) .await .expect("load balances contract"), }; @@ -116,15 +116,15 @@ pub async fn run(args: Arguments) { .expect("Couldn't get vault relayer address"); let signatures_contract = match args.shared.signatures_contract_address { Some(address) => { - contracts::alloy::support::Signatures::Instance::new(address, web3.alloy.clone()) + contracts::alloy::support::Signatures::Instance::new(address, web3.provider.clone()) } - None => contracts::alloy::support::Signatures::Instance::deployed(&web3.alloy) + None => contracts::alloy::support::Signatures::Instance::deployed(&web3.provider) .await .expect("load signatures contract"), }; let native_token = match args.shared.native_token_address { - Some(address) => WETH9::Instance::new(address, web3.alloy.clone()), - None => WETH9::Instance::deployed(&web3.alloy) + Some(address) => WETH9::Instance::new(address, web3.provider.clone()), + None => WETH9::Instance::deployed(&web3.provider) .await .expect("load native token contract"), }; @@ -156,11 +156,11 @@ pub async fn run(args: Arguments) { } }); let vault = - vault_address.map(|address| BalancerV2Vault::Instance::new(address, web3.alloy.clone())); + vault_address.map(|address| BalancerV2Vault::Instance::new(address, web3.provider.clone())); let hooks_contract = match args.shared.hooks_contract_address { - Some(address) => HooksTrampoline::Instance::new(address, web3.alloy.clone()), - None => HooksTrampoline::Instance::deployed(&web3.alloy) + Some(address) => HooksTrampoline::Instance::new(address, web3.provider.clone()), + None => HooksTrampoline::Instance::deployed(&web3.provider) .await .expect("load hooks trampoline contract"), }; @@ -234,7 +234,7 @@ pub async fn run(args: Arguments) { allowed_tokens.push(BUY_ETH_ADDRESS); let unsupported_tokens = args.unsupported_tokens.clone(); - let uniswapv3_factory = IUniswapV3Factory::Instance::deployed(&web3.alloy) + let uniswapv3_factory = IUniswapV3Factory::Instance::deployed(&web3.provider) .await .inspect_err(|err| 
tracing::warn!(%err, "error while fetching IUniswapV3Factory instance")) .ok(); @@ -278,7 +278,7 @@ pub async fn run(args: Arguments) { let current_block_stream = args .shared .current_block - .stream(args.shared.node_url.clone(), web3.alloy.clone()) + .stream(args.shared.node_url.clone(), web3.provider.clone()) .await .unwrap(); @@ -392,7 +392,7 @@ pub async fn run(args: Arguments) { let fast_quoter = create_quoter(fast_price_estimator, QuoteVerificationMode::Unverified); let app_data_validator = Validator::new(args.app_data_size_limit); - let chainalysis_oracle = ChainalysisOracle::Instance::deployed(&web3.alloy) + let chainalysis_oracle = ChainalysisOracle::Instance::deployed(&web3.provider) .await .ok(); let order_validator = Arc::new(OrderValidator::new( diff --git a/crates/refunder/src/lib.rs b/crates/refunder/src/lib.rs index 92815ec760..94b4b9d49a 100644 --- a/crates/refunder/src/lib.rs +++ b/crates/refunder/src/lib.rs @@ -71,7 +71,7 @@ pub async fn run(args: arguments::Arguments) { if let Some(expected_chain_id) = args.chain_id { let chain_id = web3 - .alloy + .provider .get_chain_id() .await .expect("Could not get chainId"); diff --git a/crates/refunder/src/refund_service.rs b/crates/refunder/src/refund_service.rs index ca1e087589..0ef475d1a0 100644 --- a/crates/refunder/src/refund_service.rs +++ b/crates/refunder/src/refund_service.rs @@ -247,7 +247,7 @@ impl RefundService { let database = Postgres::new(db, lookback_time); // Chain reader - let chain = AlloyChain::new(web3.alloy.clone(), ethflow_addresses); + let chain = AlloyChain::new(web3.provider.clone(), ethflow_addresses); // Signer/wallet configuration let signer_address = signer.address(); @@ -255,7 +255,9 @@ impl RefundService { // Transaction submitter let gas_estimator = Box::new( - shared::gas_price_estimation::eth_node::NodeGasPriceEstimator::new(web3.alloy.clone()), + shared::gas_price_estimation::eth_node::NodeGasPriceEstimator::new( + web3.provider.clone(), + ), ); let submitter = Submitter { web3, diff --git a/crates/refunder/src/submitter.rs b/crates/refunder/src/submitter.rs index 6d6075d304..7930af9ba6 100644 --- a/crates/refunder/src/submitter.rs +++ b/crates/refunder/src/submitter.rs @@ -63,7 +63,7 @@ impl ChainWrite for Submitter { self.nonce_of_last_submission = Some(nonce); let ethflow_contract = - CoWSwapEthFlow::Instance::new(ethflow_contract, self.web3.alloy.clone()); + CoWSwapEthFlow::Instance::new(ethflow_contract, self.web3.provider.clone()); let tx_result = ethflow_contract .invalidateOrdersIgnoringNotAllowed(encoded_ethflow_orders) // Gas conversions are lossy but technically the should not have decimal points even though they're floats @@ -94,7 +94,7 @@ impl Submitter { // this command returns the tx count ever mined at the latest block // Mempool tx are not considered. 
self.web3 - .alloy + .provider .get_transaction_count(self.signer_address) .await .with_context(|| { diff --git a/crates/shared/src/account_balances/simulation.rs b/crates/shared/src/account_balances/simulation.rs index b497fae6c5..6842373d01 100644 --- a/crates/shared/src/account_balances/simulation.rs +++ b/crates/shared/src/account_balances/simulation.rs @@ -79,7 +79,7 @@ impl Balances { std::cmp::min(balance, allowance) } SellTokenSource::External => { - let vault = BalancerV2Vault::new(self.vault(), &self.web3.alloy); + let vault = BalancerV2Vault::new(self.vault(), &self.web3.provider); let balance = token.balanceOf(query.owner); let approved = vault.hasApprovedRelayer(query.owner, self.vault_relayer()); let allowance = token.allowance(query.owner, self.vault()); @@ -94,7 +94,7 @@ impl Balances { } } SellTokenSource::Internal => { - let vault = BalancerV2Vault::new(self.vault(), &self.web3.alloy); + let vault = BalancerV2Vault::new(self.vault(), &self.web3.provider); let balance = vault.getInternalBalance(query.owner, vec![query.token]); let approved = vault.hasApprovedRelayer(query.owner, self.vault_relayer()); let (balance, approved) = futures::try_join!( @@ -120,7 +120,7 @@ impl BalanceFetching for Balances { .iter() .map(|query| async { if query.interactions.is_empty() { - let token = ERC20::Instance::new(query.token, self.web3.alloy.clone()); + let token = ERC20::Instance::new(query.token, self.web3.provider.clone()); self.tradable_balance_simple(query, &token).await } else { self.tradable_balance_simulated(query).await @@ -181,11 +181,11 @@ mod tests { let web3 = Web3::new_from_env(); let settlement = GPv2Settlement::GPv2Settlement::new( alloy::primitives::address!("0x9008d19f58aabd9ed0d60971565aa8510560ab41"), - web3.alloy.clone(), + web3.provider.clone(), ); let balances = contracts::alloy::support::Balances::Instance::new( address!("3e8C6De9510e7ECad902D005DE3Ab52f35cF4f1b"), - web3.alloy.clone(), + web3.provider.clone(), ); let balances = Balances::new( &web3, diff --git a/crates/shared/src/bad_token/token_owner_finder/mod.rs b/crates/shared/src/bad_token/token_owner_finder/mod.rs index e12b41de61..12319b1778 100644 --- a/crates/shared/src/bad_token/token_owner_finder/mod.rs +++ b/crates/shared/src/bad_token/token_owner_finder/mod.rs @@ -425,7 +425,7 @@ impl TokenOwnerFinding for TokenOwnerFinder { token: Address, min_balance: U256, ) -> Result> { - let instance = ERC20::Instance::new(token, self.web3.alloy.clone()); + let instance = ERC20::Instance::new(token, self.web3.provider.clone()); // We use a stream with ready_chunks so that we can start with the addresses of // fast TokenOwnerFinding implementations first without having to wait diff --git a/crates/shared/src/bad_token/trace_call.rs b/crates/shared/src/bad_token/trace_call.rs index d5821b46b5..bc3cf34c41 100644 --- a/crates/shared/src/bad_token/trace_call.rs +++ b/crates/shared/src/bad_token/trace_call.rs @@ -550,7 +550,7 @@ mod tests { // observe::tracing::initialize("orderbook::bad_token=debug, // shared::transport=debug", tracing::level_filters::LevelFilter::OFF); let web3 = Web3::new_from_env(); - let version = web3.alloy.get_chain_id().await.unwrap().to_string(); + let version = web3.provider.get_chain_id().await.unwrap().to_string(); let base_tokens = &[ testlib::tokens::WETH, @@ -723,7 +723,7 @@ mod tests { // the callback that I didn't follow in the SC code. // - 0x4f9254c83eb525f9fcf346490bbb3ed28a81c667 Not sure why deny listed. 
- let settlement = GPv2Settlement::Instance::deployed(&web3.alloy) + let settlement = GPv2Settlement::Instance::deployed(&web3.provider) .await .unwrap(); let finder = Arc::new(TokenOwnerFinder { @@ -755,13 +755,13 @@ mod tests { base_tokens: base_tokens.to_vec(), }), Arc::new(BalancerVaultFinder( - BalancerV2Vault::Instance::deployed(&web3.alloy) + BalancerV2Vault::Instance::deployed(&web3.provider) .await .unwrap(), )), Arc::new( UniswapV3Finder::new( - IUniswapV3Factory::Instance::deployed(&web3.alloy) + IUniswapV3Factory::Instance::deployed(&web3.provider) .await .unwrap(), base_tokens.to_vec(), @@ -800,10 +800,10 @@ mod tests { observe::tracing::initialize(&observe::Config::default().with_env_filter("shared=debug")); let web3 = Web3::new_from_env(); let base_tokens = vec![testlib::tokens::WETH]; - let settlement = GPv2Settlement::Instance::deployed(&web3.alloy) + let settlement = GPv2Settlement::Instance::deployed(&web3.provider) .await .unwrap(); - let factory = IUniswapV3Factory::Instance::deployed(&web3.alloy) + let factory = IUniswapV3Factory::Instance::deployed(&web3.provider) .await .unwrap(); let univ3 = Arc::new( @@ -931,7 +931,7 @@ mod tests { let web3 = Web3::new_from_env(); - let settlement = GPv2Settlement::Instance::deployed(&web3.alloy) + let settlement = GPv2Settlement::Instance::deployed(&web3.provider) .await .unwrap(); let finder = Arc::new(TokenOwnerFinder { diff --git a/crates/shared/src/code_fetching.rs b/crates/shared/src/code_fetching.rs index e2d1f5a0fd..a1e78da06d 100644 --- a/crates/shared/src/code_fetching.rs +++ b/crates/shared/src/code_fetching.rs @@ -27,7 +27,7 @@ pub trait CodeFetching: Send + Sync + 'static { impl CodeFetching for Web3 { #[instrument(skip_all)] async fn code(&self, address: Address) -> Result { - Ok(self.alloy.get_code_at(address).await?) + Ok(self.provider.get_code_at(address).await?) 
} #[instrument(skip_all)] diff --git a/crates/shared/src/event_handling.rs b/crates/shared/src/event_handling.rs index 6adfc60f7c..3c785234ce 100644 --- a/crates/shared/src/event_handling.rs +++ b/crates/shared/src/event_handling.rs @@ -840,7 +840,7 @@ mod tests { #[ignore] async fn past_events_by_block_hashes_test() { let web3 = Web3::new_from_env(); - let contract = GPv2Settlement::Instance::deployed(&web3.alloy) + let contract = GPv2Settlement::Instance::deployed(&web3.provider) .await .unwrap(); let storage = EventStorage { events: vec![] }; @@ -865,7 +865,7 @@ mod tests { ), ]; let event_handler = - EventHandler::new(Arc::new(web3.alloy.clone()), contract, storage, None); + EventHandler::new(Arc::new(web3.provider.clone()), contract, storage, None); let (replacement_blocks, _) = event_handler.past_events_by_block_hashes(&blocks).await; assert_eq!(replacement_blocks, blocks[..2]); } @@ -874,24 +874,28 @@ mod tests { #[ignore] async fn update_events_test() { let web3 = Web3::new_from_env(); - let contract = GPv2Settlement::Instance::deployed(&web3.alloy) + let contract = GPv2Settlement::Instance::deployed(&web3.provider) .await .unwrap(); let storage = EventStorage { events: vec![] }; - let current_block = web3.alloy.get_block_number().await.unwrap(); + let current_block = web3.provider.get_block_number().await.unwrap(); const NUMBER_OF_BLOCKS: u64 = 300; //get block in history (current_block - NUMBER_OF_BLOCKS) let block = web3 - .alloy + .provider .get_block_by_number(current_block.saturating_sub(NUMBER_OF_BLOCKS).into()) .await .unwrap() .unwrap(); let block = (block.number(), block.hash()); - let mut event_handler = - EventHandler::new(Arc::new(web3.alloy.clone()), contract, storage, Some(block)); + let mut event_handler = EventHandler::new( + Arc::new(web3.provider.clone()), + contract, + storage, + Some(block), + ); let _result = event_handler.update_events().await; // add logs to event handler and observe } @@ -901,25 +905,29 @@ mod tests { async fn multiple_new_blocks_but_no_reorg_test() { tracing_subscriber::fmt::init(); let web3 = Web3::new_from_env(); - let contract = GPv2Settlement::Instance::deployed(&web3.alloy) + let contract = GPv2Settlement::Instance::deployed(&web3.provider) .await .unwrap(); let storage: EventStorage = EventStorage { events: vec![] }; - let current_block = web3.alloy.get_block_number().await.unwrap(); + let current_block = web3.provider.get_block_number().await.unwrap(); const NUMBER_OF_BLOCKS: u64 = 300; //get block in history (current_block - NUMBER_OF_BLOCKS) let block = web3 - .alloy + .provider .get_block_by_number(current_block.saturating_sub(NUMBER_OF_BLOCKS).into()) .await .unwrap() .unwrap(); let block = (block.number(), block.hash()); - let mut event_handler = - EventHandler::new(Arc::new(web3.alloy.clone()), contract, storage, Some(block)); + let mut event_handler = EventHandler::new( + Arc::new(web3.provider.clone()), + contract, + storage, + Some(block), + ); let _result = event_handler.update_events().await; tracing::info!("wait for at least 2 blocks to see if we hit the new code path"); tokio::time::sleep(tokio::time::Duration::from_millis(26_000)).await; @@ -930,11 +938,11 @@ mod tests { #[ignore] async fn optional_block_skipping() { let web3 = Web3::new_from_env(); - let contract = GPv2Settlement::Instance::deployed(&web3.alloy) + let contract = GPv2Settlement::Instance::deployed(&web3.provider) .await .unwrap(); - let current_block = web3.alloy.get_block_number().await.unwrap(); + let current_block = 
web3.provider.get_block_number().await.unwrap(); // In this test we query for events multiple times. Newer events might be // included each time we query again for the same events, but we want to // disregard them. @@ -956,13 +964,13 @@ mod tests { let storage_empty = EventStorage { events: vec![] }; let event_start = block_number_to_block_number_hash( - &web3.alloy, + &web3.provider, BlockNumberOrTag::Number(current_block - RANGE_SIZE), ) .await .unwrap(); let mut base_event_handler = EventHandler::new( - Arc::new(web3.alloy.clone()), + Arc::new(web3.provider.clone()), contract.clone(), storage_empty, Some(event_start), @@ -978,13 +986,13 @@ mod tests { // date but using `new_skip_blocks_before` if there are no events let storage_empty = EventStorage { events: vec![] }; let event_start = block_number_to_block_number_hash( - &web3.alloy, + &web3.provider, BlockNumberOrTag::Number(current_block - RANGE_SIZE), ) .await .unwrap(); let mut base_block_skip_event_handler = EventHandler::new_skip_blocks_before( - Arc::new(web3.alloy.clone()), + Arc::new(web3.provider.clone()), contract.clone(), storage_empty, event_start, @@ -1022,7 +1030,7 @@ mod tests { events: vec![last_event.clone()], }; let mut nonempty_event_handler = EventHandler::new_skip_blocks_before( - Arc::new(web3.alloy.clone()), + Arc::new(web3.provider.clone()), contract, storage_nonempty, // Same event start as for the two previous event handlers. The test checks that this diff --git a/crates/shared/src/gas_price_estimation/mod.rs b/crates/shared/src/gas_price_estimation/mod.rs index b7755cc1fd..040eaf41d6 100644 --- a/crates/shared/src/gas_price_estimation/mod.rs +++ b/crates/shared/src/gas_price_estimation/mod.rs @@ -76,7 +76,7 @@ pub async fn create_priority_estimator( web3: &Web3, estimator_types: &[GasEstimatorType], ) -> Result> { - let network_id = web3.alloy.get_chain_id().await?.to_string(); + let network_id = web3.provider.get_chain_id().await?.to_string(); let mut estimators = Vec::>::new(); for estimator_type in estimator_types { @@ -86,15 +86,15 @@ pub async fn create_priority_estimator( estimators.push(Box::new(DriverGasEstimator::new( http_factory.create(), url.clone(), - web3.alloy.clone(), + web3.provider.clone(), ))); } GasEstimatorType::Web3 => { - estimators.push(Box::new(NodeGasPriceEstimator::new(web3.alloy.clone()))) + estimators.push(Box::new(NodeGasPriceEstimator::new(web3.provider.clone()))) } GasEstimatorType::Alloy => { let estimator = ConfigurableGasPriceEstimator::new( - web3.alloy.clone(), + web3.provider.clone(), EstimatorConfig { past_blocks: default_past_blocks(), reward_percentile: default_reward_percentile(), diff --git a/crates/shared/src/order_validation.rs b/crates/shared/src/order_validation.rs index 8460a501b3..1167dd8430 100644 --- a/crates/shared/src/order_validation.rs +++ b/crates/shared/src/order_validation.rs @@ -1056,7 +1056,7 @@ mod tests { #[tokio::test] async fn pre_validate_err() { - let native_token = WETH9::Instance::new([0xef; 20].into(), ethrpc::mock::web3().alloy); + let native_token = WETH9::Instance::new([0xef; 20].into(), ethrpc::mock::web3().provider); let validity_configuration = OrderValidPeriodConfiguration { min: Duration::from_secs(1), max_market: Duration::from_secs(100), @@ -1203,7 +1203,7 @@ mod tests { #[tokio::test] async fn pre_validate_ok() { let native_token = - WETH9::Instance::new(Address::repeat_byte(0xef), ethrpc::mock::web3().alloy); + WETH9::Instance::new(Address::repeat_byte(0xef), ethrpc::mock::web3().provider); let validity_configuration = 
OrderValidPeriodConfiguration { min: Duration::from_secs(1), max_market: Duration::from_secs(100), @@ -1294,7 +1294,7 @@ mod tests { #[tokio::test] async fn pre_validate_same_tokens_allow_sell() { let native_token = - WETH9::Instance::new(Address::repeat_byte(0xef), ethrpc::mock::web3().alloy); + WETH9::Instance::new(Address::repeat_byte(0xef), ethrpc::mock::web3().provider); let validity_configuration = OrderValidPeriodConfiguration { min: Duration::from_secs(1), max_market: Duration::from_secs(100), @@ -1402,7 +1402,7 @@ mod tests { let mut limit_order_counter = MockLimitOrderCounting::new(); limit_order_counter.expect_count().returning(|_| Ok(0u64)); - let native_token = WETH9::Instance::new([0xef; 20].into(), ethrpc::mock::web3().alloy); + let native_token = WETH9::Instance::new([0xef; 20].into(), ethrpc::mock::web3().provider); let validator = OrderValidator::new( native_token, Arc::new(order_validation::banned::Users::none()), @@ -1614,7 +1614,7 @@ mod tests { .expect_count() .returning(|_| Ok(MAX_LIMIT_ORDERS_PER_USER)); - let native_token = WETH9::Instance::new([0xef; 20].into(), ethrpc::mock::web3().alloy); + let native_token = WETH9::Instance::new([0xef; 20].into(), ethrpc::mock::web3().provider); let validator = OrderValidator::new( native_token, Arc::new(order_validation::banned::Users::none()), @@ -1695,7 +1695,7 @@ mod tests { limit_order_counter .expect_count() .returning(|_| Ok(MAX_LIMIT_ORDERS_PER_USER)); - let native_token = WETH9::Instance::new([0xef; 20].into(), ethrpc::mock::web3().alloy); + let native_token = WETH9::Instance::new([0xef; 20].into(), ethrpc::mock::web3().provider); let validator = OrderValidator::new( native_token, Arc::new(order_validation::banned::Users::none()), @@ -1760,7 +1760,7 @@ mod tests { .returning(|_, _| Ok(())); let mut limit_order_counter = MockLimitOrderCounting::new(); limit_order_counter.expect_count().returning(|_| Ok(0u64)); - let native_token = WETH9::Instance::new([0xef; 20].into(), ethrpc::mock::web3().alloy); + let native_token = WETH9::Instance::new([0xef; 20].into(), ethrpc::mock::web3().provider); let validator = OrderValidator::new( native_token, Arc::new(order_validation::banned::Users::none()), @@ -1818,7 +1818,7 @@ mod tests { .returning(|_, _| Ok(())); let mut limit_order_counter = MockLimitOrderCounting::new(); limit_order_counter.expect_count().returning(|_| Ok(0u64)); - let native_token = WETH9::Instance::new([0xef; 20].into(), ethrpc::mock::web3().alloy); + let native_token = WETH9::Instance::new([0xef; 20].into(), ethrpc::mock::web3().provider); let validator = OrderValidator::new( native_token, Arc::new(order_validation::banned::Users::none()), @@ -1881,7 +1881,7 @@ mod tests { let mut limit_order_counter = MockLimitOrderCounting::new(); limit_order_counter.expect_count().returning(|_| Ok(0u64)); - let native_token = WETH9::Instance::new([0xef; 20].into(), ethrpc::mock::web3().alloy); + let native_token = WETH9::Instance::new([0xef; 20].into(), ethrpc::mock::web3().provider); let validator = OrderValidator::new( native_token, Arc::new(order_validation::banned::Users::none()), @@ -1946,7 +1946,7 @@ mod tests { .returning(|_, _| Err(TransferSimulationError::InsufficientBalance)); let mut limit_order_counter = MockLimitOrderCounting::new(); limit_order_counter.expect_count().returning(|_| Ok(0u64)); - let native_token = WETH9::Instance::new([0xef; 20].into(), ethrpc::mock::web3().alloy); + let native_token = WETH9::Instance::new([0xef; 20].into(), ethrpc::mock::web3().provider); let validator = OrderValidator::new( 
native_token, Arc::new(order_validation::banned::Users::none()), @@ -2010,7 +2010,7 @@ mod tests { .returning(|_| Err(SignatureValidationError::Invalid)); let mut limit_order_counter = MockLimitOrderCounting::new(); limit_order_counter.expect_count().returning(|_| Ok(0u64)); - let native_token = WETH9::Instance::new([0xef; 20].into(), ethrpc::mock::web3().alloy); + let native_token = WETH9::Instance::new([0xef; 20].into(), ethrpc::mock::web3().provider); let validator = OrderValidator::new( native_token, Arc::new(order_validation::banned::Users::none()), @@ -2080,7 +2080,8 @@ mod tests { .returning(move |_, _| Err(create_error())); let mut limit_order_counter = MockLimitOrderCounting::new(); limit_order_counter.expect_count().returning(|_| Ok(0u64)); - let native_token = WETH9::Instance::new([0xef; 20].into(), ethrpc::mock::web3().alloy); + let native_token = + WETH9::Instance::new([0xef; 20].into(), ethrpc::mock::web3().provider); let validator = OrderValidator::new( native_token, Arc::new(order_validation::banned::Users::none()), @@ -2175,7 +2176,7 @@ mod tests { .returning(|_, _| Err(TransferSimulationError::InsufficientBalance)); let mut limit_order_counter = MockLimitOrderCounting::new(); limit_order_counter.expect_count().returning(|_| Ok(0u64)); - let native_token = WETH9::Instance::new([0xef; 20].into(), ethrpc::mock::web3().alloy); + let native_token = WETH9::Instance::new([0xef; 20].into(), ethrpc::mock::web3().provider); let validator = OrderValidator::new( native_token, Arc::new(order_validation::banned::Users::none()), @@ -2586,7 +2587,7 @@ mod tests { .expect_validate_signature_and_get_additional_gas() .returning(|_| Ok(default_verification_gas_limit())); let mut limit_order_counter = MockLimitOrderCounting::new(); - let native_token = WETH9::Instance::new([0xef; 20].into(), ethrpc::mock::web3().alloy); + let native_token = WETH9::Instance::new([0xef; 20].into(), ethrpc::mock::web3().provider); limit_order_counter.expect_count().returning(|_| Ok(0u64)); let validator = OrderValidator::new( native_token, diff --git a/crates/shared/src/price_estimation/trade_verifier/balance_overrides/detector.rs b/crates/shared/src/price_estimation/trade_verifier/balance_overrides/detector.rs index 481ccdaae2..909c0ff107 100644 --- a/crates/shared/src/price_estimation/trade_verifier/balance_overrides/detector.rs +++ b/crates/shared/src/price_estimation/trade_verifier/balance_overrides/detector.rs @@ -134,7 +134,7 @@ impl Detector { let trace = self .web3 - .alloy + .provider .debug_trace_call( call_request, BlockId::latest(), @@ -293,7 +293,7 @@ impl Detector { let overrides = strategy.state_override(&holder, &test_balance); // Call balanceOf with the override - let token_contract = ERC20::Instance::new(token, self.web3.alloy.clone()); + let token_contract = ERC20::Instance::new(token, self.web3.provider.clone()); let balance = token_contract .balanceOf(holder) .state(overrides) diff --git a/crates/shared/src/price_estimation/trade_verifier/mod.rs b/crates/shared/src/price_estimation/trade_verifier/mod.rs index 4f5cee74cc..85b42835f7 100644 --- a/crates/shared/src/price_estimation/trade_verifier/mod.rs +++ b/crates/shared/src/price_estimation/trade_verifier/mod.rs @@ -98,7 +98,7 @@ impl TradeVerifier { tokens_without_verification: HashSet
, ) -> Result { let settlement_contract = - GPv2Settlement::GPv2Settlement::new(settlement, web3.alloy.clone()); + GPv2Settlement::GPv2Settlement::new(settlement, web3.provider.clone()); let domain_separator = DomainSeparator(settlement_contract.domainSeparator().call().await?.0); Ok(Self { @@ -168,7 +168,7 @@ impl TradeVerifier { let settle_call = legacy_settlement_to_alloy(settlement).abi_encode(); let block = *self.block_stream.borrow(); - let solver = Solver::Instance::new(solver_address, self.web3.alloy.clone()); + let solver = Solver::Instance::new(solver_address, self.web3.provider.clone()); let swap_simulation = solver.swap( *self.settlement.address(), tokens.clone(), diff --git a/crates/shared/src/signature_validator/simulation.rs b/crates/shared/src/signature_validator/simulation.rs index d7d3adfa99..b284464a52 100644 --- a/crates/shared/src/signature_validator/simulation.rs +++ b/crates/shared/src/signature_validator/simulation.rs @@ -63,7 +63,7 @@ impl Validator { // change), the order's validity can be directly determined by whether // the signature matches the expected hash of the order data, checked // with isValidSignature method called on the owner's contract - let contract = ERC1271SignatureValidator::new(check.signer, &self.web3.alloy); + let contract = ERC1271SignatureValidator::new(check.signer, &self.web3.provider); let magic_bytes = contract .isValidSignature(check.hash.into(), check.signature.clone().into()) .call() diff --git a/crates/shared/src/sources/balancer_v2/pool_fetching/mod.rs b/crates/shared/src/sources/balancer_v2/pool_fetching/mod.rs index 5cb30bfc92..6c99612470 100644 --- a/crates/shared/src/sources/balancer_v2/pool_fetching/mod.rs +++ b/crates/shared/src/sources/balancer_v2/pool_fetching/mod.rs @@ -329,7 +329,7 @@ async fn create_aggregate_pool_fetcher( let registered_pools = pool_initializer.initialize_pools().await?; let fetched_block_number = registered_pools.fetched_block_number; let fetched_block_hash = web3 - .alloy + .provider .get_block_by_number(BlockNumberOrTag::Number(fetched_block_number)) .await? .context("failed to get block by block number")? 
diff --git a/crates/shared/src/sources/balancer_v2/pools/common.rs b/crates/shared/src/sources/balancer_v2/pools/common.rs index bd84d77f5f..d1dcae8438 100644 --- a/crates/shared/src/sources/balancer_v2/pools/common.rs +++ b/crates/shared/src/sources/balancer_v2/pools/common.rs @@ -872,7 +872,7 @@ mod tests { let pool_info_fetcher = PoolInfoFetcher { vault: BalancerV2Vault::Instance::new( Address::repeat_byte(0xba), - ethrpc::mock::web3().alloy, + ethrpc::mock::web3().provider, ), factory: MockFactoryIndexing::new(), token_infos: Arc::new(token_infos), @@ -898,7 +898,7 @@ mod tests { let pool_info_fetcher = PoolInfoFetcher { vault: BalancerV2Vault::Instance::new( Address::repeat_byte(0xba), - ethrpc::mock::web3().alloy, + ethrpc::mock::web3().provider, ), factory: MockFactoryIndexing::new(), token_infos: Arc::new(token_infos), diff --git a/crates/shared/src/sources/balancer_v2/pools/weighted.rs b/crates/shared/src/sources/balancer_v2/pools/weighted.rs index 56a7e16734..fc0f6debc5 100644 --- a/crates/shared/src/sources/balancer_v2/pools/weighted.rs +++ b/crates/shared/src/sources/balancer_v2/pools/weighted.rs @@ -282,7 +282,7 @@ mod tests { let web3 = Web3::with_asserter(asserter); let factory = - BalancerV2WeightedPoolFactory::Instance::new(Address::default(), web3.alloy.clone()); + BalancerV2WeightedPoolFactory::Instance::new(Address::default(), web3.provider.clone()); let pool_info = PoolInfo { common: common::PoolInfo { id: B256::repeat_byte(0x90), @@ -300,7 +300,7 @@ mod tests { }; let pool_state = { - let block = web3.alloy.get_block_number().await.unwrap(); + let block = web3.provider.get_block_number().await.unwrap(); let pool_state = factory.fetch_pool_state( &pool_info, diff --git a/crates/shared/src/sources/swapr.rs b/crates/shared/src/sources/swapr.rs index 045c890aba..d4a3062f97 100644 --- a/crates/shared/src/sources/swapr.rs +++ b/crates/shared/src/sources/swapr.rs @@ -26,7 +26,8 @@ impl PoolReading for SwaprPoolReader { let fetch_pool = self.0.read_state(pair, block); async move { - let pair_contract = ISwaprPair::Instance::new(pair_address, self.0.web3.alloy.clone()); + let pair_contract = + ISwaprPair::Instance::new(pair_address, self.0.web3.provider.clone()); let fetch_fee = pair_contract.swapFee().block(block); let (pool, fee) = futures::join!(fetch_pool, fetch_fee.call().into_future()); @@ -111,7 +112,7 @@ mod tests { #[ignore] async fn fetch_swapr_pool() { let web3 = Web3::new_from_env(); - let version = web3.alloy.get_chain_id().await.unwrap().to_string(); + let version = web3.provider.get_chain_id().await.unwrap().to_string(); let pool_fetcher = uniswap_v2::UniV2BaselineSourceParameters::from_baseline_source( BaselineSource::Swapr, &version, diff --git a/crates/shared/src/sources/uniswap_v2/mod.rs b/crates/shared/src/sources/uniswap_v2/mod.rs index d23a4d1d6f..aee970fc35 100644 --- a/crates/shared/src/sources/uniswap_v2/mod.rs +++ b/crates/shared/src/sources/uniswap_v2/mod.rs @@ -105,7 +105,7 @@ impl UniV2BaselineSourceParameters { pub async fn into_source(&self, web3: &Web3) -> Result { let web3 = web3.labeled("uniswapV2"); let router = - contracts::alloy::IUniswapLikeRouter::Instance::new(self.router, web3.alloy.clone()); + contracts::alloy::IUniswapLikeRouter::Instance::new(self.router, web3.provider.clone()); let factory = router.factory().call().await.context("factory")?; let pair_provider = pair_provider::PairProvider { factory, @@ -207,7 +207,7 @@ mod tests { token1: Address, expected_pool_address: Address, ) { - let version_ = 
web3.alloy.get_chain_id().await.unwrap().to_string(); + let version_ = web3.provider.get_chain_id().await.unwrap().to_string(); assert_eq!(version_, version, "wrong node for test"); let source = UniV2BaselineSourceParameters::from_baseline_source(source, version) .unwrap() @@ -223,7 +223,7 @@ mod tests { #[ignore] async fn baseline_mainnet() { let web3 = ethrpc::Web3::new_from_env(); - let version = web3.alloy.get_chain_id().await.unwrap().to_string(); + let version = web3.provider.get_chain_id().await.unwrap().to_string(); assert_eq!(version, "1", "test must be run with mainnet node"); let test = |source, token0, token1, expected| { test_baseline_source(&web3, "1", source, token0, token1, expected) @@ -256,7 +256,7 @@ mod tests { #[ignore] async fn baseline_sepolia() { let web3 = ethrpc::Web3::new_from_env(); - let version = web3.alloy.get_chain_id().await.unwrap().to_string(); + let version = web3.provider.get_chain_id().await.unwrap().to_string(); assert_eq!(version, "11155111", "test must be run with mainnet node"); let test = |source, token0, token1, expected| { test_baseline_source(&web3, "11155111", source, token0, token1, expected) @@ -276,7 +276,7 @@ mod tests { #[ignore] async fn baseline_xdai() { let web3 = ethrpc::Web3::new_from_env(); - let version = web3.alloy.get_chain_id().await.unwrap().to_string(); + let version = web3.provider.get_chain_id().await.unwrap().to_string(); assert_eq!(version, "100", "test must be run with xdai node"); let test = |source, token0, token1, expected| { test_baseline_source(&web3, "100", source, token0, token1, expected) @@ -305,7 +305,7 @@ mod tests { #[ignore] async fn fetch_baoswap_pool() { let web3 = Web3::new_from_env(); - let version = web3.alloy.get_chain_id().await.unwrap().to_string(); + let version = web3.provider.get_chain_id().await.unwrap().to_string(); let pool_fetcher = UniV2BaselineSourceParameters::from_baseline_source(BaselineSource::Baoswap, &version) .unwrap() @@ -341,7 +341,7 @@ mod tests { #[ignore] async fn fetch_honeyswap_pool() { let web3 = Web3::new_from_env(); - let version = web3.alloy.get_chain_id().await.unwrap().to_string(); + let version = web3.provider.get_chain_id().await.unwrap().to_string(); let pool_fetcher = UniV2BaselineSourceParameters::from_baseline_source( BaselineSource::Honeyswap, &version, diff --git a/crates/shared/src/sources/uniswap_v2/pool_fetching.rs b/crates/shared/src/sources/uniswap_v2/pool_fetching.rs index 85b69d592c..7e322491fe 100644 --- a/crates/shared/src/sources/uniswap_v2/pool_fetching.rs +++ b/crates/shared/src/sources/uniswap_v2/pool_fetching.rs @@ -274,15 +274,15 @@ impl PoolReading for DefaultPoolReader { let pair_address = self.pair_provider.pair_address(&pair); // Fetch ERC20 token balances of the pools to sanity check with reserves - let token0 = ERC20::Instance::new(pair.get().0, self.web3.alloy.clone()); - let token1 = ERC20::Instance::new(pair.get().1, self.web3.alloy.clone()); + let token0 = ERC20::Instance::new(pair.get().0, self.web3.provider.clone()); + let token1 = ERC20::Instance::new(pair.get().1, self.web3.provider.clone()); async move { let fetch_token0_balance = token0.balanceOf(pair_address).block(block); let fetch_token1_balance = token1.balanceOf(pair_address).block(block); let pair_contract = - IUniswapLikePair::Instance::new(pair_address, self.web3.alloy.clone()); + IUniswapLikePair::Instance::new(pair_address, self.web3.provider.clone()); let fetch_reserves = pair_contract.getReserves().block(block); let (reserves, token0_balance, token1_balance) = 
futures::join!( diff --git a/crates/shared/src/sources/uniswap_v3/pool_fetching.rs b/crates/shared/src/sources/uniswap_v3/pool_fetching.rs index f839e2107a..5047e408db 100644 --- a/crates/shared/src/sources/uniswap_v3/pool_fetching.rs +++ b/crates/shared/src/sources/uniswap_v3/pool_fetching.rs @@ -302,7 +302,7 @@ impl UniswapV3PoolFetcher { let events = tokio::sync::Mutex::new(EventHandler::new( block_retriever, - UniswapV3PoolEventFetcher(web3.alloy), + UniswapV3PoolEventFetcher(web3.provider), RecentEventsCache::default(), Some(init_block), )); diff --git a/crates/shared/src/token_info.rs b/crates/shared/src/token_info.rs index 844316a5e4..44fc73aa75 100644 --- a/crates/shared/src/token_info.rs +++ b/crates/shared/src/token_info.rs @@ -52,7 +52,7 @@ impl TokenInfoFetcher { }); } - let erc20 = ERC20::Instance::new(address, self.web3.alloy.clone()); + let erc20 = ERC20::Instance::new(address, self.web3.provider.clone()); let (decimals, symbol) = { let decimals = erc20.decimals(); let symbol = erc20.symbol(); diff --git a/crates/shared/src/trace_many.rs b/crates/shared/src/trace_many.rs index 4b6e955a2c..a4033a6799 100644 --- a/crates/shared/src/trace_many.rs +++ b/crates/shared/src/trace_many.rs @@ -24,7 +24,7 @@ pub async fn trace_many( .zip(std::iter::repeat([TraceType::Trace].as_slice())) .collect(); - web3.alloy.trace_call_many(r.as_slice()).latest().await + web3.provider.trace_call_many(r.as_slice()).latest().await } /// Check the return value of `trace_many` for whether all top level diff --git a/crates/solver/src/interactions/allowances.rs b/crates/solver/src/interactions/allowances.rs index 345e40c047..168210e411 100644 --- a/crates/solver/src/interactions/allowances.rs +++ b/crates/solver/src/interactions/allowances.rs @@ -152,7 +152,7 @@ impl AllowanceManaging for AllowanceManager { spender: Address, ) -> Result { Ok(fetch_allowances( - self.web3.alloy.clone(), + self.web3.provider.clone(), self.owner, HashMap::from([(spender, tokens)]), ) @@ -171,7 +171,7 @@ impl AllowanceManaging for AllowanceManager { } let allowances = - fetch_allowances(self.web3.alloy.clone(), self.owner, spender_tokens).await?; + fetch_allowances(self.web3.provider.clone(), self.owner, spender_tokens).await?; let mut result = Vec::new(); for request in requests { let allowance = allowances diff --git a/crates/solver/src/interactions/weth.rs b/crates/solver/src/interactions/weth.rs index 97016384a1..5189bc73fc 100644 --- a/crates/solver/src/interactions/weth.rs +++ b/crates/solver/src/interactions/weth.rs @@ -46,7 +46,7 @@ mod tests { #[test] fn encode_unwrap_weth() { - let weth = WETH9::Instance::new([0x42; 20].into(), ethrpc::mock::web3().alloy); + let weth = WETH9::Instance::new([0x42; 20].into(), ethrpc::mock::web3().provider); let amount = U256::from(13_370_000_000_000_000_000u128); let interaction = UnwrapWethInteraction { weth: weth.clone(), @@ -66,11 +66,11 @@ mod tests { #[test] fn merge_same_native_token() { let mut unwrap0 = UnwrapWethInteraction { - weth: WETH9::Instance::new([0x01; 20].into(), ethrpc::mock::web3().alloy), + weth: WETH9::Instance::new([0x01; 20].into(), ethrpc::mock::web3().provider), amount: U256::ONE, }; let unwrap1 = UnwrapWethInteraction { - weth: WETH9::Instance::new([0x01; 20].into(), ethrpc::mock::web3().alloy), + weth: WETH9::Instance::new([0x01; 20].into(), ethrpc::mock::web3().provider), amount: U256::from(2), }; @@ -81,11 +81,11 @@ mod tests { #[test] fn merge_different_native_token() { let mut unwrap0 = UnwrapWethInteraction { - weth: WETH9::Instance::new([0x01; 
20].into(), ethrpc::mock::web3().alloy), + weth: WETH9::Instance::new([0x01; 20].into(), ethrpc::mock::web3().provider), amount: U256::ONE, }; let unwrap1 = UnwrapWethInteraction { - weth: WETH9::Instance::new([0x02; 20].into(), ethrpc::mock::web3().alloy), + weth: WETH9::Instance::new([0x02; 20].into(), ethrpc::mock::web3().provider), amount: U256::from(2), }; @@ -97,11 +97,11 @@ mod tests { #[should_panic] fn merge_u256_overflow() { let mut unwrap0 = UnwrapWethInteraction { - weth: WETH9::Instance::new([0x01; 20].into(), ethrpc::mock::web3().alloy), + weth: WETH9::Instance::new([0x01; 20].into(), ethrpc::mock::web3().provider), amount: U256::ONE, }; let unwrap1 = UnwrapWethInteraction { - weth: WETH9::Instance::new([0x01; 20].into(), ethrpc::mock::web3().alloy), + weth: WETH9::Instance::new([0x01; 20].into(), ethrpc::mock::web3().provider), amount: U256::MAX, }; diff --git a/crates/solver/src/liquidity/balancer_v2.rs b/crates/solver/src/liquidity/balancer_v2.rs index 3892b12b77..9ce7a836de 100644 --- a/crates/solver/src/liquidity/balancer_v2.rs +++ b/crates/solver/src/liquidity/balancer_v2.rs @@ -255,7 +255,7 @@ mod tests { fn dummy_contracts() -> (Address, BalancerV2Vault::Instance) { ( Address::from_slice(&[0xc0; 20]), - BalancerV2Vault::Instance::new([0xc1; 20].into(), ethrpc::mock::web3().alloy), + BalancerV2Vault::Instance::new([0xc1; 20].into(), ethrpc::mock::web3().provider), ) } diff --git a/crates/solver/src/liquidity/zeroex.rs b/crates/solver/src/liquidity/zeroex.rs index d31d4adb83..503be0c60f 100644 --- a/crates/solver/src/liquidity/zeroex.rs +++ b/crates/solver/src/liquidity/zeroex.rs @@ -383,7 +383,7 @@ pub mod tests { let sell_token = Address::with_last_byte(1); let zeroex = Arc::new(IZeroex::Instance::new( Default::default(), - ethrpc::mock::web3().alloy, + ethrpc::mock::web3().provider, )); let allowances = Allowances::new(*zeroex.address(), hashmap! 
{ sell_token => U256::from(99) }); @@ -429,7 +429,7 @@ pub mod tests { let sell_token = Address::with_last_byte(1); let zeroex = Arc::new(IZeroex::Instance::new( Default::default(), - ethrpc::mock::web3().alloy, + ethrpc::mock::web3().provider, )); let allowances = Allowances::new( *zeroex.address(), diff --git a/crates/solver/src/settlement/settlement_encoder.rs b/crates/solver/src/settlement/settlement_encoder.rs index e9f17a2c83..d3aee36909 100644 --- a/crates/solver/src/settlement/settlement_encoder.rs +++ b/crates/solver/src/settlement/settlement_encoder.rs @@ -627,7 +627,7 @@ pub mod tests { #[test] fn settlement_merges_unwraps_for_same_token() { - let weth = WETH9::Instance::new([0x42; 20].into(), ethrpc::mock::web3().alloy); + let weth = WETH9::Instance::new([0x42; 20].into(), ethrpc::mock::web3().provider); let mut encoder = SettlementEncoder::new(HashMap::new()); encoder.add_unwrap(UnwrapWethInteraction { @@ -757,11 +757,11 @@ pub mod tests { fn settlement_encoder_appends_unwraps_for_different_tokens() { let mut encoder = SettlementEncoder::new(HashMap::new()); encoder.add_unwrap(UnwrapWethInteraction { - weth: WETH9::Instance::new([0x01; 20].into(), ethrpc::mock::web3().alloy), + weth: WETH9::Instance::new([0x01; 20].into(), ethrpc::mock::web3().provider), amount: U256::ONE, }); encoder.add_unwrap(UnwrapWethInteraction { - weth: WETH9::Instance::new([0x02; 20].into(), ethrpc::mock::web3().alloy), + weth: WETH9::Instance::new([0x02; 20].into(), ethrpc::mock::web3().provider), amount: U256::from(2), }); @@ -786,7 +786,7 @@ pub mod tests { alloy::primitives::Bytes::default(), ); let unwrap = UnwrapWethInteraction { - weth: WETH9::Instance::new([0x01; 20].into(), ethrpc::mock::web3().alloy), + weth: WETH9::Instance::new([0x01; 20].into(), ethrpc::mock::web3().provider), amount: U256::ONE, }; @@ -966,7 +966,7 @@ pub mod tests { .add_trade(order_1_3, U256::from(11), U256::ZERO) .unwrap(); - let weth = WETH9::Instance::new(Address::with_last_byte(2), ethrpc::mock::web3().alloy); + let weth = WETH9::Instance::new(Address::with_last_byte(2), ethrpc::mock::web3().provider); encoder.add_unwrap(UnwrapWethInteraction { weth, amount: U256::from(12), diff --git a/crates/solvers/src/domain/solver.rs b/crates/solvers/src/domain/solver.rs index ac1463eaab..80c4ff52f6 100644 --- a/crates/solvers/src/domain/solver.rs +++ b/crates/solvers/src/domain/solver.rs @@ -80,7 +80,7 @@ impl Solver { let uni_v3_quoter_v2 = match config.uni_v3_node_url { Some(url) => { let web3 = ethrpc::web3(Default::default(), &url, Some("baseline")); - contracts::alloy::UniswapV3QuoterV2::Instance::deployed(&web3.alloy) + contracts::alloy::UniswapV3QuoterV2::Instance::deployed(&web3.provider) .await .map(Arc::new) .inspect_err(|err| { From fde29a405f35039195347696979e89490b81607b Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Wed, 4 Feb 2026 09:48:14 +0100 Subject: [PATCH 034/219] Log tracing spans in JSON logger (#4117) # Description In order to reduce load on the logging infra we want to switch to structured logging (JSON). However, when we tested the current setup we realized that the `request_id` (among other things) was not logged which made debugging things basically impossible. # Changes Adjusted the custom JSON formatter to iterate over parent spans and serialize their names and associated fields. Conceptually the current logic is slightly awkward as the field formatter formats the fields to JSON and later when we format the whole log line we re-parse the formatted string to JSON and the serialize it again. 
But unless this is actually causing issues when it's deployed I'll not address in order to unblock structured logging ASAP. ## How to test Manual tests (spans are at the end) original version: ``` { "timestamp":"2026-02-03T09:51:18.773085+00:00", "level":"INFO", "fields":{ "message":"- \"GET /api/v1/token/0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48/native_price HTTP/1.1\" 404 \"-\" \"curl/8.7.1\" 5.229292ms", "log.target":"orderbook::api::request_summary", "log.module_path":"warp::filters::log", "log.file":"/Users/martin/.cargo/git/checkouts/warp-ee983b87d3028bb6/586244e/src/filters/log.rs", "log.line":37 }, "target":"log", "trace_id":"4aaa6c6e6e56f103d5cf975005c15d85" ``` default JSON logger: ``` { "timestamp":"2026-02-03T09:40:00.265994Z", "level":"INFO", "fields":{ "message":"- \"GET /api/v1/token/0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48/native_price HTTP/1.1\" 404 \"-\" \"curl/8.7.1\" 2.721875ms", "log.target":"orderbook::api::request_summary", "log.module_path":"warp::filters::log", "log.file":"/Users/martin/.cargo/git/checkouts/warp-ee983b87d3028bb6/586244e/src/filters/log.rs", "log.line":37 }, "target":"orderbook::api::request_summary", "span":{ "request_id":"6", "name":"http_request" }, "spans":[ { "request_id":"6", "name":"http_request" } ] } ``` new version: ``` { "timestamp":"2026-02-03T09:41:25.529338+00:00", "level":"INFO", "fields":{ "message":"- \"GET /api/v1/token/0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48/native_price HTTP/1.1\" 404 \"-\" \"curl/8.7.1\" 6.522583ms", "log.target":"orderbook::api::request_summary", "log.module_path":"warp::filters::log", "log.file":"/Users/martin/.cargo/git/checkouts/warp-ee983b87d3028bb6/586244e/src/filters/log.rs", "log.line":37 }, "target":"log", "trace_id":"cd6bf89be55582b19fd046b7c111f2b1", "spans":{ "http_request":{ "request_id":"0" } } } ``` --- .../distributed_tracing/trace_id_format.rs | 55 ++++++++++++++++++- crates/observe/src/tracing.rs | 2 + 2 files changed, 54 insertions(+), 3 deletions(-) diff --git a/crates/observe/src/distributed_tracing/trace_id_format.rs b/crates/observe/src/distributed_tracing/trace_id_format.rs index dc2766a31d..7bc18b910c 100644 --- a/crates/observe/src/distributed_tracing/trace_id_format.rs +++ b/crates/observe/src/distributed_tracing/trace_id_format.rs @@ -2,7 +2,11 @@ use { chrono::Utc, opentelemetry::trace::{TraceContextExt, TraceId}, serde::ser::{SerializeMap, Serializer as _}, - std::{fmt, io}, + std::{ + collections::{HashMap, hash_map::Entry}, + fmt, + io, + }, tracing::{Event, Span, Subscriber}, tracing_opentelemetry::OpenTelemetrySpanExt, tracing_serde::{AsSerde, fields::AsMap}, @@ -11,6 +15,7 @@ use { FmtContext, FormatEvent, FormatFields, + FormattedFields, format::{Format, Full, Writer}, time::FormatTime, }, @@ -40,7 +45,13 @@ use { /// "status": 200 /// }, /// "target": "warp::filters::trace", -/// "trace_id": "4bf92f3577b34da6a3ce929d0e0e4736" +/// "trace_id": "4bf92f3577b34da6a3ce929d0e0e4736", +/// "spans": { +/// "spanName1": { +/// "field1": 123, +/// "field2": "abc" +/// } +/// } /// } /// ``` pub struct TraceIdJsonFormat; @@ -52,7 +63,7 @@ where { fn format_event( &self, - _ctx: &FmtContext<'_, S, N>, + ctx: &FmtContext<'_, S, N>, mut writer: Writer<'_>, event: &Event<'_>, ) -> std::fmt::Result @@ -79,6 +90,44 @@ where serializer.serialize_entry("trace_id", &trace_id.to_string())?; } + // serialize all parent span names and their fields + if let Some(scope) = ctx.event_scope() { + let mut parent_spans = HashMap::::new(); + + for span in scope.from_root() { + let 
current_span_fields: serde_json::Map = span + .extensions() + .get::>() + .and_then(|fields| serde_json::from_str(fields.as_str()).ok()) + .unwrap_or_default(); + + match parent_spans.entry(span.name().to_string()) { + Entry::Vacant(entry) => { + entry.insert(serde_json::Value::Object(current_span_fields)); + } + Entry::Occupied(mut entry) => { + // the desired format does not preserve the hierarchy of spans + // so theoretically there could be nested spans with the same + // name so we merge the fields of all spans with the same name + // + // if there are duplicated fields the value of the span closest + // to the processed event wins + // + // also theoretically we could detect fields getting overwritten + // but we couldn't log that without causing a stack overflow so we + // don't + entry + .get_mut() + .as_object_mut() + .expect("fields get initialized with an object") + .extend(current_span_fields.into_iter()) + } + } + } + + serializer.serialize_entry("spans", &parent_spans)?; + } + serializer.end() }; diff --git a/crates/observe/src/tracing.rs b/crates/observe/src/tracing.rs index 76b01e2196..7b827424aa 100644 --- a/crates/observe/src/tracing.rs +++ b/crates/observe/src/tracing.rs @@ -79,6 +79,8 @@ fn set_tracing_subscriber(config: &Config) { if config.use_json_format { // structured logging tracing_subscriber::fmt::layer() + .with_ansi(false) + .fmt_fields(tracing_subscriber::fmt::format::JsonFields::default()) .event_format(TraceIdJsonFormat) .with_writer(writer) .with_filter($env_filter) From bc2788e2960fa168b579901d727d6b81f41a36db Mon Sep 17 00:00:00 2001 From: "Jan [Yann]" <4518474+fafk@users.noreply.github.com> Date: Thu, 5 Feb 2026 14:26:41 +0100 Subject: [PATCH 035/219] Initial Claude setup (#4122) # Description Add instructions for Claude to work more efficiently. Expect .env.claude with secrets filled in. Contact me. None of this was written by hand. To put together the order debugging document I took transcripts of both Felix's talks on the topic and our internal docs and threw it at Claude. Then I tried it for an order and made some edits. # Changes - [x] Sets up MCP servers for main DB and analytics DB - [x] Sets up MCP for fetch - [x] Add CLAUDE.md instructing Claude about the project and coding practices (formatting, style, etc.) - [x] Adds a document telling Claude how to debug orders so it knows what to do next time you ask it "y order {uid} not filled" - [x] Added a command so you can run `/debug-order 0xd997dc715a7610c75e5f97548685befacb7ea5ad878cb4bac1816903514ed84d1dffc418c0d83bd8b98ab3d2e07b83bf5439f4236981a392` within Claude Code ## How to test Ask Claude Code to do stuff for you. --- .claude/commands/debug-order.md | 18 + .gitignore | 1 + .mcp.json | 25 ++ CLAUDE.md | 167 ++++++++ docs/COW_ORDER_DEBUG_SKILL.md | 651 ++++++++++++++++++++++++++++++++ 5 files changed, 862 insertions(+) create mode 100644 .claude/commands/debug-order.md create mode 100644 .mcp.json create mode 100644 CLAUDE.md create mode 100644 docs/COW_ORDER_DEBUG_SKILL.md diff --git a/.claude/commands/debug-order.md b/.claude/commands/debug-order.md new file mode 100644 index 0000000000..92708aadc9 --- /dev/null +++ b/.claude/commands/debug-order.md @@ -0,0 +1,18 @@ +--- +description: Debug why a CoW Protocol order failed to match +--- + +Debug order: $ARGUMENTS + +Read and follow the instructions in ./docs/COW_ORDER_DEBUG_SKILL.md to investigate this order. + +Key steps: +1. Parse the order UID and network from arguments (default: mainnet) +2. 
Fetch order data from API to get status and details +3. Check order_events in DB for lifecycle events +4. Search Victoria Logs for the order UID + - For finding discarded solutions where the order UID appears in calldata, use regex: `.*ORDER_UID_WITHOUT_0X.*` plus `discarded` +5. Identify root cause and report findings with evidence +6. If you haven't found anything go wild and try all SQL / log searches / codebase searches you can think of + +Always show your evidence (log lines, DB results, API responses) when presenting findings. diff --git a/.gitignore b/.gitignore index 5ff5e4c1e7..b86a4933ba 100644 --- a/.gitignore +++ b/.gitignore @@ -5,3 +5,4 @@ /.idea **/testing.*.toml /playground/.env +.env.claude diff --git a/.mcp.json b/.mcp.json new file mode 100644 index 0000000000..609c8b46d2 --- /dev/null +++ b/.mcp.json @@ -0,0 +1,25 @@ +{ + "mcpServers": { + "postgres-protocol": { + "command": "/bin/bash", + "args": [ + "-c", + "set -a && source .env.claude && set +a && npx -y @modelcontextprotocol/server-postgres \"${COW_DB_URL}\"" + ] + }, + "postgres-analytics": { + "command": "/bin/bash", + "args": [ + "-c", + "set -a && source .env.claude && set +a && npx -y @modelcontextprotocol/server-postgres \"${COW_ANALYTICS_DB_URL}\"" + ] + }, + "fetch": { + "command": "/bin/bash", + "args": [ + "-c", + "set -a && source .env.claude && set +a && /opt/homebrew/bin/uvx mcp-server-fetch" + ] + } + } +} diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000000..c8b776a3dd --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,167 @@ +# Cow Protocol Services + +Backend services for Cow Protocol, a decentralized trading protocol with batch auctions on EVM networks. + +## Project Structure + +This is a Rust workspace containing multiple services and libraries: + +### Main Services (Binaries) +- **orderbook** - HTTP API for order submission and queries +- **autopilot** - Protocol driver that manages auctions +- **driver** - Handles liquidity collection and solution selection +- **solvers** - Internal solver engine (baseline) +- **refunder** - Handles refunds + +### Key Libraries +- **shared** - Common functionality (pricing, liquidity, gas estimation) +- **database** - PostgreSQL abstraction and migrations +- **model** - Serialization models for API +- **contracts** - Smart contract bindings + +## Architecture Overview + +``` +User signs order → Orderbook validates → Autopilot includes in auction + ↓ + ┌─────────────────────────┴─────────────────────────┐ + ↓ ↓ + Colocated External Solvers Our Drivers + Non-Colocated Solvers + (run their own driver+solver) ↓ ↓ + │ Our solvers External solver APIs + │ (baseline, (non-colocated partners + │ balancer, ...) like 1inch, 0x, etc) + └─────────────────────────┬─────────────────────────┘ + ↓ + Autopilot ranks solutions, picks winner(s) + ↓ + Winning driver submits to chain (2-3 block window) + ↓ + Settlement contract executes: + 1. Pre-interactions (incl user pre-hooks) + 2. Transfer sell tokens in + 3. Main interactions (swaps/routing) + 4. Pay out buy tokens + 5. Post-interactions (incl user post-hooks) + ↓ + Circuit breaker monitors compliance +``` + +**Solver types:** +- **Colocated**: External partners run their own driver + solver. Full control, full responsibility. +- **Non-colocated**: We run the driver, configured with their solver API endpoint. We handle simulation/submission. 
+ +**Key components:** +- **Orderbook**: Validates + stores orders, handles quoting +- **Autopilot**: Central auctioneer, runs every ~12-15s (eventually every block), filters orders, adds fee policies, sends auction to solvers, ranks solutions +- **Driver**: Fetches liquidity, encodes solutions to calldata, simulates, submits to chain. Handles everything except route-finding. +- **Solver Engine**: Pure math — finds best routes/matches. Can be internal (baseline, balancer) or external API calls. +- **Circuit Breaker**: Monitors on-chain settlements match off-chain auction outcomes. Jails misbehaving solvers. + +## Technology Stack + +- **Language**: Rust 2021+ Edition +- **Runtime**: Tokio async +- **Database**: PostgreSQL with sqlx +- **Web3**: Alloy +- **HTTP**: Axum + +## Documentation + +- **Protocol Documentation**: https://docs.cow.fi/ + - Technical Reference: API specs and SDK docs + - Concepts: Protocol fundamentals and architecture + +## Testing + +- Use `just` commands for running tests (see Justfile) +- E2E tests available in `crates/e2e` +- Local development environment in `playground/` + +## Directory Structure + +``` +crates/ # 25+ Rust crates (binaries + libraries) +database/ # PostgreSQL migrations and schemas +playground/ # Local dev environment +configs/ # Configuration files +``` + +# General Coding Instructions + +If there is a test you can run then run it or `cargo check` or `cargo build`; run it after you have made changes. +Use rust-analyzer MCP when appropriate such as finding usages or renaming. After a change run "cargo +nightly fmt". + +## Code Style + +Instead of using full paths like `volume_fee_bucket_overrides: Vec`, import the type at the beginning so you don't have to use the full path later. + +Don't add a lot of comments. Add comments only if the code is a bit weird or the concept is not clear. + +## CoW Protocol Database Access + +**Always show the SQL query before executing it** against postgres MCP tools (`mcp__postgres-protocol__query`, `mcp__postgres-analytics__query`). + +**Query timeout**: MCP servers are configured with a 120 second timeout. For potentially long-running queries, prefix with `SET statement_timeout = '30s';` (or appropriate duration) to fail fast: +```sql +SET statement_timeout = '30s'; +SELECT ... FROM large_table ...; +``` +If a query times out, try a different approach (add more filters, use a smaller time range, simplify aggregations, or break into smaller queries). + +Read-only replica available via MCP. If that fails for some reason, then you can use psql with: +```bash +source .env.claude && PGPASSWORD="$COW_DB_PASSWORD" psql \ + -h "$COW_DB_HOST" -p "$COW_DB_PORT" -U "$COW_DB_USER" -d -c "" +``` +but use MCP where possible. + +Databases: `mainnet`, `arbitrum-one`, `base`, `linea`, `polygon`, `xdai`, `sepolia`, `plasma`, `ink`, `bnb` etc. + +## RPC Node + +Use `$ETH_MAINNET_RPC` from `.env.claude` for mainnet. Use `cast` or whatever tools you want freely. 
+ +## Grafana Logs Access + +Query logs via the Grafana API (credentials in `.env.claude`): + +```bash +source .env.claude && curl -s -H "Authorization: Bearer $GRAFANA_API_TOKEN" \ + "$GRAFANA_URL/api/ds/query" \ + -X POST -H "Content-Type: application/json" \ + -d '{ + "queries": [{ + "refId": "A", + "datasource": {"type": "victoriametrics-logs-datasource", "uid": "'"$VICTORIA_LOGS_DATASOURCE_UID"'"}, + "expr": "", + "queryType": "instant" + }], + "from": "now-1h", + "to": "now" + }' +``` +Adjust expr for search terms (e.g., plasma, ink, error) +Adjust from/to for time range (e.g., now-15m, now-24h) +Parse log lines with: | jq -r '.results.A.frames[0].data.values[1][]' + +## Etherscan API (V2) + +Use MCP `mcp__fetch__fetch` tool. API Key in `.env.claude` as `$ETHERSCAN_API_KEY`. + +**Important**: V1 API is deprecated. Use V2 with the `chainid` parameter: +- Mainnet: `chainid=1` +- Arbitrum: `chainid=42161` +- Base: `chainid=8453` + +Example URL format: +``` +https://api.etherscan.io/v2/api?chainid=1&module=account&action=balance&address=&tag=latest&apikey= +``` + +Read the API key from `.env.claude` and use it directly in the URL (MCP fetch doesn't do shell variable substitution). + +## Investigating orders + +When asked to look into what happened to an order read file ./docs/COW_ORDER_DEBUG_SKILL.md and follow the instructions there. +Make heavy use of logs and DB to find all info you need and present finding to the user with evidence. diff --git a/docs/COW_ORDER_DEBUG_SKILL.md b/docs/COW_ORDER_DEBUG_SKILL.md new file mode 100644 index 0000000000..02020253ff --- /dev/null +++ b/docs/COW_ORDER_DEBUG_SKILL.md @@ -0,0 +1,651 @@ +# CoW Protocol Order Debug Skill + +Debug why CoW Protocol orders fail to match. Requires DB access + Victoria Logs access (via Grafana). + +## Quick Checklist + +Run through these in order: + +1. [ ] **Order status** — Check API status first (cancelled/expired/fulfilled/open) +2. [ ] **User cancellation** — If cancelled, search logs for `order cancelled ORDER_UID` FIRST +3. [ ] **Order in auction** — Was order in autopilot auction? When? +4. [ ] **Solver bids** — Did any solver bid? What happened to their solution? +5. [ ] **Settlement outcome** — Did settlement succeed/fail/timeout? +6. [ ] **Limit price sanity** — Was quote reasonable? Check slippage, fees, gas +7. [ ] **Price movement** — Did price move between quote and expiry? + +--- + +## 1. Fetch Order Data + +```bash +# Replace $NETWORK (mainnet/gnosis/arbitrum) and $ORDER_UID +curl -s "https://api.cow.fi/$NETWORK/api/v1/orders/$ORDER_UID" | jq . + +# For staging orders: +curl -s "https://barn.api.cow.fi/$NETWORK/api/v1/orders/$ORDER_UID" | jq . 
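# The 56-byte order UID packs orderDigest (32 bytes) | owner (20 bytes) | validTo (4 bytes),
# so the owner and expiry can also be read directly from the UID if needed:
OWNER="0x${ORDER_UID:66:40}"
VALID_TO=$((16#${ORDER_UID:106:8})) # order expiry as a unix timestamp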
+``` + +### GPV2Order Struct (Smart Contract Source of Truth) + +| Field | Meaning | Debug Notes | +|-------|---------|-------------| +| `sellToken` | Token being sold | | +| `buyToken` | Token being bought | | +| `sellAmount` | Amount to sell (wei) | For sell orders, this is exact | +| `buyAmount` | Min amount to receive | For buy orders, this is exact | +| `validTo` | Unix timestamp expiry | Check if expired | +| `appData` | Hash of metadata JSON | Contains hooks, partner fees, flash loan hints | +| `feeAmount` | **Legacy, always 0** | Fee now in limit price | +| `kind` | "sell" or "buy" | | +| `partiallyFillable` | bool | Swaps = false (fill-or-kill), limits can be true | +| `sellTokenBalance` | **Legacy, always "erc20"** | Balancer vault balances never took off | +| `buyTokenBalance` | **Legacy, always "erc20"** | | +| `signingScheme` | eip712/ethsign/presign/eip1271 | See signing section | +| `signature` | The actual signature bytes | | +| `receiver` | Who gets buy tokens | null = order owner | + +**Additional API fields:** +- `class`: "market" vs "limit" — see note below +- `status`: fulfilled/open/cancelled/expired +- `surplusFee`: Protocol's fee estimate for limit orders +- `surplusFeeTimestamp`: Must be <10 min old or order won't enter auction + +**Note on order class:** In the DB, almost every order is stored as `class = 'limit'`. The "market" vs "limit" distinction is about **fee policy**, not order type: +- **Market order**: Had a quote attached, and the order's limit price is within that quote (in-market). Gets market fee policy. +- **Limit order**: Either no quote, or limit price is outside the quote (out-of-market). Gets limit fee policy with surplus fee. + +The `appData.metadata.orderClass` field shows what the UI intended, but the actual classification is determined by comparing the order's price to the quote at placement time. + +--- + +## 2. Signing Schemes + +Orders can fail if signature validation fails. Different schemes have different failure modes: + +| Scheme | Type | Validation | Common Failures | +|--------|------|------------|-----------------| +| `eip712` | EOA | Static, checked once | Sig doesn't match order fields, or signed by unexpected user | +| `ethsign` | EOA (legacy) | Static, checked once | Same as above | + +**Note on unexpected signers:** The majority of signature issues are valid signatures but signed by an unexpected user. This causes the settlement contract to attempt transferring tokens from an account that doesn't have the necessary balance. +| `presign` | Smart contract | On-chain state (`setPreSignature`) | User called `setPreSignature(uid, false)` to cancel | +| `eip1271` | Smart contract | Calls `isValidSignature()` at settlement time | Contract state changed, Safe signer removed, custom logic rejects | + +**EIP-1271 is dynamic** — signature can be valid at order placement but invalid later. Autopilot re-checks these every auction. + +```bash +# Check if presign is set (returns signed boolean) +cast call $SETTLEMENT_CONTRACT "preSignature(bytes)" $ORDER_UID --rpc-url $RPC +``` + +--- + +## 3. Check Logs (Victoria Logs via Grafana) + +Logs are stored in Victoria Logs and accessible via Grafana API. 
+ +**Query template (bash):** +```bash +# Set time range (milliseconds) +NOW_MS=$(($(date +%s) * 1000)) +FROM_MS=$((NOW_MS - 43200000)) # 12 hours ago + +# Set search term (use full order UID with 0x prefix) +SEARCH_TERM="0xd997dc715a7610c75e5f97548685befacb7ea5ad878cb4bac1816903514ed84d1dffc418c0d83bd8b98ab3d2e07b83bf5439f4236981a392" + +# Query Victoria Logs +# - NOT container:controller excludes nginx access logs +# - network:$NETWORK filters to specific chain (mainnet, bnb, arbitrum-one, base, etc) +source .env.claude +curl -s "${GRAFANA_URL}/api/ds/query?ds_type=victoriametrics-logs-datasource" \ + -H 'accept: application/json' \ + -H 'content-type: application/json' \ + -H "x-datasource-uid: ${VICTORIA_LOGS_DATASOURCE_UID}" \ + -H 'x-plugin-id: victoriametrics-logs-datasource' \ + -H "Authorization: Bearer ${GRAFANA_API_TOKEN}" \ + --data-raw "{\"queries\":[{\"refId\":\"A\",\"datasource\":{\"type\":\"victoriametrics-logs-datasource\",\"uid\":\"${VICTORIA_LOGS_DATASOURCE_UID}\"},\"editorMode\":\"code\",\"expr\":\"NOT container:controller ${SEARCH_TERM} | sort by (_time) asc\",\"queryType\":\"instant\",\"maxLines\":100}],\"from\":\"${FROM_MS}\",\"to\":\"${NOW_MS}\"}" \ + | jq -r '.results.A.frames[0].data.values[1][]' +``` + +**Key query parameters:** +- `expr`: Search term (just text, e.g., order UID or auction ID). Add `| sort by (_time) asc` for chronological order +- `queryType`: Use `"instant"` to get log lines. Use `"hits"` to get histogram/counts +- `maxLines`: Number of log lines to return +- `from`/`to`: Millisecond timestamps + +**Example searches:** +```bash +# Search by order UID for a specific network, excluding nginx +"expr": "NOT container:controller network:bnb ORDER_UID | sort by (_time) asc" + +# Search by auction ID +"expr": "NOT container:controller 22788649 | sort by (_time) asc" + +# Search by solver name + auction +"expr": "NOT container:controller baseline 22788649 | sort by (_time) asc" + +# Filter by log content on specific network +"expr": "NOT container:controller network:mainnet order cancelled | sort by (_time) asc" + +# Search by request_id to trace quote→bid issues (useful when order was placed with quote from solver X but that solver never bid) +"expr": "NOT container:controller $REQUEST_ID | sort by (_time) asc" +``` + +**Useful filters:** +- `NOT container:controller` — excludes nginx access logs (REQUIRED for order UID searches) +- `network:$NETWORK` — filter by chain (works for some log sources, not all) + +**Note:** Victoria Logs uses simple text matching. Always use the **full order UID with 0x prefix** for reliable matching. 
+ +**IMPORTANT - Run targeted lifecycle queries:** Always use `NOT container:controller` to exclude nginx: + +```bash +# Run these queries IN PARALLEL to quickly find all lifecycle events (use FULL order UID with 0x): +"expr": "NOT container:controller order created ORDER_UID | sort by (_time) asc" +"expr": "NOT container:controller order cancelled ORDER_UID | sort by (_time) asc" +"expr": "NOT container:controller proposed solution ORDER_UID | sort by (_time) asc" +"expr": "NOT container:controller settlement failed ORDER_UID | sort by (_time) asc" +"expr": "NOT container:controller filtered ORDER_UID | sort by (_time) asc" + +# Find discarded solutions where order appears in calldata (use regex with order UID bytes without 0x prefix) +"expr": "discarded .*ORDER_UID_WITHOUT_0X.* | sort by (_time) asc" +``` + +**What to look for:** +- `order created` — order placement with quote_id +- `New orders in auction` — order entered auction +- `computed solutions` — solver found a route +- `solved auction` — solver submitted winning bid +- `filtered out in-flight` — order being settled +- `order cancelled` — user cancelled via API + +**Get auction competition data:** +```bash +curl -s "https://api.cow.fi/$NETWORK/api/v1/solver_competition/$AUCTION_ID" | jq . +``` + +--- + +## 4. Common Log Patterns + +**IMPORTANT:** Many log messages use **spaces** not underscores (e.g., `order cancelled` not `order_cancelled`). + +**Order lifecycle (search by order UID):** +``` +orderbook::api::post_order: order created # Order placed +autopilot::run_loop::observe: New orders in auction # Added to auction +driver::infra::observe: computed solutions # Solver found route +driver::infra::observe: solved auction # Solver won +autopilot::run_loop: filtered out in-flight # Being settled +autopilot::run_loop: settlement failed # Settlement failed (check err=) +orderbook::orderbook: order cancelled # User cancelled via API +``` + +**Issues to watch for:** +- `order cancelled` — user cancelled the order (check timestamp vs settlement!) +- `settlement failed err=Timeout` — driver timed out during settlement +- `settlement failed` — settlement failed (other reasons) +- `filtered` — order excluded from auction (check reason) +- `error` or `Error` — something went wrong +- `revert` — simulation or settlement failed +- `insufficient_balance` / `insufficient_allowance` — user moved funds + +--- + +## 5. Quote History + +### Method 1: API response (easiest) +The order API response includes the quote that was used: + +```bash +curl -s "https://api.cow.fi/$NETWORK/api/v1/orders/$ORDER_UID" | jq '.quote' +``` + +Returns: +```json +{ + "sellAmount": "4300531427036176000", + "buyAmount": "16788289774218687968", + "feeAmount": "3270684063997860", + "solver": "0x3980daa7eaad0b7e0c53cfc5c2760037270da54d", + "verified": true, + ... +} +``` + +### Method 2: Database +```sql +SELECT q.id, q.sell_amount, q.buy_amount, q.gas_amount, q.solver, q.created +FROM quotes q +JOIN order_quotes oq ON oq.quote_id = q.id +WHERE oq.order_uid = '\x$ORDER_UID_HEX'; +``` + +### Method 3: Logs (fallback) +Find the quote_id from the "order created" log: + +```bash +"expr": "NOT container:controller order created ORDER_UID | sort by (_time) asc" +``` + +**Example log line:** +``` +orderbook::api::post_order: order created order_uid=0x... quote_id=Some(2720468) quote_solver=Some(0x3980...) +``` + +Then search for quote details by ID: +```bash +"expr": "NOT container:controller $QUOTE_ID | sort by (_time) asc" +``` + +--- + +## 6. 
Quoting Deep Dive + +Quotes determine the limit price users sign. Bad quotes = orders that can't fill. + +### Quote Process + +``` +UI requests quote → Orderbook sends "fake auction" (single order, infinite slippage) to all solvers + ↓ + Solvers return: exchange rate + calldata (recipe) + ↓ + In parallel, orderbook also fetches: + - Gas price estimate + - Native price of sell token (to convert gas cost) + - Native price of buy token (needed for surplus scoring later) + ↓ + Simulate winning solver's calldata → get gas units + ↓ + network_fee = gas_units × gas_price / sell_token_native_price + ↓ + Return quote with exchange rate + network fee +``` + +### Quote Types + +| Type | Behavior | Use Case | +|------|----------|----------| +| **Fast** | Returns after first 3 solver responses, always unverified | UI responsiveness | +| **Optimal** | Waits for all solvers (5s timeout), attempts verification | Actual order placement | +| **Native** | Cached quote for "buy 0.1 ETH with token X" | Native price lookups | + +**Verified vs Unverified:** +- Verified = simulation succeeded, high confidence quote is achievable +- Unverified = simulation failed or skipped, solver might have bad math + +### Limit Price Calculation + +``` +min_buy_amount = (sell_amount - network_fee) × exchange_rate × (1 - slippage) × (1 - partner_fee) +``` + +**Smart slippage**: Smaller orders get higher slippage bc network fee dominates. A 10% gas price spike on a $10 order (where fee is ~$2) eats way more than on a $1M order. + +--- + +## 7. Order Placement Validation + +Orderbook rejects orders that have no chance of executing. Checks: + +| Check | Failure Mode | +|-------|--------------| +| Signature valid | Bad sig, wrong signer | +| Balance sufficient | Fill-or-kill needs full amount, partial needs >0 | +| Approval set | Need approval on GPV2VaultRelayer (not settlement contract directly) | +| AppData pre-image exists | AppData JSON must be provided in full with order, or pre-image must be added to backend beforehand | +| Rate limit | Too many orders per trader | +| Quote attached + valid | If quote ID provided, must exist and match | + +**If order placed without quote** (common for bots): Orderbook re-quotes to classify as market vs limit order. + +--- + +## 8. Autopilot Filtering (Why Order Not In Auction) + +Even after placement, autopilot filters orders each auction. Current filters: + +| Filter | Why | +|--------|-----| +| Signature re-check | presign/eip1271 can become invalid | +| Balance re-check | User moved funds | +| Native price exists | Can't score surplus without ETH-denominated value | +| Fee policy applied | Protocol fee calculation | + +**Mainnet currently has ~6000 orders in auction** — drivers also do their own prioritization/filtering. + +--- + +## 9. Limit Order Specific Checks + +### 9.1 Surplus Fee Validation + +```bash +# From order JSON, verify: +surplusFee != null +surplusFeeTimestamp is within last 10 minutes +``` + +**If missing/stale, check surplus fee computation logs:** +```bash +# Victoria Logs query +"expr": "surplus_fee $ORDER_UID | sort by (_time) asc" +``` + +**Surplus fee error logs:** +```bash +"expr": "surplus_fee error | sort by (_time) asc" +``` + +### 9.2 Auction Filtering Check + +```bash +# Check if order is in current auction: +curl -s "https://api.cow.fi/$NETWORK/api/v1/auction" | jq '.orders[] | select(.uid == "$ORDER_UID")' +``` + +If not present, order is filtered. 
Check filter logs: +```bash +# Victoria Logs query +"expr": "filtered $ORDER_UID | sort by (_time) asc" +``` + +**Common filter reasons:** +- `insufficient_balance` +- `insufficient_allowance` +- `invalid_signature` (ERC-1271 state changed, presign cancelled) +- `pre_interaction_error` +- `no_native_price` (can't get ETH price for buy token) + +### 9.3 Market Price Verification + +Compute effective sell amount: +``` +effectiveSellAmount = sellAmount - surplusFee +``` + +**For SELL orders:** +```bash +curl -s -X POST "https://barn.api.cow.fi/$NETWORK/api/v1/quote" \ + -H 'content-type: application/json' \ + -d '{ + "from": "$OWNER", + "sellToken": "$SELL_TOKEN", + "buyToken": "$BUY_TOKEN", + "kind": "sell", + "sellAmountAfterFee": "$EFFECTIVE_SELL_AMOUNT" + }' | jq '.quote.buyAmount' +``` +→ Order's `buyAmount` should be **less than** this quote. + +**For BUY orders:** +```bash +curl -s -X POST "https://barn.api.cow.fi/$NETWORK/api/v1/quote" \ + -H 'content-type: application/json' \ + -d '{ + "from": "$OWNER", + "sellToken": "$SELL_TOKEN", + "buyToken": "$BUY_TOKEN", + "kind": "buy", + "buyAmountAfterFee": "$BUY_AMOUNT" + }' | jq '.quote.sellAmount' +``` +→ Order's `effectiveSellAmount` should be **greater than** this quote. + +--- + +## 10. Settlement Flow (On-Chain) + +When driver wins, it has 2-3 blocks to land the tx. + +### Settlement Contract Execution Order + +```solidity +settle( + IERC20[] tokens, // All tokens involved + uint256[] clearingPrices, // Exchange rates + Trade[] trades, // Orders being filled + Interaction[][3] interactions // [pre, main, post] +) +``` + +**Execution sequence:** +1. **Pre-interactions** — Solver prep + user pre-hooks (unstaking, approvals, etc) +2. **For each trade:** + - Convert Trade → Order struct + - Verify signature (presign/eip1271 checked NOW) + - Compute transfer amounts + - Update filledAmounts mapping (replay protection) + - Transfer sell tokens INTO settlement contract +3. **Main interactions** — The actual swaps/routing (Uniswap calls, etc) +4. **Pay out** — Transfer buy tokens to receivers, enforce min amounts +5. **Post-interactions** — Solver cleanup + user post-hooks (bridging, etc) + +### Driver Submission Behavior + +- Uses private RPCs (MEV Blocker) to avoid failed tx costs + get MEV protection +- Gas bumps on each block if not included +- Monitors chain state, cancels if settlement becomes invalid (liquidity moved, etc) +- **Penalty** if solution proposed but not settled + +--- + +## 11. Auction Runtime Issues + +Order is in auction but still not matching? + +**Auction orders log:** +```bash +# Victoria Logs query (just the auction ID number) +"expr": "$AUCTION_ID | sort by (_time) asc" +``` + +**Specific auction run:** +```bash +"expr": "$RUN_ID | sort by (_time) asc" +``` + +### JIT Orders & CoW AMMs + +Solvers can inject "just-in-time" orders (e.g., from market makers). These normally don't count toward surplus scoring bc they're not public. + +**Exception:** CoW AMM contracts are whitelisted — autopilot includes "surplus capturing JIT order owners" in auction. Orders from these contracts DO count for surplus. + +If debugging a CoW AMM interaction, check if the AMM contract is in the whitelist. + +--- + +## 12. Circuit Breaker Monitoring + +The circuit breaker watches all on-chain settlements and compares against off-chain auction outcomes. 
+ +**It enforces:** +- Winning solver is actually the one settling +- Settled amounts match reported amounts +- No protocol violations + +**Violations → solver jailed** (deny-listed until they contact team, explain, fix). + +Check circuit breaker logs if solver claims they won but settlement didn't happen or was rejected. + +--- + +## 13. DB Queries (Direct Access) + +### Check order state in DB: +```sql +SELECT + uid, creation_timestamp, owner, sell_token, buy_token, + sell_amount, buy_amount, valid_to, kind, class, + surplus_fee, surplus_fee_timestamp +FROM orders +WHERE uid = '\x$ORDER_UID_HEX'; +``` + +### Check order lifecycle events: +The `order_events` table tracks order state changes. This is often the fastest way to understand what happened. + +```sql +SELECT timestamp, label::text +FROM order_events +WHERE order_uid = '\x$ORDER_UID_HEX' +ORDER BY timestamp; +``` + +**Event labels:** +| Label | Meaning | +|-------|---------| +| `created` | Order was placed | +| `ready` | Order ready for auction inclusion | +| `considered` | Order was considered in an auction | +| `executing` | Order is being settled (in-flight) | +| `traded` | Order was filled on-chain | +| `cancelled` | User cancelled the order | +| `filtered` | Order was filtered out of auction | +| `invalid` | Order became invalid (balance/allowance/signature) | + +**Example lifecycle:** `created` → `ready` → `considered` → `executing` → `traded` + +### Check quotes for order: +```sql +SELECT + q.id, q.sell_token, q.buy_token, q.sell_amount, q.buy_amount, + q.gas_amount, q.solver, q.created +FROM quotes q +JOIN order_quotes oq ON oq.quote_id = q.id +WHERE oq.order_uid = '\x$ORDER_UID_HEX'; +``` + +### Check auction inclusion history: +```sql +SELECT + auction_id, order_uid, included, filtered_reason +FROM auction_orders +WHERE order_uid = '\x$ORDER_UID_HEX' +ORDER BY auction_id DESC +LIMIT 20; +``` + +You can also check attempts in `settlement_executions` tables. + +### Check successful settlements: +```sql +SELECT + tx_hash, solver, order_uid, executed_sell_amount, executed_buy_amount +FROM settlements s +JOIN trades t ON t.settlement_id = s.id +WHERE t.order_uid = '\x$ORDER_UID_HEX'; +``` + +### Check presignature events (for presign orders): +```sql +SELECT block_number, signed +FROM presignature_events +WHERE order_uid = '\x$ORDER_UID_HEX' +ORDER BY block_number; +``` +If `signed = false`, the user revoked their presignature on-chain. + +--- + +## 14. AppData Deep Dive + +AppData is a hash of a JSON document (the JSON must be provided in full or its pre-image registered beforehand). **Cannot be verified on-chain** (smart contract just sees hash), so all enforcement is off-chain/soft. 
+ +### Common AppData Fields + +```json +{ + "version": "1.0.0", + "metadata": { + "partnerFee": { + "recipient": "0x...", + "bps": 30 + }, + "hooks": { + "pre": [{ "target": "0x...", "callData": "0x...", "gasLimit": "100000" }], + "post": [{ "target": "0x...", "callData": "0x...", "gasLimit": "100000" }] + }, + "flashLoan": { + "lender": "0x...", + "token": "0x...", + "amount": "1000000000000000000" + } + } +} +``` + +**Debug implications:** +- Partner fee misconfigured → order's effective price is wrong +- Pre-hook fails → settlement reverts at pre-interaction stage +- Post-hook fails → settlement reverts after swaps (user loses gas but trade doesn't complete) +- Flash loan hints help solver but don't guarantee execution + +```bash +# Fetch appData content +curl -s "https://api.cow.fi/$NETWORK/api/v1/app_data/$APP_DATA_HASH" +``` + +--- + +## 15. Useful Links + +| Resource | URL | +|----------|-----| +| Order Explorer | `https://explorer.cow.fi/orders/$ORDER_UID` | +| Grafana Logs (Victoria Logs) | `$GRAFANA_URL/explore` (see .env.claude) | +| API Docs | `https://api.cow.fi/docs/` | +| Block-to-Date | `https://etherscan.io/blockdateconverter` | +| Barn (Staging) | `https://barn.cow.fi` | +| Settlement Contract | `0x9008D19f58AAbD9eD0D60971565AA8510560ab41` | + +--- + +## 16. Decision Tree + +``` +Order not matched? +│ +├─ Is order in auction? +│ ├─ NO → Check autopilot logs for filter reason +│ │ → Common: balance, allowance, signature, no native price +│ │ +│ └─ YES → Did solvers bid? +│ ├─ NO → Price probably out of market +│ │ → Verify with quote API +│ │ → Check price movement since quote +│ │ +│ └─ YES → What happened to winning bid? +│ → Check solver pod for revert/error +│ → Get auction_id, check competition endpoint +│ +├─ Is it a limit order? +│ └─ Has surplusFee? Is it fresh (<10min)? +│ → NO: Check surplus fee computation logs +│ +├─ Check signing scheme +│ └─ presign/eip1271? → State may have changed since placement +│ +└─ Check appData + └─ Hooks defined? → Pre/post hook might be failing +``` + +--- + +## 17. Common Root Causes + +| Symptom | Likely Cause | Fix | +|---------|--------------|-----| +| No surplusFee | Quote computation failed | Check estimator logs | +| surplusFee stale | Background task stuck | Escalate to #backend | +| Filtered: insufficient_balance | User moved funds | Wait for rebalance | +| Filtered: invalid_signature | ERC-1271/presign state changed | User must re-sign or re-presign | +| Filtered: no_native_price | Can't price buy token in ETH | Token has no liquidity path to ETH | +| No solver bids | Price out of market | User adjusts limit | +| Solver bid reverted | Liquidity changed between auction and settlement | Normal MEV/timing | +| Quote outlier | Single estimator gave bad price | Check if quote was verified | +| Unverified quote accepted | Simulation failed but UI showed price anyway | User signed bad limit price | +| Pre-hook revert | User's pre-hook call failed | Check hook calldata + target | +| Gas estimate too low | API gas estimation bug | Known issue, being fixed | From 86fe6a083c8bbd282c2bcee7adf9ac0f73ddd94a Mon Sep 17 00:00:00 2001 From: ilya Date: Thu, 5 Feb 2026 17:16:07 +0300 Subject: [PATCH 036/219] Don't notify solvers about failed solutions with a haircut fee (#4115) # Description This PR addresses [a comment](https://github.com/cowprotocol/services/pull/4049#pullrequestreview-3687251046) that suggests avoiding notifying solvers on failed solutions encoding that were configured with a haircut fee. 
Also, updates the metric to easily identify the error rate of solutions with the haircut fee to configure a new alert to take any action. --- crates/driver/src/domain/competition/mod.rs | 32 +++++++++----- .../src/domain/competition/solution/mod.rs | 10 +++++ .../domain/competition/solution/settlement.rs | 5 +++ crates/driver/src/infra/observe/mod.rs | 44 ++++++++++++++++--- 4 files changed, 74 insertions(+), 17 deletions(-) diff --git a/crates/driver/src/domain/competition/mod.rs b/crates/driver/src/domain/competition/mod.rs index 869d51d683..6355119ccb 100644 --- a/crates/driver/src/domain/competition/mod.rs +++ b/crates/driver/src/domain/competition/mod.rs @@ -239,6 +239,7 @@ impl Competition { .user_trades() .map(|trade| trade.order().uid) .collect(); + let has_haircut = solution.has_haircut(); observe::encoding(&id); let settlement = solution .encode( @@ -248,10 +249,10 @@ impl Competition { self.solver.solver_native_token(), ) .await; - (id, orders, settlement) + (id, orders, has_haircut, settlement) }) .collect::>() - .filter_map(|(id, orders, result)| async move { + .filter_map(|(id, orders, has_haircut, result)| async move { match result { Ok(solution) => { self.risk_detector.encoding_succeeded(&orders); @@ -261,8 +262,11 @@ impl Competition { Err(_err) if id.solutions().len() > 1 => None, Err(err) => { self.risk_detector.encoding_failed(&orders); - observe::encoding_failed(self.solver.name(), &id, &err); - notify::encoding_failed(&self.solver, auction.id(), &id, &err); + observe::encoding_failed(self.solver.name(), &id, &err, has_haircut); + // don't notify on errors for solutions with haircut + if !has_haircut { + notify::encoding_failed(&self.solver, auction.id(), &id, &err); + } None } } @@ -362,6 +366,7 @@ impl Competition { // gets picked by the procotol. if let Ok(remaining) = deadline.remaining() { let score_ref = &mut score; + let has_haircut = settlement.has_haircut(); let simulate_on_new_blocks = async move { let mut stream = ethrpc::block_stream::into_stream(self.eth.current_block().clone()); @@ -369,19 +374,22 @@ impl Competition { if let Err(infra::simulator::Error::Revert(err)) = self.simulate_settlement(&settlement).await { - observe::winner_voided(block, &err); + observe::winner_voided(self.solver.name(), block, &err, has_haircut); *score_ref = None; self.settlements .lock() .unwrap() .retain(|s| s.solution().get() != solution_id); - notify::simulation_failed( - &self.solver, - auction.id(), - settlement.solution(), - &infra::simulator::Error::Revert(err), - true, - ); + // Only notify solver if solution doesn't have haircut + if !has_haircut { + notify::simulation_failed( + &self.solver, + auction.id(), + settlement.solution(), + &infra::simulator::Error::Revert(err), + true, + ); + } return; } } diff --git a/crates/driver/src/domain/competition/solution/mod.rs b/crates/driver/src/domain/competition/solution/mod.rs index 01bdac1626..35f9b64615 100644 --- a/crates/driver/src/domain/competition/solution/mod.rs +++ b/crates/driver/src/domain/competition/solution/mod.rs @@ -519,6 +519,16 @@ impl Solution { ) }) } + + /// Returns true if any trade in this solution has a non-zero haircut fee. + /// Used to determine if simulation failures should suppress solver + /// notifications. 
+ pub fn has_haircut(&self) -> bool { + self.trades.iter().any(|trade| match trade { + Trade::Fulfillment(fulfillment) => !fulfillment.haircut_fee().is_zero(), + Trade::Jit(_) => false, // JIT orders don't have haircut + }) + } } /// Given two solutions returns the factors with diff --git a/crates/driver/src/domain/competition/solution/settlement.rs b/crates/driver/src/domain/competition/solution/settlement.rs index 63bf90be3c..c7ad4a33d7 100644 --- a/crates/driver/src/domain/competition/solution/settlement.rs +++ b/crates/driver/src/domain/competition/solution/settlement.rs @@ -339,6 +339,11 @@ impl Settlement { .map(|(token, amount)| (token, amount.into())) .collect() } + + /// Returns true if this settlement's solution has any trades with haircut. + pub fn has_haircut(&self) -> bool { + self.solution.has_haircut() + } } /// Should the interactions be internalized? diff --git a/crates/driver/src/infra/observe/mod.rs b/crates/driver/src/infra/observe/mod.rs index 07e1b06a12..8ee025539e 100644 --- a/crates/driver/src/infra/observe/mod.rs +++ b/crates/driver/src/infra/observe/mod.rs @@ -122,11 +122,26 @@ pub fn encoding(id: &solution::Id) { } /// Observe that settlement encoding failed. -pub fn encoding_failed(solver: &solver::Name, id: &solution::Id, err: &solution::Error) { - tracing::info!(?id, ?err, "discarded solution: settlement encoding"); +pub fn encoding_failed( + solver: &solver::Name, + id: &solution::Id, + err: &solution::Error, + has_haircut: bool, +) { + tracing::info!( + ?id, + ?err, + has_haircut, + "discarded solution: settlement encoding" + ); + let reason = if has_haircut { + "SettlementEncodingHaircut" + } else { + "SettlementEncoding" + }; metrics::get() .dropped_solutions - .with_label_values(&[solver.as_str(), "SettlementEncoding"]) + .with_label_values(&[solver.as_str(), reason]) .inc(); } @@ -164,8 +179,27 @@ pub fn score(settlement: &Settlement, score: ð::Ether) { // Observe that the winning settlement started failing upon arrival of a new // block -pub fn winner_voided(block: BlockInfo, err: &simulator::RevertError) { - tracing::warn!(block = block.number, ?err, "solution reverts on new block"); +pub fn winner_voided( + solver: &solver::Name, + block: BlockInfo, + err: &simulator::RevertError, + has_haircut: bool, +) { + tracing::warn!( + block = block.number, + ?err, + has_haircut, + "solution reverts on new block" + ); + let reason = if has_haircut { + "SimulationRevertHaircut" + } else { + "SimulationRevert" + }; + metrics::get() + .dropped_solutions + .with_label_values(&[solver.as_str(), reason]) + .inc(); } pub fn revealing() { From 04a3915f53c67e153dd7ce3b84465b4656674b28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Duarte?= Date: Thu, 5 Feb 2026 19:43:37 +0000 Subject: [PATCH 037/219] Optimize total_surplus query (#4116) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description We've been experiencing latency spikes on several endpoints, we've pinned this down to the time it takes to acquire DB connections from the pool; when checking RDS monitoring, the surplus query always shows up at the top. The current theory is that the query is a bit slower than it could be, as more users request the main swap page, their surplus is loaded (even if they don't request it — i.e. load the wallet modal) and if some of these users have a larger amount of orders, they're taking up connections that other endpoints aren't getting. 
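For reference, this is the kind of check that points at the surplus query (a sketch only; it assumes the `pg_stat_statements` extension is available on the replica, while the numbers in this PR come from RDS monitoring):

```sql
-- top statements by total execution time
SELECT calls,
       round(mean_exec_time::numeric, 1)  AS mean_ms,
       round(total_exec_time::numeric, 1) AS total_ms,
       left(query, 80)                    AS statement
FROM pg_stat_statements
ORDER BY total_exec_time DESC
LIMIT 5;
```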
I looked into the user distribution, here are the results:

| log_bucket | bucket_start | num_users | pct | cumulative_users | cumulative_pct |
|------------|--------------|-----------|------|------------------|----------------|
| 0 | 1 | 295329 | 83.86 | 295329 | 83.86 |
| 1 | 10 | 51334 | 14.58 | 346663 | 98.44 |
| 2 | 100 | 5071 | 1.44 | 351734 | 99.88 |
| 3 | 1000 | 306 | 0.09 | 352040 | 99.96 |
| 4 | 10000 | 106 | 0.03 | 352146 | 99.99 |
| 5 | 100000 | 28 | 0.01 | 352174 | 100.00 |
Distribution Query

```sql
WITH buckets AS (
    SELECT
        floor(log(order_count))::int as log_bucket,
        power(10, floor(log(order_count)))::int as bucket_start,
        count(*) as num_users
    FROM (
        SELECT owner, count(*) as order_count
        FROM orders
        GROUP BY owner
    ) sub
    GROUP BY 1, 2
)
SELECT
    log_bucket,
    bucket_start,
    num_users,
    round(100.0 * num_users / sum(num_users) over(), 2) as pct,
    sum(num_users) over(order by log_bucket) as cumulative_users,
    round(100.0 * sum(num_users) over(order by log_bucket) / sum(num_users) over(), 2) as cumulative_pct
FROM buckets
ORDER BY log_bucket;
```

However, if it was just this, it would be too simple. Depending on the user, they might have no orders and only onchain orders (note that the max number of onchain orders is around ~10k), and some will have skewed data distributions across tables, which makes analyzing and optimizing this query a bit tricky.

There are two crucial changes to the query: removing ARRAY_AGG and adding indexes. The first means the DB does not have to materialize a potentially big array in memory (which would otherwise also lead to bad estimations); the second provides better "access paths" to some of the information the query requires.

### RDS Stats (1h)

Before: screenshot of CloudWatch (eu-central-1) RDS metrics, 2026-02-05 11:12

After: screenshot of CloudWatch (eu-central-1) RDS metrics, 2026-02-05 11:14

# Changes

- [ ] Replace the query with the optimized one
- [ ] Create indexes (already done to avoid issues during migration); a sketch of a plausible index shape follows after the query plans below

## Query Plans
Before

``` Aggregate (cost=1033006.55..1033006.56 rows=1 width=8) (actual time=130139.025..130143.456 rows=1 loops=1) -> Append (cost=1032650.26..1033006.51 rows=2 width=100) (actual time=31175.499..130090.776 rows=10998 loops=1) -> Subquery Scan on "*SELECT* 1" (cost=1032650.26..1032856.24 rows=1 width=100) (actual time=31175.498..130085.109 rows=10998 loops=1) -> Nested Loop (cost=1032650.26..1032856.23 rows=1 width=100) (actual time=31175.497..130079.454 rows=10998 loops=1) Join Filter: (t.order_uid = o.uid) InitPlan 3 (returns $5) -> Finalize Aggregate (cost=1032601.57..1032601.58 rows=1 width=32) (actual time=30761.210..30765.632 rows=1 loops=1) -> Gather (cost=1032601.35..1032601.56 rows=2 width=32) (actual time=30678.267..30689.666 rows=3 loops=1) Workers Planned: 2 Workers Launched: 2 -> Partial Aggregate (cost=1031601.35..1031601.36 rows=1 width=32) (actual time=30675.862..30675.863 rows=1 loops=3) -> Parallel Bitmap Heap Scan on orders (cost=24591.99..1030861.56 rows=295913 width=57) (actual time=278.099..30619.932 rows=226281 loops=3) Recheck Cond: (owner = '\x10dad59905d93ca37cd25a35f25349cb5956ba8e'::bytea) Rows Removed by Index Recheck: 737927 Heap Blocks: exact=15162 lossy=55571 -> Bitmap Index Scan on order_owner (cost=0.00..24414.44 rows=710192 width=0) (actual time=271.994..271.994 rows=749143 loops=1) Index Cond: (owner = '\x10dad59905d93ca37cd25a35f25349cb5956ba8e'::bytea) InitPlan 4 (returns $6) -> Aggregate (cost=47.00..47.01 rows=1 width=32) (actual time=0.021..0.022 rows=1 loops=1) -> Bitmap Heap Scan on onchain_placed_orders (cost=4.09..46.97 rows=11 width=57) (actual time=0.017..0.018 rows=0 loops=1) Recheck Cond: (sender = '\x10dad59905d93ca37cd25a35f25349cb5956ba8e'::bytea) -> Bitmap Index Scan on order_sender (cost=0.00..4.08 rows=11 width=0) (actual time=0.013..0.013 rows=0 loops=1) Index Cond: (sender = '\x10dad59905d93ca37cd25a35f25349cb5956ba8e'::bytea) -> Nested Loop (cost=1.12..136.08 rows=2 width=188) (actual time=31172.849..109529.707 rows=10914 loops=1) -> Index Scan using orders_pkey on orders o (cost=0.56..85.71 rows=10 width=123) (actual time=31172.113..101635.999 rows=678843 loops=1) Index Cond: (uid = ANY (array_cat($5, $6))) -> Index Only Scan using order_rewards_pkey on order_execution oe (cost=0.56..5.01 rows=3 width=65) (actual time=0.011..0.011 rows=0 loops=678843) Index Cond: (order_uid = o.uid) Heap Fetches: 170 -> Index Scan using trade_order_uid on trades t (cost=0.56..0.74 rows=2 width=81) (actual time=0.416..0.416 rows=1 loops=10914) Index Cond: (order_uid = oe.order_uid) SubPlan 1 -> Index Scan using auction_prices_pkey on auction_prices ap (cost=0.58..35.00 rows=16 width=11) (actual time=1.428..1.428 rows=1 loops=9675) Index Cond: ((auction_id = oe.auction_id) AND (token = o.buy_token)) SubPlan 2 -> Index Scan using auction_prices_pkey on auction_prices ap_1 (cost=0.58..35.00 rows=16 width=11) (actual time=1.596..1.596 rows=1 loops=1323) Index Cond: ((auction_id = oe.auction_id) AND (token = o.sell_token)) -> Subquery Scan on "*SELECT* 2" (cost=2.09..150.26 rows=1 width=100) (actual time=0.010..0.013 rows=0 loops=1) -> Nested Loop (cost=2.09..150.25 rows=1 width=100) (actual time=0.009..0.012 rows=0 loops=1) Join Filter: (j.uid = t_1.order_uid) -> Nested Loop (cost=1.54..79.45 rows=1 width=192) (actual time=0.009..0.010 rows=0 loops=1) -> Nested Loop Anti Join (cost=0.98..74.32 rows=1 width=127) (actual time=0.009..0.010 rows=0 loops=1) -> Index Scan using jit_user_order_creation_timestamp on jit_orders j (cost=0.42..33.62 rows=8 width=127) 
(actual time=0.008..0.009 rows=0 loops=1) Index Cond: (owner = '\x10dad59905d93ca37cd25a35f25349cb5956ba8e'::bytea) -> Index Only Scan using orders_pkey on orders o_1 (cost=0.56..5.08 rows=1 width=57) (never executed) Index Cond: (uid = j.uid) Heap Fetches: 0 -> Index Only Scan using order_rewards_pkey on order_execution oe_1 (cost=0.56..5.11 rows=3 width=65) (never executed) Index Cond: (order_uid = j.uid) Heap Fetches: 0 -> Index Scan using trade_order_uid on trades t_1 (cost=0.56..0.74 rows=2 width=81) (never executed) Index Cond: (order_uid = oe_1.order_uid) SubPlan 5 -> Index Scan using auction_prices_pkey on auction_prices ap_2 (cost=0.58..35.00 rows=16 width=11) (never executed) Index Cond: ((auction_id = oe_1.auction_id) AND (token = j.buy_token)) SubPlan 6 -> Index Scan using auction_prices_pkey on auction_prices ap_3 (cost=0.58..35.00 rows=16 width=11) (never executed) Index Cond: ((auction_id = oe_1.auction_id) AND (token = j.sell_token)) Planning Time: 4.788 ms Execution Time: 130157.850 ms ```

After

``` Aggregate (cost=14614.00..14614.01 rows=1 width=8) (actual time=1901.439..1902.539 rows=1 loops=1) Buffers: shared hit=11838 read=4075 I/O Timings: shared read=2240.469 -> Gather Merge (cost=14557.86..14604.91 rows=404 width=136) (actual time=1900.725..1902.018 rows=917 loops=1) Workers Planned: 2 Workers Launched: 2 Buffers: shared hit=11838 read=4075 I/O Timings: shared read=2240.469 -> Sort (cost=13557.83..13558.25 rows=168 width=136) (actual time=762.591..762.628 rows=306 loops=3) Sort Key: "*SELECT* 2".uid Sort Method: quicksort Memory: 49kB Buffers: shared hit=11838 read=4075 I/O Timings: shared read=2240.469 Worker 0: Sort Method: quicksort Memory: 115kB Worker 1: Sort Method: quicksort Memory: 25kB -> Parallel Append (cost=2.25..13551.63 rows=168 width=136) (actual time=2.247..762.040 rows=306 loops=3) Buffers: shared hit=11824 read=4075 I/O Timings: shared read=2240.469 -> Subquery Scan on "*SELECT* 2" (cost=24.42..13550.79 rows=53 width=136) (actual time=3.496..1892.483 rows=709 loops=1) Buffers: shared hit=9702 read=3415 I/O Timings: shared read=1854.511 -> Nested Loop Left Join (cost=24.42..13550.26 rows=53 width=136) (actual time=3.495..1892.175 rows=709 loops=1) Buffers: shared hit=9702 read=3415 I/O Timings: shared read=1854.511 -> Nested Loop (cost=23.84..13414.32 rows=53 width=155) (actual time=1.455..762.309 rows=709 loops=1) Buffers: shared hit=8050 read=1521 I/O Timings: shared read=736.402 -> Nested Loop (cost=23.28..12573.83 rows=195 width=245) (actual time=1.418..667.512 rows=709 loops=1) Buffers: shared hit=5387 read=1337 I/O Timings: shared read=647.346 -> Nested Loop (cost=22.73..9169.44 rows=796 width=180) (actual time=1.397..584.403 rows=711 loops=1) Buffers: shared hit=2649 read=1211 I/O Timings: shared read=570.801 -> Bitmap Heap Scan on onchain_placed_orders opo (cost=22.17..2343.76 rows=796 width=57) (actual time=0.699..6.255 rows=711 loops=1) Recheck Cond: (sender = '\x8ef4fb956d0cb06ca9e3db76040f08154e8d0122'::bytea) Buffers: shared hit=56 read=248 I/O Timings: shared read=2.028 -> Bitmap Index Scan on order_sender (cost=0.00..21.97 rows=796 width=0) (actual time=0.044..0.044 rows=711 loops=1) Index Cond: (sender = '\x8ef4fb956d0cb06ca9e3db76040f08154e8d0122'::bytea) Buffers: shared hit=4 -> Index Scan using orders_pkey on orders o (cost=0.56..8.57 rows=1 width=123) (actual time=0.812..0.812 rows=1 loops=711) Index Cond: (uid = opo.uid) Filter: (owner <> '\x8ef4fb956d0cb06ca9e3db76040f08154e8d0122'::bytea) Buffers: shared hit=2593 read=963 I/O Timings: shared read=568.773 -> Index Only Scan using order_rewards_pkey on order_execution oe (cost=0.56..4.25 rows=3 width=65) (actual time=0.115..0.115 rows=1 loops=711) Index Cond: (order_uid = o.uid) Heap Fetches: 16 Buffers: shared hit=2738 read=126 I/O Timings: shared read=76.545 -> Index Only Scan using trades_covering on trades t (cost=0.56..4.29 rows=2 width=81) (actual time=0.132..0.133 rows=1 loops=709) Index Cond: (order_uid = o.uid) Heap Fetches: 0 Buffers: shared hit=2663 read=184 I/O Timings: shared read=89.056 -> Index Scan using auction_prices_pkey on auction_prices ap (cost=0.58..31.94 rows=16 width=40) (actual time=1.590..1.590 rows=1 loops=709) Index Cond: ((auction_id = oe.auction_id) AND (token = CASE o.kind WHEN 'sell'::orderkind THEN o.buy_token ELSE o.sell_token END)) Buffers: shared hit=1652 read=1894 I/O Timings: shared read=1118.109 -> Subquery Scan on "*SELECT* 1" (cost=2.25..4428.09 rows=350 width=136) (actual time=3.233..393.256 rows=208 loops=1) Buffers: shared hit=2119 read=660 
I/O Timings: shared read=385.958 -> Nested Loop (cost=2.25..4424.59 rows=350 width=136) (actual time=3.233..393.181 rows=208 loops=1) Join Filter: (t_1.order_uid = o_1.uid) Buffers: shared hit=2119 read=660 I/O Timings: shared read=385.958 -> Nested Loop Left Join (cost=1.70..3742.63 rows=156 width=149) (actual time=3.212..369.737 rows=208 loops=1) Buffers: shared hit=1320 read=618 I/O Timings: shared read=364.992 -> Nested Loop (cost=1.12..3316.62 rows=156 width=188) (actual time=1.266..28.471 rows=208 loops=1) Buffers: shared hit=849 read=49 I/O Timings: shared read=26.775 -> Index Only Scan using orders_owner_covering on orders o_1 (cost=0.56..187.36 rows=638 width=123) (actual time=1.247..2.194 rows=210 loops=1) Index Cond: (owner = '\x8ef4fb956d0cb06ca9e3db76040f08154e8d0122'::bytea) Heap Fetches: 1 Buffers: shared hit=24 read=7 I/O Timings: shared read=2.038 -> Index Only Scan using order_rewards_pkey on order_execution oe_1 (cost=0.56..4.87 rows=3 width=65) (actual time=0.124..0.124 rows=1 loops=210) Index Cond: (order_uid = o_1.uid) Heap Fetches: 4 Buffers: shared hit=825 read=42 I/O Timings: shared read=24.736 -> Index Scan using auction_prices_pkey on auction_prices ap_1 (cost=0.58..34.99 rows=16 width=40) (actual time=1.639..1.639 rows=1 loops=208) Index Cond: ((auction_id = oe_1.auction_id) AND (token = CASE o_1.kind WHEN 'sell'::orderkind THEN o_1.buy_token ELSE o_1.sell_token END)) Buffers: shared hit=471 read=569 I/O Timings: shared read=338.217 -> Index Only Scan using trades_covering on trades t_1 (cost=0.56..4.29 rows=2 width=81) (actual time=0.109..0.109 rows=1 loops=208) Index Cond: (order_uid = oe_1.order_uid) Heap Fetches: 0 Buffers: shared hit=799 read=42 I/O Timings: shared read=20.966 -> Subquery Scan on "*SELECT* 3" (cost=2.67..94.60 rows=1 width=136) (actual time=0.012..0.014 rows=0 loops=1) Buffers: shared hit=3 -> Nested Loop Left Join (cost=2.67..94.59 rows=1 width=136) (actual time=0.011..0.013 rows=0 loops=1) Buffers: shared hit=3 -> Nested Loop (cost=2.09..91.83 rows=1 width=159) (actual time=0.011..0.013 rows=0 loops=1) Buffers: shared hit=3 -> Nested Loop (cost=1.54..87.55 rows=1 width=208) (actual time=0.011..0.012 rows=0 loops=1) Buffers: shared hit=3 -> Nested Loop Anti Join (cost=0.98..82.50 rows=1 width=127) (actual time=0.011..0.011 rows=0 loops=1) Buffers: shared hit=3 -> Index Scan using jit_user_order_creation_timestamp on jit_orders j (cost=0.42..37.21 rows=9 width=127) (actual time=0.010..0.010 rows=0 loops=1) Index Cond: (owner = '\x8ef4fb956d0cb06ca9e3db76040f08154e8d0122'::bytea) Buffers: shared hit=3 -> Index Only Scan using orders_pkey on orders o_2 (cost=0.56..5.02 rows=1 width=57) (never executed) Index Cond: (uid = j.uid) Heap Fetches: 0 -> Index Only Scan using trades_covering on trades t_2 (cost=0.56..5.03 rows=2 width=81) (never executed) Index Cond: (order_uid = j.uid) Heap Fetches: 0 -> Index Only Scan using order_rewards_pkey on order_execution oe_2 (cost=0.56..4.25 rows=3 width=65) (never executed) Index Cond: (order_uid = t_2.order_uid) Heap Fetches: 0 -> Index Scan using auction_prices_pkey on auction_prices ap_2 (cost=0.58..35.01 rows=16 width=40) (never executed) Index Cond: ((auction_id = oe_2.auction_id) AND (token = CASE j.kind WHEN 'sell'::orderkind THEN j.buy_token ELSE j.sell_token END)) Planning: Buffers: shared hit=892 read=21 I/O Timings: shared read=14.023 Planning Time: 18.375 ms Execution Time: 1902.681 ms ```
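The new plan gets its Index Only Scans from the covering indexes referenced above (`trades_covering`, `orders_owner_covering`), created in `V103__create_convering_indexes_for_surplus.sql`. A plausible shape for them, with the covered columns inferred from the plans rather than copied from the migration (the real definitions may differ):

```sql
-- Sketch only: index names come from the query plans above; the INCLUDE column
-- lists are assumptions, not the actual migration contents.
CREATE INDEX CONCURRENTLY IF NOT EXISTS trades_covering
    ON trades (order_uid)
    INCLUDE (sell_amount, buy_amount, fee_amount);

CREATE INDEX CONCURRENTLY IF NOT EXISTS orders_owner_covering
    ON orders (owner)
    INCLUDE (uid, kind, sell_token, buy_token, sell_amount, buy_amount);
```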

## How to test

Because floating-point addition is not associative and the order of summation in the old query is not deterministic (the ORDER BY uid is merely an approximation of it), the validation script leaves some room for differences, 1e-9 to be precise.
Validation script

``` #!/usr/bin/env python3 """ Compare original and optimized surplus queries for correctness. Picks random addresses and verifies both queries return identical results. """ import os import sys import psycopg2 from psycopg2 import sql from decimal import Decimal # Connection settings - override with environment variables DB_CONFIG = { "host": os.getenv("DB_HOST", "localhost"), "port": os.getenv("DB_PORT", "5432"), "dbname": os.getenv("DB_NAME", "your_database"), "user": os.getenv("DB_USER", "your_user"), "password": os.getenv("DB_PASSWORD", ""), } ORIGINAL_QUERY = """ WITH regular_orders AS ( SELECT ARRAY_AGG(uid) AS ids FROM orders WHERE owner = $1 ), onchain_orders AS ( SELECT ARRAY_AGG(uid) AS ids FROM onchain_placed_orders WHERE sender = $1 ), trade_components AS ( SELECT CASE kind WHEN 'sell' THEN t.buy_amount WHEN 'buy' THEN t.sell_amount - t.fee_amount END AS trade_amount, CASE kind WHEN 'sell' THEN (t.sell_amount - t.fee_amount) * o.buy_amount / o.sell_amount WHEN 'buy' THEN t.buy_amount * o.sell_amount / o.buy_amount END AS limit_amount, o.kind, CASE kind WHEN 'sell' THEN (SELECT price FROM auction_prices ap WHERE ap.token = o.buy_token AND ap.auction_id = oe.auction_id) WHEN 'buy' THEN (SELECT price FROM auction_prices ap WHERE ap.token = o.sell_token AND ap.auction_id = oe.auction_id) END AS surplus_token_native_price FROM orders o JOIN trades t ON o.uid = t.order_uid JOIN order_execution oe ON o.uid = oe.order_uid WHERE o.uid = ANY(ARRAY_CAT((SELECT ids FROM regular_orders), (SELECT ids FROM onchain_orders))) UNION ALL SELECT CASE j.kind WHEN 'sell' THEN t.buy_amount WHEN 'buy' THEN t.sell_amount - t.fee_amount END AS trade_amount, CASE j.kind WHEN 'sell' THEN (t.sell_amount - t.fee_amount) * j.buy_amount / j.sell_amount WHEN 'buy' THEN t.buy_amount * j.sell_amount / j.buy_amount END AS limit_amount, j.kind, CASE j.kind WHEN 'sell' THEN (SELECT price FROM auction_prices ap WHERE ap.token = j.buy_token AND ap.auction_id = oe.auction_id) WHEN 'buy' THEN (SELECT price FROM auction_prices ap WHERE ap.token = j.sell_token AND ap.auction_id = oe.auction_id) END AS surplus_token_native_price FROM jit_orders j JOIN trades t ON j.uid = t.order_uid JOIN order_execution oe ON j.uid = oe.order_uid WHERE j.owner = $1 AND NOT EXISTS ( SELECT 1 FROM orders o WHERE o.uid = j.uid ) ), trade_surplus AS ( SELECT CASE kind WHEN 'sell' THEN (trade_amount - limit_amount) * surplus_token_native_price WHEN 'buy' THEN (limit_amount - trade_amount) * surplus_token_native_price END / POWER(10, 18) AS surplus_in_wei FROM trade_components ) SELECT COALESCE(SUM(surplus_in_wei), 0) AS total_surplus_in_wei FROM trade_surplus; """ OPTIMIZED_QUERY = """ WITH trade_components AS ( -- Regular orders: join trades first, then order_execution SELECT o.uid, CASE o.kind WHEN 'sell' THEN t.buy_amount WHEN 'buy' THEN t.sell_amount - t.fee_amount END AS trade_amount, CASE o.kind WHEN 'sell' THEN (t.sell_amount - t.fee_amount) * o.buy_amount / o.sell_amount WHEN 'buy' THEN t.buy_amount * o.sell_amount / o.buy_amount END AS limit_amount, o.kind, ap.price AS surplus_token_native_price FROM orders o JOIN trades t ON t.order_uid = o.uid JOIN order_execution oe ON oe.order_uid = t.order_uid LEFT JOIN auction_prices ap ON ap.auction_id = oe.auction_id AND ap.token = CASE o.kind WHEN 'sell' THEN o.buy_token ELSE o.sell_token END WHERE o.owner = $1 UNION ALL -- Onchain placed orders (if sender differs from owner) SELECT o.uid, CASE o.kind WHEN 'sell' THEN t.buy_amount WHEN 'buy' THEN t.sell_amount - t.fee_amount END AS 
trade_amount, CASE o.kind WHEN 'sell' THEN (t.sell_amount - t.fee_amount) * o.buy_amount / o.sell_amount WHEN 'buy' THEN t.buy_amount * o.sell_amount / o.buy_amount END AS limit_amount, o.kind, ap.price AS surplus_token_native_price FROM onchain_placed_orders opo JOIN orders o ON o.uid = opo.uid AND o.owner != $1 JOIN trades t ON t.order_uid = o.uid JOIN order_execution oe ON oe.order_uid = t.order_uid LEFT JOIN auction_prices ap ON ap.auction_id = oe.auction_id AND ap.token = CASE o.kind WHEN 'sell' THEN o.buy_token ELSE o.sell_token END WHERE opo.sender = $1 UNION ALL -- JIT orders SELECT j.uid, CASE j.kind WHEN 'sell' THEN t.buy_amount WHEN 'buy' THEN t.sell_amount - t.fee_amount END AS trade_amount, CASE j.kind WHEN 'sell' THEN (t.sell_amount - t.fee_amount) * j.buy_amount / j.sell_amount WHEN 'buy' THEN t.buy_amount * j.sell_amount / j.buy_amount END AS limit_amount, j.kind, ap.price AS surplus_token_native_price FROM jit_orders j JOIN trades t ON t.order_uid = j.uid JOIN order_execution oe ON oe.order_uid = t.order_uid LEFT JOIN auction_prices ap ON ap.auction_id = oe.auction_id AND ap.token = CASE j.kind WHEN 'sell' THEN j.buy_token ELSE j.sell_token END WHERE j.owner = $1 AND NOT EXISTS (SELECT 1 FROM orders o WHERE o.uid = j.uid) ) SELECT COALESCE(SUM(surplus_in_wei ORDER BY uid), 0) AS total_surplus_in_wei FROM ( SELECT uid, CASE kind WHEN 'sell' THEN (trade_amount - limit_amount) * surplus_token_native_price WHEN 'buy' THEN (limit_amount - trade_amount) * surplus_token_native_price END / POWER(10, 18) AS surplus_in_wei FROM trade_components ) ts; """ # Query to get random addresses with varying order counts SAMPLE_ADDRESSES_QUERY = """ WITH address_order_counts AS ( SELECT owner AS address, COUNT(*) AS order_count FROM orders GROUP BY owner ), bucketed AS ( SELECT address, order_count, CASE WHEN order_count < 10 THEN '0: 1-9' WHEN order_count < 100 THEN '1: 10-99' WHEN order_count < 1000 THEN '2: 100-999' WHEN order_count < 10000 THEN '3: 1000-9999' ELSE '4: 10000+' END AS bucket FROM address_order_counts ) SELECT address, order_count, bucket FROM ( SELECT address, order_count, bucket, ROW_NUMBER() OVER (PARTITION BY bucket ORDER BY RANDOM()) AS rn FROM bucketed ) ranked WHERE rn <= %s ORDER BY bucket, order_count; """ def get_connection(): return psycopg2.connect(**DB_CONFIG) def fetch_sample_addresses(conn, samples_per_bucket=5): """Get random addresses from each order count bucket.""" with conn.cursor() as cur: cur.execute(SAMPLE_ADDRESSES_QUERY, (samples_per_bucket,)) return cur.fetchall() def run_query(conn, query, address): """Run a query with the given address parameter.""" # Convert $1 placeholder to %s for psycopg2 pg_query = query.replace("$1", "%s") # Count how many parameters we need param_count = pg_query.count("%s") with conn.cursor() as cur: cur.execute(pg_query, tuple([address] * param_count)) result = cur.fetchone() return result[0] if result else None def compare_results(original, optimized, tolerance=1e-9): """Compare two numeric results with floating point tolerance.""" if original is None and optimized is None: return True, "both NULL" if original is None or optimized is None: return False, f"NULL mismatch: original={original}, optimized={optimized}" # Convert to float for comparison orig_float = float(original) opt_float = float(optimized) if orig_float == opt_float: return True, "exact match" # Check relative difference for non-zero values if orig_float != 0: rel_diff = abs(orig_float - opt_float) / abs(orig_float) if rel_diff < tolerance: return True, 
f"within tolerance (rel_diff={rel_diff:.2e})" # Check absolute difference for values near zero abs_diff = abs(orig_float - opt_float) if abs_diff < tolerance: return True, f"within tolerance (abs_diff={abs_diff:.2e})" return False, f"MISMATCH: original={orig_float}, optimized={opt_float}, diff={abs_diff}" def main(): samples_per_bucket = int(sys.argv[1]) if len(sys.argv) > 1 else 5 print(f"Comparing original vs optimized query ({samples_per_bucket} samples per bucket)") print("=" * 80) conn = get_connection() # Get sample addresses print("\nFetching sample addresses...") addresses = fetch_sample_addresses(conn, samples_per_bucket) print(f"Found {len(addresses)} addresses across buckets\n") passed = 0 failed = 0 current_bucket = None for address, order_count, bucket in addresses: if bucket != current_bucket: current_bucket = bucket print(f"\n--- Bucket: {bucket} ---") # Run both queries original_result = run_query(conn, ORIGINAL_QUERY, address) optimized_result = run_query(conn, OPTIMIZED_QUERY, address) # Compare match, reason = compare_results(original_result, optimized_result) addr_hex = "0x" + address.hex() if isinstance(address, (bytes, memoryview)) else str(address) status = "✓" if match else "✗" print(f" {status} {addr_hex[:18]}... ({order_count:>5} orders): {reason}") if match: passed += 1 else: failed += 1 # Print detailed values on failure print(f" Original: {original_result}") print(f" Optimized: {optimized_result}") # Summary print("\n" + "=" * 80) print(f"SUMMARY: {passed} passed, {failed} failed out of {passed + failed} tests") conn.close() return 0 if failed == 0 else 1 if __name__ == "__main__": sys.exit(main()) ```

To validate the performance, I think it's best we give it a run in prod for anywhere from 30 minutes to 2 hours. Even while requiring indexes, the new query *should* be faster. --- .../orderbook/src/database/total_surplus.rs | 119 ++++++++++-------- database/README.md | 11 ++ ...__create_convering_indexes_for_surplus.sql | 3 + 3 files changed, 82 insertions(+), 51 deletions(-) create mode 100644 database/sql/V103__create_convering_indexes_for_surplus.sql diff --git a/crates/orderbook/src/database/total_surplus.rs b/crates/orderbook/src/database/total_surplus.rs index 3fc446fa1c..d90a16c0a7 100644 --- a/crates/orderbook/src/database/total_surplus.rs +++ b/crates/orderbook/src/database/total_surplus.rs @@ -9,76 +9,93 @@ use { /// and **NOT** quoted price) since march 2023. async fn fetch_total_surplus(ex: &mut PgConnection, user: &Address) -> Result { const TOTAL_SURPLUS_QUERY: &str = r#" -WITH regular_orders AS ( - SELECT ARRAY_AGG(uid) AS ids FROM orders WHERE owner = $1 -), -onchain_orders AS ( - SELECT ARRAY_AGG(uid) AS ids FROM onchain_placed_orders WHERE sender = $1 -), -trade_components AS ( +WITH trade_components AS ( SELECT - CASE kind - -- so much was actually bought - WHEN 'sell' THEN t.buy_amount - -- so much was actually converted to buy tokens - WHEN 'buy' THEN t.sell_amount - t.fee_amount - END AS trade_amount, - CASE kind - -- so much had to be bought at least (given exeucted amount and limit price) - WHEN 'sell' THEN (t.sell_amount - t.fee_amount) * o.buy_amount / o.sell_amount - -- so much could be converted to buy_token at most (given executed amount and limit price) - WHEN 'buy' THEN t.buy_amount * o.sell_amount / o.buy_amount - END AS limit_amount, - o.kind, - CASE kind - WHEN 'sell' THEN (SELECT price FROM auction_prices ap WHERE ap.token = o.buy_token AND ap.auction_id = oe.auction_id) - WHEN 'buy' THEN (SELECT price FROM auction_prices ap WHERE ap.token = o.sell_token AND ap.auction_id = oe.auction_id) - END AS surplus_token_native_price + o.uid, + CASE o.kind + WHEN 'sell' THEN t.buy_amount + WHEN 'buy' THEN t.sell_amount - t.fee_amount + END AS trade_amount, + CASE o.kind + WHEN 'sell' THEN (t.sell_amount - t.fee_amount) * o.buy_amount / o.sell_amount + WHEN 'buy' THEN t.buy_amount * o.sell_amount / o.buy_amount + END AS limit_amount, + o.kind, + ap.price AS surplus_token_native_price FROM orders o - JOIN trades t ON o.uid = t.order_uid - JOIN order_execution oe ON o.uid = oe.order_uid - -- use this weird construction instead of `where owner=address or sender=address` to help postgres make efficient use of indices - WHERE uid = ANY(ARRAY_CAT((SELECT ids FROM regular_orders), (SELECT ids FROM onchain_orders))) + JOIN trades t ON t.order_uid = o.uid + JOIN order_execution oe ON oe.order_uid = t.order_uid + LEFT JOIN auction_prices ap + ON ap.auction_id = oe.auction_id + AND ap.token = CASE o.kind WHEN 'sell' THEN o.buy_token ELSE o.sell_token END + WHERE o.owner = $1 + + UNION ALL + + SELECT + o.uid, + CASE o.kind + -- so much was actually bought + WHEN 'sell' THEN t.buy_amount + -- so much was actually converted to buy tokens + WHEN 'buy' THEN t.sell_amount - t.fee_amount + END AS trade_amount, + CASE o.kind + -- so much had to be bought at least (given executed amount and limit price) + WHEN 'sell' THEN (t.sell_amount - t.fee_amount) * o.buy_amount / o.sell_amount + -- so much could be converted to buy_token at most (given executed amount and limit price) + WHEN 'buy' THEN t.buy_amount * o.sell_amount / o.buy_amount + END AS limit_amount, + o.kind, + ap.price AS 
surplus_token_native_price + FROM onchain_placed_orders opo + JOIN orders o ON o.uid = opo.uid AND o.owner != $1 + JOIN trades t ON t.order_uid = o.uid + JOIN order_execution oe ON oe.order_uid = t.order_uid + LEFT JOIN auction_prices ap + ON ap.auction_id = oe.auction_id + AND ap.token = CASE o.kind WHEN 'sell' THEN o.buy_token ELSE o.sell_token END + WHERE opo.sender = $1 UNION ALL -- Additional query for jit_orders SELECT - CASE j.kind - WHEN 'sell' THEN t.buy_amount - WHEN 'buy' THEN t.sell_amount - t.fee_amount - END AS trade_amount, - CASE j.kind - WHEN 'sell' THEN (t.sell_amount - t.fee_amount) * j.buy_amount / j.sell_amount - WHEN 'buy' THEN t.buy_amount * j.sell_amount / j.buy_amount - END AS limit_amount, - j.kind, - CASE j.kind - WHEN 'sell' THEN (SELECT price FROM auction_prices ap WHERE ap.token = j.buy_token AND ap.auction_id = oe.auction_id) - WHEN 'buy' THEN (SELECT price FROM auction_prices ap WHERE ap.token = j.sell_token AND ap.auction_id = oe.auction_id) - END AS surplus_token_native_price + j.uid, + CASE j.kind + WHEN 'sell' THEN t.buy_amount + WHEN 'buy' THEN t.sell_amount - t.fee_amount + END AS trade_amount, + CASE j.kind + WHEN 'sell' THEN (t.sell_amount - t.fee_amount) * j.buy_amount / j.sell_amount + WHEN 'buy' THEN t.buy_amount * j.sell_amount / j.buy_amount + END AS limit_amount, + j.kind, + ap.price AS surplus_token_native_price FROM jit_orders j JOIN trades t ON j.uid = t.order_uid - JOIN order_execution oe ON j.uid = oe.order_uid - WHERE j.owner = $1 AND NOT EXISTS ( + JOIN order_execution oe ON t.order_uid = oe.order_uid + LEFT JOIN auction_prices ap + ON ap.auction_id = oe.auction_id + AND ap.token = CASE j.kind WHEN 'sell' THEN j.buy_token ELSE j.sell_token END + WHERE j.owner = $1 + AND NOT EXISTS ( SELECT 1 FROM orders o WHERE o.uid = j.uid ) -), -trade_surplus AS ( +) +SELECT + COALESCE(SUM(surplus_in_wei ORDER BY uid), 0) AS total_surplus_in_wei +FROM ( SELECT + uid, CASE kind - -- amounts refer to tokens bought; more is better WHEN 'sell' THEN (trade_amount - limit_amount) * surplus_token_native_price - -- amounts refer to tokens sold; less is better WHEN 'buy' THEN (limit_amount - trade_amount) * surplus_token_native_price END / POWER(10, 18) AS surplus_in_wei FROM trade_components -) -SELECT - COALESCE(SUM(surplus_in_wei), 0) AS total_surplus_in_wei -FROM trade_surplus +) ts; "#; sqlx::query_scalar(TOTAL_SURPLUS_QUERY) diff --git a/database/README.md b/database/README.md index 155d6731f7..206b169c97 100644 --- a/database/README.md +++ b/database/README.md @@ -8,6 +8,7 @@ Some tables only store data emitted via smart contract events. Because we only h [CoWSwapEthFlow](https://github.com/cowprotocol/ethflowcontract/blob/main/src/CoWSwapEthFlow.sol) we actually deployed twice so events related to the staging environment should only show up in the staging DB and likewise for production. It's also important to note that we only index events from blocks that we are certain will not get reorged. That means specifically that events will be indexed with a block delay of at least 64. + ### app\_data Associates the 32 bytes contract app data with the corresponding full app data. @@ -626,3 +627,13 @@ We support different expiration times for orders with different signing schemes. market | Short lived order that may receive surplus. Users agree to a static fee upfront by signing it. liquidity | These orders must be traded at their limit price and may not receive any surplus. Violating this is a slashable offence. 
limit | Long lived order that may receive surplus. Users sign a static fee of 0 upfront and either the backend or the solvers compute a dynamic fee that gets taken from the surplus (while still respecting the user's limit price!). + +## Notes on Migrations + +Migrations that require a long running process *must* be done manually, this is due to the limitations the weekly release process imposes: +* The deployment must complete under 5 minutes +* The pod has a `processDeadlineSeconds` defaulting to 600 seconds + +To avoid extending the process, we resort to manually applying complicated migrations. + +The above also comes into play when dealing with indexes, as their construction with flyway may lock up rows, degrading SLI. diff --git a/database/sql/V103__create_convering_indexes_for_surplus.sql b/database/sql/V103__create_convering_indexes_for_surplus.sql new file mode 100644 index 0000000000..d6e783f055 --- /dev/null +++ b/database/sql/V103__create_convering_indexes_for_surplus.sql @@ -0,0 +1,3 @@ +-- covering indexes to avoid IO when calculating the total surplus for users +CREATE INDEX CONCURRENTLY IF NOT EXISTS trades_covering ON trades (order_uid) INCLUDE (buy_amount, sell_amount, fee_amount); +CREATE INDEX CONCURRENTLY IF NOT EXISTS orders_owner_covering ON orders (owner) INCLUDE (uid, kind, buy_amount, sell_amount, fee_amount, buy_token, sell_token); From f8087e5b4c1904059741d91a8c93e687ce4d446e Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Fri, 6 Feb 2026 12:18:55 +0100 Subject: [PATCH 038/219] Filter refunded ethflow orders from all solvable orders query (#4129) # Description In a recent change the query to return ALL solvable orders suddenly returned ~45K orders more. This caused an increased memory usage on the start of the autopilot and made the DB query slower than it could be. # Changes Filter out refunded ethflow orders from solvable orders query. ## How to test confirmed with a manual query execution that the number of returned orders is in line with the final_auction_size + filtered_out_queries indicating that the all_solvable_orders query and the incremental logic afterwards agree with each other. increase in memory usage Screenshot 2026-02-06 at 10 45 44 first auction after restart is huge Screenshot 2026-02-06 at 10 53 45 --- crates/database/src/orders.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/database/src/orders.rs b/crates/database/src/orders.rs index 8da392897a..62be3eae81 100644 --- a/crates/database/src/orders.rs +++ b/crates/database/src/orders.rs @@ -729,6 +729,7 @@ pub fn solvable_orders( AND NOT EXISTS (SELECT 1 FROM invalidations i WHERE i.order_uid = o.uid) AND NOT EXISTS (SELECT 1 FROM onchain_order_invalidations oi WHERE oi.uid = o.uid) AND NOT EXISTS (SELECT 1 FROM onchain_placed_orders op WHERE op.uid = o.uid AND op.placement_error IS NOT NULL) + AND NOT EXISTS (SELECT 1 FROM ethflow_refunds r WHERE r.order_uid = o.uid) ), trades_agg AS ( SELECT t.order_uid, From 0474b7e0b6d6248dd53172c38a952f3ee2075944 Mon Sep 17 00:00:00 2001 From: ilya Date: Fri, 6 Feb 2026 14:24:14 +0300 Subject: [PATCH 039/219] Upgrade `time` crate (#4130) Upgrades the `time` crate to satisfy `cargo audit`. 
--- Cargo.lock | 18 +++++++++--------- Cargo.toml | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8dd5ec4ef7..423221c014 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2538,7 +2538,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ab67060fc6b8ef687992d439ca0fa36e7ed17e9a0b16b25b601e8757df720de" dependencies = [ "data-encoding", - "syn 1.0.109", + "syn 2.0.114", ] [[package]] @@ -4436,9 +4436,9 @@ dependencies = [ [[package]] name = "num-conv" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050" [[package]] name = "num-integer" @@ -6871,9 +6871,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.45" +version = "0.3.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9e442fc33d7fdb45aa9bfeb312c095964abdf596f7567261062b2a7107aaabd" +checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" dependencies = [ "deranged", "itoa", @@ -6888,15 +6888,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b36ee98fd31ec7426d599183e8fe26932a8dc1fb76ddb6214d05493377d34ca" +checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" [[package]] name = "time-macros" -version = "0.2.25" +version = "0.2.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71e552d1249bf61ac2a52db88179fd0673def1e1ad8243a00d9ec9ed71fee3dd" +checksum = "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215" dependencies = [ "num-conv", "time-core", diff --git a/Cargo.toml b/Cargo.toml index 773660c1ac..1853a4e068 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -92,7 +92,7 @@ solvers = { path = "crates/solvers" } solvers-dto = { path = "crates/solvers-dto" } testlib = { path = "crates/testlib" } winner-selection = { path = "crates/winner-selection" } -time = "0.3.37" +time = "0.3.47" tiny-keccak = "2.0.2" tower = "0.4" tower-http = "0.4" From 950180e2b5911a2980dce9192e3be3ef42673862 Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Fri, 6 Feb 2026 12:59:34 +0100 Subject: [PATCH 040/219] Tighter filter on all solvable orders query (#4131) # Description Forgot that I already hit `merge when ready` on https://github.com/cowprotocol/services/pull/4129 so I was to slow to merge the same optimization for the open orders query. 
--- crates/database/src/orders.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/database/src/orders.rs b/crates/database/src/orders.rs index 62be3eae81..ce95a6d913 100644 --- a/crates/database/src/orders.rs +++ b/crates/database/src/orders.rs @@ -967,6 +967,7 @@ pub async fn user_orders_with_quote( AND NOT EXISTS (SELECT 1 FROM invalidations i WHERE i.order_uid = o.uid) AND NOT EXISTS (SELECT 1 FROM onchain_order_invalidations oi WHERE oi.uid = o.uid) AND NOT EXISTS (SELECT 1 FROM onchain_placed_orders op WHERE op.uid = o.uid AND op.placement_error IS NOT NULL) + AND NOT EXISTS (SELECT 1 FROM ethflow_refunds r WHERE r.order_uid = o.uid) AND o.owner = $2 AND o.class = 'limit' ) From 2f144c6545c83597cf15eb07a2687d4e0f73bd71 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Duarte?= Date: Fri, 6 Feb 2026 12:02:31 +0000 Subject: [PATCH 041/219] Do not panic on failure to acquire connections to store events (#4132) # Description During a particularly bad period of volatility we failed to acquire a DB connection in time, which led to a panic
Partial Stacktrace

``` 1770314825856 2026-02-05T18:07:05.856Z 2026-02-05T18:07:03.033Z ERROR observe::tracing: thread 'tokio-runtime-worker' panicked at /src/crates/autopilot/src/infra/persistence/mod.rs:301:54: 1770314825856 2026-02-05T18:07:05.856Z failed to acquire tx: PoolTimedOut 1770314825856 2026-02-05T18:07:05.856Z stack backtrace: 1770314825856 2026-02-05T18:07:05.856Z 0: observe::tracing::tracing_panic_hook 1770314825856 2026-02-05T18:07:05.856Z at ./src/crates/observe/src/tracing.rs:187:21 1770314825856 2026-02-05T18:07:05.856Z 1: as core::ops::function::Fn>::call 1770314825856 2026-02-05T18:07:05.856Z at ./rustc/254b59607d4417e9dffbc307138ae5c86280fe4c/library/alloc/src/boxed.rs:2220:9 1770314825856 2026-02-05T18:07:05.856Z 2: observe::panic_hook::install::{{closure}} 1770314825856 2026-02-05T18:07:05.856Z at ./src/crates/observe/src/panic_hook.rs:14:9 1770314825856 2026-02-05T18:07:05.856Z 3: as core::ops::function::Fn>::call 1770314825856 2026-02-05T18:07:05.856Z at ./rustc/254b59607d4417e9dffbc307138ae5c86280fe4c/library/alloc/src/boxed.rs:2220:9 1770314825856 2026-02-05T18:07:05.856Z 4: std::panicking::panic_with_hook 1770314825856 2026-02-05T18:07:05.856Z at ./rustc/254b59607d4417e9dffbc307138ae5c86280fe4c/library/std/src/panicking.rs:833:13 1770314825856 2026-02-05T18:07:05.856Z 5: std::panicking::panic_handler::{{closure}} 1770314825856 2026-02-05T18:07:05.856Z at ./rustc/254b59607d4417e9dffbc307138ae5c86280fe4c/library/std/src/panicking.rs:698:13 1770314825856 2026-02-05T18:07:05.856Z 6: std::sys::backtrace::__rust_end_short_backtrace 1770314825856 2026-02-05T18:07:05.856Z at ./rustc/254b59607d4417e9dffbc307138ae5c86280fe4c/library/std/src/sys/backtrace.rs:176:18 1770314825856 2026-02-05T18:07:05.856Z 7: __rustc::rust_begin_unwind 1770314825856 2026-02-05T18:07:05.856Z at ./rustc/254b59607d4417e9dffbc307138ae5c86280fe4c/library/std/src/panicking.rs:689:5 1770314825856 2026-02-05T18:07:05.856Z 8: core::panicking::panic_fmt 1770314825856 2026-02-05T18:07:05.856Z at ./rustc/254b59607d4417e9dffbc307138ae5c86280fe4c/library/core/src/panicking.rs:80:14 1770314825856 2026-02-05T18:07:05.856Z 9: core::result::unwrap_failed 1770314825856 2026-02-05T18:07:05.856Z at ./rustc/254b59607d4417e9dffbc307138ae5c86280fe4c/library/core/src/result.rs:1867:5 1770314825856 2026-02-05T18:07:05.856Z 10: core::result::Result::expect 1770314825856 2026-02-05T18:07:05.856Z at ./rustc/254b59607d4417e9dffbc307138ae5c86280fe4c/library/core/src/result.rs:1185:23 1770314825856 2026-02-05T18:07:05.856Z 11: autopilot::infra::persistence::Persistence::store_order_events::{{closure}} 1770314825856 2026-02-05T18:07:05.856Z at ./src/crates/autopilot/src/infra/persistence/mod.rs:301:54 ```
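The fix (see the changes below) swaps the `expect` at the top of that trace for a match that logs and carries on. A minimal sketch of the pattern, assuming sqlx's `PgPool` and a hypothetical `write_events` helper with simplified event types — not the crate's exact code:

```rust
use sqlx::PgPool;

/// Best-effort persistence: failing to get a connection is logged instead of
/// tearing down the whole process with a panic.
async fn store_events_best_effort(pool: PgPool, events: Vec<String>) {
    tokio::spawn(async move {
        match pool.acquire().await {
            Ok(mut conn) => {
                // hypothetical helper that writes the events using the connection
                if let Err(err) = write_events(&mut conn, &events).await {
                    tracing::error!(?err, "failed to store order events");
                }
            }
            Err(err) => {
                // order events are observability data; dropping them under load
                // is preferable to panicking the process
                tracing::error!(?err, "failed to acquire a connection to store order events");
            }
        }
    });
}

/// Stub standing in for the real insert logic.
async fn write_events(
    _conn: &mut sqlx::pool::PoolConnection<sqlx::Postgres>,
    _events: &[String],
) -> sqlx::Result<()> {
    Ok(())
}
```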

# Changes - [ ] Remove the panic - [ ] Log on error and continue ## How to test NA --- crates/autopilot/src/infra/persistence/mod.rs | 13 +++++++++++-- crates/database/src/orders.rs | 1 + 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/crates/autopilot/src/infra/persistence/mod.rs b/crates/autopilot/src/infra/persistence/mod.rs index c6fcf7e4d7..3f3d0b2f5f 100644 --- a/crates/autopilot/src/infra/persistence/mod.rs +++ b/crates/autopilot/src/infra/persistence/mod.rs @@ -298,8 +298,17 @@ impl Persistence { let order_uids = order_uids.into_iter().collect(); tokio::spawn( async move { - let mut tx = db.pool.acquire().await.expect("failed to acquire tx"); - store_order_events(&mut tx, order_uids, label, Utc::now()).await; + match db.pool.acquire().await { + Ok(mut tx) => { + store_order_events(&mut tx, order_uids, label, Utc::now()).await; + } + Err(err) => { + tracing::error!( + ?err, + "failed to acquire a connection to store order events!" + ); + } + }; } .instrument(tracing::Span::current()), ); diff --git a/crates/database/src/orders.rs b/crates/database/src/orders.rs index ce95a6d913..c318393123 100644 --- a/crates/database/src/orders.rs +++ b/crates/database/src/orders.rs @@ -964,6 +964,7 @@ pub async fn user_orders_with_quote( FROM orders o WHERE o.cancellation_timestamp IS NULL AND o.true_valid_to >= $1 + AND NOT EXISTS (SELECT 1 FROM ethflow_refunds r WHERE r.order_uid = o.uid) AND NOT EXISTS (SELECT 1 FROM invalidations i WHERE i.order_uid = o.uid) AND NOT EXISTS (SELECT 1 FROM onchain_order_invalidations oi WHERE oi.uid = o.uid) AND NOT EXISTS (SELECT 1 FROM onchain_placed_orders op WHERE op.uid = o.uid AND op.placement_error IS NOT NULL) From a0b65c1b03f8de9a1e9f29ad4d1af3873266fd58 Mon Sep 17 00:00:00 2001 From: ilya Date: Fri, 6 Feb 2026 18:13:51 +0300 Subject: [PATCH 042/219] Normalize approximation native token decimals (#4125) # Description Instead of validating that native price approximation token pairs have matching decimals at startup, this PR normalizes prices based on the decimal difference between tokens, as was suggested in another PR comment[[link](https://github.com/cowprotocol/services/pull/4119#pullrequestreview-3749864175)]. 
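A worked example of that normalization with made-up prices: two tokens pegged 1:1 in value but with 6 and 18 decimals differ in per-smallest-unit native price by exactly 10^(18-6), so the fetched price has to be scaled by that factor. A minimal sketch of the arithmetic, not the crate's `ApproximationToken` itself:

```rust
/// Scaling factor for approximating a token's native price via a peg token:
/// factor = 10^(approximation_token_decimals - source_token_decimals).
fn normalization_factor(source_decimals: u8, approximation_decimals: u8) -> f64 {
    10f64.powi(i32::from(approximation_decimals) - i32::from(source_decimals))
}

fn main() {
    // A 6-decimal (USDC-like) token approximated by an 18-decimal (DAI-like) token.
    // If the 18-decimal token's smallest unit is worth 5e-22 ETH, the 6-decimal
    // token's smallest unit is worth 5e-22 * 10^12 = 5e-10 ETH.
    let factor = normalization_factor(6, 18);
    assert!(((5e-22 * factor) - 5e-10).abs() / 5e-10 < 1e-12);

    // In the other direction the factor is 10^-12.
    let factor = normalization_factor(18, 6);
    assert!(((5e-10 * factor) - 5e-22).abs() / 5e-22 < 1e-12);
    println!("normalization example ok");
}
```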
# Changes - Added `ApproximationToken` type that stores both the approximation address and a normalization factor `(10^(to_decimals - from_decimals))` - Factory fetches decimals for all approximation token pairs at startup and computes normalization factors - Price approximation now multiplies the fetched price by the normalization factor, correctly handling tokens with different decimals ## How to test Added unit tests for normalization in both directions (source > target decimals and target > source decimals) --- crates/autopilot/src/solvable_orders.rs | 6 +- crates/shared/src/price_estimation/factory.rs | 57 ++++++- crates/shared/src/price_estimation/mod.rs | 3 - .../price_estimation/native_price_cache.rs | 155 +++++++++++++++++- 4 files changed, 202 insertions(+), 19 deletions(-) diff --git a/crates/autopilot/src/solvable_orders.rs b/crates/autopilot/src/solvable_orders.rs index 458aab5523..db46801587 100644 --- a/crates/autopilot/src/solvable_orders.rs +++ b/crates/autopilot/src/solvable_orders.rs @@ -913,6 +913,7 @@ mod tests { HEALTHY_PRICE_ESTIMATION_TIME, PriceEstimationError, native::MockNativePriceEstimating, + native_price_cache::ApproximationToken, }, signature_validator::{MockSignatureValidating, SignatureValidationError}, }, @@ -1151,7 +1152,10 @@ mod tests { Default::default(), 3, // Set to use native price approximations for the following tokens - HashMap::from([(token1, token_approx1), (token2, token_approx2)]), + HashMap::from([ + (token1, ApproximationToken::same_decimals(token_approx1)), + (token2, ApproximationToken::same_decimals(token_approx2)), + ]), HEALTHY_PRICE_ESTIMATION_TIME, ); let metrics = Metrics::instance(observe::metrics::get_storage_registry()).unwrap(); diff --git a/crates/shared/src/price_estimation/factory.rs b/crates/shared/src/price_estimation/factory.rs index 305ecc11e8..195df20563 100644 --- a/crates/shared/src/price_estimation/factory.rs +++ b/crates/shared/src/price_estimation/factory.rs @@ -7,7 +7,7 @@ use { external::ExternalPriceEstimator, instrumented::InstrumentedPriceEstimator, native::{self, NativePriceEstimator}, - native_price_cache::CachingNativePriceEstimator, + native_price_cache::{ApproximationToken, CachingNativePriceEstimator}, sanitized::SanitizedPriceEstimator, trade_verifier::{TradeVerifier, TradeVerifying}, }, @@ -384,6 +384,10 @@ impl<'a> PriceEstimatorFactory<'a> { CompetitionEstimator::new(estimators, PriceRanking::MaxOutAmount) .with_verification(self.args.quote_verification) .with_early_return(results_required); + let approximation_tokens = self.build_approximation_tokens().await.context( + "failed to build native price approximation tokens with normalization factors", + )?; + let native_estimator = Arc::new(CachingNativePriceEstimator::new( Box::new(competition_estimator), self.args.native_price_cache_max_age, @@ -391,15 +395,56 @@ impl<'a> PriceEstimatorFactory<'a> { Some(self.args.native_price_cache_max_update_size), self.args.native_price_prefetch_time, self.args.native_price_cache_concurrent_requests, - self.args - .native_price_approximation_tokens - .iter() - .copied() - .collect(), + approximation_tokens, self.args.quote_timeout, )); Ok(native_estimator) } + + /// Builds the approximation tokens mapping with normalization factors based + /// on decimal differences between token pairs. 
+ async fn build_approximation_tokens(&self) -> Result<HashMap<Address, ApproximationToken>> { + let pairs = &self.args.native_price_approximation_tokens; + if pairs.is_empty() { + return Ok(HashMap::new()); + } + + // Collect all unique addresses to fetch their decimals + let all_addresses: Vec<Address>
= pairs + .iter() + .flat_map(|(from, to)| [*from, *to]) + .collect::>() + .into_iter() + .collect(); + + let token_infos = self.components.tokens.get_token_infos(&all_addresses).await; + + let mut approximation_tokens = HashMap::new(); + for (from_token, to_token) in pairs { + let from_decimals = token_infos + .get(from_token) + .and_then(|info| info.decimals) + .with_context(|| { + format!( + "could not fetch decimals for approximation source token {from_token:?}" + ) + })?; + + let to_decimals = token_infos + .get(to_token) + .and_then(|info| info.decimals) + .with_context(|| { + format!("could not fetch decimals for approximation target token {to_token:?}") + })?; + + approximation_tokens.insert( + *from_token, + ApproximationToken::with_normalization((*to_token, to_decimals), from_decimals), + ); + } + + Ok(approximation_tokens) + } } /// Trait for modelling the initialization of a Price estimator and its verified diff --git a/crates/shared/src/price_estimation/mod.rs b/crates/shared/src/price_estimation/mod.rs index 6c5ae2251b..1535c742cc 100644 --- a/crates/shared/src/price_estimation/mod.rs +++ b/crates/shared/src/price_estimation/mod.rs @@ -260,9 +260,6 @@ pub struct Arguments { /// "|,|" /// - token1 is a token address for which we get the native token price /// - approx_token1 is a token address used for the price approximation - /// - /// It is very important that both tokens in the pair have the same number - /// of decimals. #[clap( long, env, diff --git a/crates/shared/src/price_estimation/native_price_cache.rs b/crates/shared/src/price_estimation/native_price_cache.rs index 5e95f7e45f..bf49185402 100644 --- a/crates/shared/src/price_estimation/native_price_cache.rs +++ b/crates/shared/src/price_estimation/native_price_cache.rs @@ -20,6 +20,47 @@ use { tracing::{Instrument, instrument}, }; +/// Represents a token used for price approximation, including the normalization +/// factor needed to convert between tokens with potentially different decimals. +#[derive(Debug, Clone, Copy)] +pub struct ApproximationToken { + /// The address of the token to use for price approximation. + pub address: Address, + /// The factor to multiply the approximated price by to normalize for + /// decimal differences. Computed as 10^(to_decimals - from_decimals). + pub normalization_factor: f64, +} + +impl ApproximationToken { + /// Creates an approximation token with no decimal normalization needed + /// (both tokens have the same decimals). + pub fn same_decimals(address: Address) -> Self { + Self { + address, + normalization_factor: 1.0, + } + } + + /// Creates an approximation token with the specified normalization factor. + /// The normalization factor converts prices from the approximation token's + /// decimal basis to the source token's decimal basis. + pub fn with_normalization( + (peg_token, peg_token_decimals): (Address, u8), + token_decimals: u8, + ) -> Self { + let decimals_diff = i32::from(peg_token_decimals) - i32::from(token_decimals); + Self { + address: peg_token, + normalization_factor: 10f64.powi(decimals_diff), + } + } + + /// Applies the normalization factor to a price. + pub fn normalize_price(&self, price: f64) -> f64 { + price * self.normalization_factor + } +} + #[derive(prometheus_metric_storage::MetricStorage)] struct Metrics { /// native price cache hits misses @@ -61,9 +102,9 @@ struct Inner { /// This can be useful for tokens that are hard to route but are pegged to /// the same underlying asset so approximating their native prices is deemed /// safe (e.g. 
csUSDL => Dai). - /// It's very important that the 2 tokens have the same number of decimals. + /// The normalization factor handles decimal differences between tokens. /// After startup this is a read only value. - approximation_tokens: HashMap, + approximation_tokens: HashMap, quote_timeout: Duration, } @@ -191,12 +232,17 @@ impl Inner { } }; - let token_to_fetch = *self.approximation_tokens.get(token).unwrap_or(token); + let approximation = self + .approximation_tokens + .get(token) + .copied() + .unwrap_or(ApproximationToken::same_decimals(*token)); let result = self .estimator - .estimate_native_price(token_to_fetch, request_timeout) - .await; + .estimate_native_price(approximation.address, request_timeout) + .await + .map(|price| approximation.normalize_price(price)); // update price in cache if should_cache(&result) { @@ -341,7 +387,7 @@ impl CachingNativePriceEstimator { update_size: Option, prefetch_time: Duration, concurrent_requests: usize, - approximation_tokens: HashMap, + approximation_tokens: HashMap, quote_timeout: Duration, ) -> Self { let inner = Arc::new(Inner { @@ -582,10 +628,16 @@ mod tests { None, Default::default(), 1, - // set token approximations for tokens 1 and 2 + // set token approximations for tokens 1 and 2 (same decimals) HashMap::from([ - (Address::with_last_byte(1), Address::with_last_byte(100)), - (Address::with_last_byte(2), Address::with_last_byte(200)), + ( + Address::with_last_byte(1), + ApproximationToken::same_decimals(Address::with_last_byte(100)), + ), + ( + Address::with_last_byte(2), + ApproximationToken::same_decimals(Address::with_last_byte(200)), + ), ]), HEALTHY_PRICE_ESTIMATION_TIME, ); @@ -622,6 +674,91 @@ mod tests { ); } + #[tokio::test] + async fn approximation_normalizes_when_target_has_more_decimals() { + // Scenario: Token 1 is USDC-like (6 decimals), approximated by DAI-like token + // 100 (18 decimals) Both worth $1, so they're pegged 1:1 in value + let mut inner = MockNativePriceEstimating::new(); + // DAI-like token returns price of 5e-22 ETH per wei (smallest unit) + inner + .expect_estimate_native_price() + .times(1) + .withf(move |t, _| *t == token(100)) + .returning(|_, _| async { Ok(5e-22) }.boxed()); + + // from_decimals=6 (USDC), to_decimals=18 (DAI) + // Normalization factor = 10^(18-6) = 10^12 + // Price should be 5e-22 * 10^12 = 5e-10 ETH per USDC microunit + let estimator = CachingNativePriceEstimator::new( + Box::new(inner), + Duration::from_millis(30), + Default::default(), + None, + Default::default(), + 1, + HashMap::from([( + Address::with_last_byte(1), + ApproximationToken::with_normalization((Address::with_last_byte(100), 18), 6), + )]), + HEALTHY_PRICE_ESTIMATION_TIME, + ); + + let price = estimator + .estimate_native_price(Address::with_last_byte(1), HEALTHY_PRICE_ESTIMATION_TIME) + .await + .unwrap(); + // 5e-22 * 10^12 = 5e-10 + // Note: small floating point error due to 10^12 not being exactly representable + let expected = 5e-10; + assert!( + (price - expected).abs() / expected < f64::EPSILON, + "price {price} not within relative epsilon of {expected}" + ); + } + + #[tokio::test] + async fn approximation_normalizes_when_target_has_fewer_decimals() { + // Scenario: Token 1 is DAI-like (18 decimals), approximated by USDC-like token + // 100 (6 decimals) Both worth $1, so they're pegged 1:1 in value + let mut inner = MockNativePriceEstimating::new(); + // USDC-like token returns price of 5e-10 ETH per microunit (smallest unit) + inner + .expect_estimate_native_price() + .times(1) + .withf(move |t, _| *t == 
token(100)) + .returning(|_, _| async { Ok(5e-10) }.boxed()); + + // from_decimals=18 (DAI), to_decimals=6 (USDC) + // Normalization factor = 10^(6-18) = 10^-12 + // Price should be 5e-10 * 10^-12 = 5e-22 ETH per DAI wei + let estimator = CachingNativePriceEstimator::new( + Box::new(inner), + Duration::from_millis(30), + Default::default(), + None, + Default::default(), + 1, + HashMap::from([( + Address::with_last_byte(1), + ApproximationToken::with_normalization((Address::with_last_byte(100), 6), 18), + )]), + HEALTHY_PRICE_ESTIMATION_TIME, + ); + + let price = estimator + .estimate_native_price(Address::with_last_byte(1), HEALTHY_PRICE_ESTIMATION_TIME) + .await + .unwrap(); + // 5e-10 * 10^-12 = 5e-22 + // Note: small floating point error due to 10^-12 not being exactly + // representable + let expected = 5e-22; + assert!( + (price - expected).abs() / expected < f64::EPSILON, + "price {price} not within relative epsilon of {expected}" + ); + } + #[tokio::test] async fn caches_nonrecoverable_failed_estimates() { let mut inner = MockNativePriceEstimating::new(); From 0512c46141dcf8815b2c47c330158cc49b5534ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Duarte?= Date: Fri, 6 Feb 2026 16:06:25 +0000 Subject: [PATCH 043/219] Create composite index orders(owner, class, true_valid_to) (#4133) # Description The create_order endpoint is suffering from latency issues due to the `user_orders_with_quote` being super slow (we've seen up to 50s), this PR adds an index and removes the MATERIALIZE keyword to provide more optimization opportunities for the query planner
Plan before

``` Nested Loop (cost=29.14..122.50 rows=1 width=58) (actual time=0.036..0.038 rows=0 loops=1) Buffers: shared hit=4 -> Nested Loop Anti Join (cost=28.58..113.92 rows=1 width=79) (actual time=0.036..0.037 rows=0 loops=1) Buffers: shared hit=4 -> Nested Loop Anti Join (cost=28.44..97.75 rows=1 width=79) (actual time=0.036..0.037 rows=0 loops=1) Buffers: shared hit=4 -> Nested Loop Anti Join (cost=28.17..89.44 rows=1 width=79) (actual time=0.036..0.037 rows=0 loops=1) Buffers: shared hit=4 -> Nested Loop Anti Join (cost=27.89..73.21 rows=1 width=79) (actual time=0.036..0.037 rows=0 loops=1) Buffers: shared hit=4 -> Bitmap Heap Scan on orders o (cost=27.62..64.90 rows=1 width=79) (actual time=0.035..0.036 rows=0 loops=1) Recheck Cond: ((true_valid_to >= '4294967295'::bigint) AND (owner = '\xfd659bc79a2b542728e3f372870d22ed31358ed6'::bytea)) Filter: ((cancellation_timestamp IS NULL) AND (class = 'limit'::orderclass) AND (((kind = 'sell'::orderkind) AND (COALESCE((SubPlan 1), '0'::numeric) < sell_amount)) OR ((kind = 'buy'::orderkind) AND (COALESCE((SubPlan 2), '0'::numeric) < buy_amount)))) Buffers: shared hit=4 -> BitmapAnd (cost=27.62..27.62 rows=1 width=0) (actual time=0.033..0.034 rows=0 loops=1) Buffers: shared hit=4 -> Bitmap Index Scan on orders_true_valid_to (cost=0.00..4.51 rows=10 width=0) (actual time=0.023..0.023 rows=312 loops=1) Index Cond: (true_valid_to >= '4294967295'::bigint) Buffers: shared hit=3 -> Bitmap Index Scan on order_owner (cost=0.00..22.86 rows=381 width=0) (actual time=0.003..0.003 rows=0 loops=1) Index Cond: (owner = '\xfd659bc79a2b542728e3f372870d22ed31358ed6'::bytea) Buffers: shared hit=1 SubPlan 1 -> Aggregate (cost=16.62..16.63 rows=1 width=32) (never executed) -> Index Scan using trade_order_uid on trades (cost=0.56..16.61 rows=3 width=11) (never executed) Index Cond: (order_uid = o.uid) SubPlan 2 -> Aggregate (cost=16.62..16.63 rows=1 width=32) (never executed) -> Index Scan using trade_order_uid on trades trades_1 (cost=0.56..16.61 rows=3 width=11) (never executed) Index Cond: (order_uid = o.uid) -> Index Only Scan using ethflow_refunds_pkey on ethflow_refunds r (cost=0.27..4.29 rows=1 width=57) (never executed) Index Cond: (order_uid = o.uid) Heap Fetches: 0 -> Index Only Scan using invalidations_order_uid on invalidations i (cost=0.28..8.29 rows=1 width=57) (never executed) Index Cond: (order_uid = o.uid) Heap Fetches: 0 -> Index Only Scan using onchain_order_invalidations_pkey on onchain_order_invalidations oi (cost=0.27..4.29 rows=1 width=57) (never executed) Index Cond: (uid = o.uid) Heap Fetches: 0 -> Index Only Scan using okay_onchain_orders on onchain_placed_orders op (cost=0.14..8.16 rows=1 width=57) (never executed) Index Cond: (uid = o.uid) Heap Fetches: 0 -> Index Scan using order_quotes_pkey on order_quotes o_quotes (cost=0.56..8.57 rows=1 width=93) (never executed) Index Cond: (order_uid = o.uid) Planning: Buffers: shared hit=95 Planning Time: 0.816 ms Execution Time: 0.101 ms ```

Plan after

``` Hash Anti Join (cost=2977.50..115918.76 rows=1386 width=67) (actual time=0.012..0.014 rows=0 loops=1) Hash Cond: (o.uid = op.uid) Buffers: shared hit=4 -> Hash Anti Join (cost=2918.73..115842.49 rows=1386 width=124) (actual time=0.012..0.013 rows=0 loops=1) Hash Cond: (o.uid = i.order_uid) Buffers: shared hit=4 -> Nested Loop (cost=2798.94..115705.20 rows=1386 width=124) (actual time=0.012..0.013 rows=0 loops=1) Buffers: shared hit=4 -> Hash Anti Join (cost=2798.38..103609.69 rows=1412 width=81) (actual time=0.012..0.013 rows=0 loops=1) Hash Cond: (o.uid = oi.uid) Buffers: shared hit=4 -> Hash Anti Join (cost=2186.48..102979.96 rows=1414 width=81) (actual time=0.012..0.012 rows=0 loops=1) Hash Cond: (o.uid = r.order_uid) Buffers: shared hit=4 -> Index Scan using idx_orders_owner_class_valid on orders o (cost=0.56..100776.17 rows=1419 width=81) (actual time=0.011..0.012 rows=0 loops=1) Index Cond: ((owner = '\xfd659bc79a2b542728e3f372870d22ed31358ed6'::bytea) AND (class = 'limit'::orderclass) AND (true_valid_to >= '4294967295'::bigint)) Filter: ((cancellation_timestamp IS NULL) AND (((kind = 'sell'::orderkind) AND (COALESCE((SubPlan 1), '0'::numeric) < sell_amount)) OR ((kind = 'buy'::orderkind) AND (COALESCE((SubPlan 2), '0'::numeric) < buy_amount)))) Buffers: shared hit=4 SubPlan 1 -> Aggregate (cost=8.60..8.61 rows=1 width=32) (never executed) -> Index Only Scan using idx_trades_covering on trades (cost=0.56..8.59 rows=2 width=9) (never executed) Index Cond: (order_uid = o.uid) Heap Fetches: 0 SubPlan 2 -> Aggregate (cost=8.60..8.61 rows=1 width=32) (never executed) -> Index Only Scan using idx_trades_covering on trades trades_1 (cost=0.56..8.59 rows=2 width=11) (never executed) Index Cond: (order_uid = o.uid) Heap Fetches: 0 -> Hash (cost=1515.41..1515.41 rows=53641 width=57) (never executed) -> Seq Scan on ethflow_refunds r (cost=0.00..1515.41 rows=53641 width=57) (never executed) -> Hash (cost=398.62..398.62 rows=17062 width=57) (never executed) -> Seq Scan on onchain_order_invalidations oi (cost=0.00..398.62 rows=17062 width=57) (never executed) -> Index Scan using order_quotes_pkey on order_quotes o_quotes (cost=0.56..8.57 rows=1 width=100) (never executed) Index Cond: (order_uid = o.uid) -> Hash (cost=78.24..78.24 rows=3324 width=57) (never executed) -> Seq Scan on invalidations i (cost=0.00..78.24 rows=3324 width=57) (never executed) -> Hash (cost=53.80..53.80 rows=398 width=57) (never executed) -> Index Only Scan using okay_onchain_orders on onchain_placed_orders op (cost=0.27..53.80 rows=398 width=57) (never executed) Heap Fetches: 0 Planning: Buffers: shared hit=95 Planning Time: 0.833 ms Execution Time: 0.071 ms ```
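The difference between the two plans is that the new partial composite index covers every column the query constrains, so the planner can answer the filter with a single index scan, and dropping `MATERIALIZED` frees it to push those predicates into the CTE. A sketch of how the predicate lines up with the index (column roles annotated; parameter positions are illustrative, the real statements are in the diff below):

```rust
// New index (from the migration below):
//   orders_owner_class_valid_composite ON orders (owner, class, true_valid_to DESC)
//   WHERE cancellation_timestamp IS NULL
const LIVE_USER_ORDERS_FILTER: &str = r#"
    SELECT o.uid
    FROM orders o
    WHERE o.owner = $1                       -- leading index column (equality)
      AND o.class = 'limit'                  -- second index column (equality)
      AND o.true_valid_to >= $2              -- trailing index column (range scan)
      AND o.cancellation_timestamp IS NULL   -- matches the partial-index predicate
"#;

fn main() {
    println!("{LIVE_USER_ORDERS_FILTER}");
}
```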

# Changes - [ ] Adding the composite index - [ ] Dropping the MATERIALIZE ## How to test Create index in prod, push a test image, though this should be an easy win --- crates/database/src/orders.rs | 2 +- database/README.md | 3 +++ database/sql/V104__create_composite_index.sql | 2 ++ 3 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 database/sql/V104__create_composite_index.sql diff --git a/crates/database/src/orders.rs b/crates/database/src/orders.rs index c318393123..0e661d7729 100644 --- a/crates/database/src/orders.rs +++ b/crates/database/src/orders.rs @@ -959,7 +959,7 @@ pub async fn user_orders_with_quote( ) -> Result, sqlx::Error> { // Optimized version following the same pattern as OPEN_ORDERS const QUERY: &str = r#" - WITH live_orders AS MATERIALIZED ( + WITH live_orders AS ( SELECT o.* FROM orders o WHERE o.cancellation_timestamp IS NULL diff --git a/database/README.md b/database/README.md index 206b169c97..6a15f205d0 100644 --- a/database/README.md +++ b/database/README.md @@ -276,6 +276,8 @@ Indexes: - user_order_creation_timestamp: btree(`owner`, `creation_timestamp` DESC) - version_idx: btree(`settlement_contract`) - orders\_true\_valid\_to: btree(`true_valid_to`) +- orders_owner_covering: btree(`owner`) INCLUDE (`uid`, `kind`, `buy_amount`, `sell_amount`, `fee_amount`, `buy_token`, `sell_token`) +- orders_owner_class_valid_composite: btree(`owner`, `class`, `true_valid_to` DESC) WHERE cancellation_timestamp IS NULL ### fee_policies @@ -480,6 +482,7 @@ This table contains data of [`Trade`](https://github.com/cowprotocol/contracts/b Indexes: - PRIMARY KEY: btree(`block_number`, `log_index`) - trade\_order\_uid: btree (`order_uid`, `block_number`, `log_index`) +- trades_covering: btree(`order_uid`) INCLUDE (`buy_amount`, `sell_amount`, `fee_amount`) ### surplus\_capturing\_jit\_order\_owners diff --git a/database/sql/V104__create_composite_index.sql b/database/sql/V104__create_composite_index.sql new file mode 100644 index 0000000000..25c8c3f57f --- /dev/null +++ b/database/sql/V104__create_composite_index.sql @@ -0,0 +1,2 @@ +-- composite index to speed up query for user orders with quote +CREATE INDEX CONCURRENTLY IF NOT EXISTS orders_owner_class_valid_composite ON orders (owner, class, true_valid_to DESC) WHERE cancellation_timestamp IS NULL; From b8548195cef5badfd49f71af1feb1f30b976d253 Mon Sep 17 00:00:00 2001 From: ilya Date: Mon, 9 Feb 2026 23:39:56 +0300 Subject: [PATCH 044/219] Update CLAUDE.md (#4128) Adds some details about the project that help Claude provide better results. 
--- CLAUDE.md | 71 ++++++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 65 insertions(+), 6 deletions(-) diff --git a/CLAUDE.md b/CLAUDE.md index c8b776a3dd..0ed24cd9cb 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -18,6 +18,13 @@ This is a Rust workspace containing multiple services and libraries: - **database** - PostgreSQL abstraction and migrations - **model** - Serialization models for API - **contracts** - Smart contract bindings +- **ethrpc** - Extended Ethereum RPC client with batching layer +- **chain** - Blockchain interaction utilities +- **number** - Numerical type extensions and conversions for 256-bit integers +- **app-data** - Order metadata validation with 8KB default size limit +- **alerter** - Monitors orderbook metrics for orders that should be solved but aren't +- **testlib** - Shared helpers for writing unit and end-to-end tests +- **observe** - Initialization and helper functions for logging and metrics ## Architecture Overview @@ -60,7 +67,7 @@ User signs order → Orderbook validates → Autopilot includes in auction ## Technology Stack -- **Language**: Rust 2021+ Edition +- **Language**: Rust 2024 Edition - **Runtime**: Tokio async - **Database**: PostgreSQL with sqlx - **Web3**: Alloy @@ -71,12 +78,39 @@ User signs order → Orderbook validates → Autopilot includes in auction - **Protocol Documentation**: https://docs.cow.fi/ - Technical Reference: API specs and SDK docs - Concepts: Protocol fundamentals and architecture - -## Testing - -- Use `just` commands for running tests (see Justfile) +- **Alloy (Web3 library)**: Fetch https://alloy.rs/introduction/prompting for an AI-optimized guide covering providers, transactions, contracts, and migration from ethers-rs + +## Development Commands + +### Testing +- Use `cargo nextest run` instead of `cargo test` (CI uses nextest and handles global state differently) +- Run specific test suites: + - Unit tests: `cargo nextest run` + - Database tests: `cargo nextest run postgres -p orderbook -p database -p autopilot --test-threads 1 --run-ignored ignored-only` + - E2E local tests: `cargo nextest run -p e2e local_node --test-threads 1 --failure-output final --run-ignored ignored-only` + - E2E forked tests: `cargo nextest run -p e2e forked_node --test-threads 1 --run-ignored ignored-only --failure-output final` + - Driver tests: `RUST_MIN_STACK=3145728 cargo nextest run -p driver --test-threads 1 --run-ignored ignored-only` - E2E tests available in `crates/e2e` -- Local development environment in `playground/` + +### Testing Requirements +- PostgreSQL tests require local database: Run `docker compose up -d` first +- Forked network tests require `anvil` (from Foundry) and RPC URLs + - Anvil binary: configurable via `ANVIL_COMMAND` env var (defaults to `"anvil"`, must be in PATH) + - Required env vars: `FORK_URL_MAINNET` and `FORK_URL_GNOSIS` (RPC endpoints for forking) +- Use `--test-threads 1` for database and E2E tests to avoid conflicts +- CI runs doc-tests, unit tests, DB tests, E2E tests (local and forked), and driver tests + +### Linting and Formatting +- Format: **always** run with the nightly toolchain: `cargo +nightly fmt --all` +- Spot format: `cargo +nightly fmt -- ` (never call stable `cargo fmt`) +- Lint: `cargo clippy --locked --workspace --all-features --all-targets -- -D warnings` +- Check format: `cargo +nightly fmt --all -- --check` + +### Local Development Environment +- Start local PostgreSQL: `docker compose up -d` +- Full playground environment: `docker compose -f 
playground/docker-compose.fork.yml up -d` +- For forked network tests, set environment variables: `FORK_URL_MAINNET` and `FORK_URL_GNOSIS` +- Reset playground: `docker compose -f playground/docker-compose.fork.yml down --remove-orphans --volumes` ## Directory Structure @@ -87,6 +121,31 @@ playground/ # Local dev environment configs/ # Configuration files ``` +## Workspace Configuration + +- Rust Edition 2024 +- Uses workspace dependencies for consistency +- Tokio-console support: **Only available in playground environment** (set `TOKIO_CONSOLE=true` to activate when running in playground) +- Production builds do **not** include tokio-console overhead +- Runtime log filter changes via UNIX socket at `/tmp/log_filter_override__.sock` +- Memory allocator: Uses jemalloc by default with built-in heap profiling support (enable at runtime via MALLOC_CONF environment variable). Can optionally use mimalloc via `--features mimalloc-allocator` + +## Playground Environment + +- Runs in **Fork** mode: anvil forks a real network via `ETH_RPC_URL` (set in `playground/.env`). A clean local network mode is planned but not yet implemented. +- Access full local development stack with CoW Swap UI at http://localhost:8000 +- CoW Explorer available at http://localhost:8001 +- Orderbook API at http://localhost:8080 +- Database admin (Adminer) at http://localhost:8082 +- Uses test mnemonic: "test test test test test test test test test test test junk" +- First 10 accounts have 10000 ETH balance by default, set by anvil + +## Development Notes + +- Binaries support `--help` for comprehensive command documentation +- OpenAPI documentation available for orderbook, driver, and solver APIs +- Performance profiling: Only available in playground (requires tokio-console feature + tokio_unstable cfg) + # General Coding Instructions If there is a test you can run then run it or `cargo check` or `cargo build`; run it after you have made changes. From 903bcd335435a1decfafe34ed31f515916904520 Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Tue, 10 Feb 2026 11:09:46 +0100 Subject: [PATCH 045/219] Mount flyway.conf in docker-compose.yaml (#4138) # Description Concurrent index creation with flyway requires a configuration parameter to be set to avoid deadlocks in postgres. This setting was already configured and shipped in the built docker images but the `docker-compose.yaml` file that's used to spin up a DB locally for running e2e tests for example does not mount that file yet which leads to migrations deadlocking when you run `docker compose up` # Changes mount `flyway.conf` in `docker-compose.yaml`. ## How to test run `docker compose up` from the repo root and check that migrations finished successfully Screenshot 2026-02-10 at 09 59 01 --- docker-compose.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docker-compose.yaml b/docker-compose.yaml index a29d1ac6e6..75b479a2a5 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -32,6 +32,9 @@ services: - type: bind source: ./database/sql/ target: /flyway/sql + - type: bind + source: ./database/conf/ + target: /flyway/conf volumes: postgres: From adf5fca39fb8f232110158cda947d3765ae0d2c1 Mon Sep 17 00:00:00 2001 From: ilya Date: Tue, 10 Feb 2026 19:56:17 +0300 Subject: [PATCH 046/219] Fix flaky autopilot follower e2e test (#4139) # Description Fixes the flaky autopilot follower e2e test that fails almost always on #4136. After adding some logs, etc, I could finally find all the possible race conditions and fix the test assertions. 
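The fix replaces fixed waits with condition polling: mint a block, check an observable condition, retry until a deadline. A generic sketch of that pattern — not the e2e crate's actual `wait_for_condition` helper:

```rust
use std::{future::Future, time::Duration};

/// Polls `check` until it returns true or `timeout` elapses.
async fn poll_until<F, Fut>(timeout: Duration, mut check: F) -> bool
where
    F: FnMut() -> Fut,
    Fut: Future<Output = bool>,
{
    let deadline = tokio::time::Instant::now() + timeout;
    while tokio::time::Instant::now() < deadline {
        if check().await {
            return true;
        }
        tokio::time::sleep(Duration::from_millis(100)).await;
    }
    false
}

#[tokio::main]
async fn main() {
    // Example: wait until some observable state flips to true.
    let start = std::time::Instant::now();
    let became_true = poll_until(Duration::from_secs(2), || async move {
        start.elapsed() > Duration::from_millis(300)
    })
    .await;
    assert!(became_true);
}
```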
--- .github/codeql/codeql-config.yml | 4 +++ .github/workflows/codeql.yml | 1 + crates/e2e/tests/e2e/autopilot_leader.rs | 43 +++++++++++++++++++----- 3 files changed, 40 insertions(+), 8 deletions(-) create mode 100644 .github/codeql/codeql-config.yml diff --git a/.github/codeql/codeql-config.yml b/.github/codeql/codeql-config.yml new file mode 100644 index 0000000000..3db817d664 --- /dev/null +++ b/.github/codeql/codeql-config.yml @@ -0,0 +1,4 @@ +name: "CoW Protocol CodeQL Config" + +paths-ignore: + - crates/e2e diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 60b13c2982..15e583eb92 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -33,6 +33,7 @@ jobs: with: languages: ${{ matrix.language }} build-mode: ${{ matrix.build-mode }} + config-file: .github/codeql/codeql-config.yml - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3.29.2 diff --git a/crates/e2e/tests/e2e/autopilot_leader.rs b/crates/e2e/tests/e2e/autopilot_leader.rs index 7ff1e1fc7d..02f47a2c6d 100644 --- a/crates/e2e/tests/e2e/autopilot_leader.rs +++ b/crates/e2e/tests/e2e/autopilot_leader.rs @@ -12,7 +12,6 @@ use { ethrpc::{Web3, alloy::CallBuilderExt}, model::order::{OrderCreation, OrderKind}, number::units::EthUnit, - std::time::Duration, }; #[tokio::test] @@ -103,6 +102,7 @@ async fn dual_autopilot_only_leader_produces_auctions(web3: Web3) { const_hex::encode(solver2.address())), "--price-estimation-drivers=test_quoter|http://localhost:11088/test_solver2".to_string(), "--gas-estimators=http://localhost:11088/gasprice".to_string(), + "--metrics-address=0.0.0.0:9591".to_string(), "--api-address=0.0.0.0:12089".to_string(), "--enable-leader-lock=true".to_string(), ]).await; @@ -160,18 +160,45 @@ async fn dual_autopilot_only_leader_produces_auctions(web3: Web3) { // Stop autopilot-leader, follower should take over manual_shutdown.shutdown(); - onchain.mint_block().await; - assert!( - tokio::time::timeout(Duration::from_secs(15), autopilot_leader) - .await - .is_ok() - ); + let is_leader_shutdown = || async { + onchain.mint_block().await; + autopilot_leader.is_finished() + }; + wait_for_condition(TIMEOUT, is_leader_shutdown) + .await + .unwrap(); + + // Wait for the follower to step up as leader by checking its metrics endpoint + let is_follower_leader = || async { + onchain.mint_block().await; + let Ok(response) = reqwest::get("http://0.0.0.0:9591/metrics").await else { + return false; + }; + let Ok(body) = response.text().await else { + return false; + }; + body.lines() + .any(|line| line.trim().contains("leader_lock_tracker_is_leader 1")) + }; + wait_for_condition(TIMEOUT, is_follower_leader) + .await + .unwrap(); // Run 10 txs, autopilot-backup is in charge // - only test_solver2 should participate and settle for i in 1..=10 { tracing::info!("Tx with autopilot-backup {i}"); - let uid = services.create_order(&order()).await.unwrap(); + let uid_cell = std::cell::Cell::new(None); + let try_create_order = || async { + onchain.mint_block().await; + if let Ok(uid) = services.create_order(&order()).await { + uid_cell.set(Some(uid)); + return true; + } + false + }; + wait_for_condition(TIMEOUT, try_create_order).await.unwrap(); + let uid = uid_cell.into_inner().unwrap(); tracing::info!("waiting for trade"); let indexed_trades = || async { From 8a30f89a71b48e88eb53cbb73e1f73746ea0abe3 Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Wed, 11 Feb 2026 14:34:23 +0100 Subject: [PATCH 047/219] Optimize autopilot 
maintenance (#4141) # Description Currently the autopilot maintenance is on the critical path before we can build a new auction. But some of the work that's being done is actually not necessary to build fully updated auctions. Steps that are currently not essential are: * pruning the DB * associating a settlement with a proposed solution (as long as we process all events of a settlement we know which orders have been filled) Ever since there are smart contracts that are listed as solvers attributing a settlement to a solution is pretty time consuming since we need to use `debug_traceTransaction`. This regularly takes hundreds of milliseconds so moving this off of the hot path will be a huge win. # Changes To account for that the maintenance now distinguishes between essential tasks and optional tasks. In order to move settlement attribution our of the essentials I moved the settlement observer out of the settlement event indexer into the autopilot maintenance component. Also now that we have this split we can move ethflow refund indexing into the autopilot maintenance under the optional category. ## How to test existing e2e tests for general correctness manual deployment to prod to confirm speed improvement Tested on prod together with some other change. 1: current setup 2: new node infra 3: new node infra + this PR Screenshot 2026-02-11 at 13 31 44 --- .../src/boundary/events/settlement.rs | 20 +--- crates/autopilot/src/maintenance.rs | 99 +++++++++++++++---- crates/autopilot/src/run.rs | 20 ++-- 3 files changed, 93 insertions(+), 46 deletions(-) diff --git a/crates/autopilot/src/boundary/events/settlement.rs b/crates/autopilot/src/boundary/events/settlement.rs index ef4206efe6..d097c06ba7 100644 --- a/crates/autopilot/src/boundary/events/settlement.rs +++ b/crates/autopilot/src/boundary/events/settlement.rs @@ -1,5 +1,5 @@ use { - crate::{database::Postgres, domain::settlement}, + crate::database::Postgres, alloy::{ primitives::Address, rpc::types::{Filter, Log}, @@ -35,16 +35,14 @@ impl AlloyEventRetrieving for GPv2SettlementContract { pub struct Indexer { db: Postgres, - start_index: u64, - settlement_observer: settlement::Observer, + start_indexing_block: u64, } impl Indexer { - pub fn new(db: Postgres, settlement_observer: settlement::Observer, start_index: u64) -> Self { + pub fn new(db: Postgres, start_indexing_block: u64) -> Self { Self { db, - settlement_observer, - start_index, + start_indexing_block, } } } @@ -57,7 +55,7 @@ impl EventStoring<(GPv2SettlementEvents, Log)> for Indexer { async fn last_event_block(&self) -> Result { super::read_last_block_from_db(&self.db.pool, INDEX_NAME) .await - .map(|last_block| last_block.max(self.start_index)) + .map(|last_block| last_block.max(self.start_indexing_block)) } async fn persist_last_indexed_block(&mut self, latest_block: u64) -> Result<()> { @@ -74,10 +72,6 @@ impl EventStoring<(GPv2SettlementEvents, Log)> for Indexer { crate::database::events::replace_events(&mut transaction, events, from_block).await?; database::settlements::delete(&mut transaction, from_block).await?; transaction.commit().await?; - - self.settlement_observer - .post_process_outstanding_settlement_transactions() - .await; Ok(()) } @@ -85,10 +79,6 @@ impl EventStoring<(GPv2SettlementEvents, Log)> for Indexer { let mut transaction = self.db.pool.begin().await?; crate::database::events::append_events(&mut transaction, events).await?; transaction.commit().await?; - - self.settlement_observer - .post_process_outstanding_settlement_transactions() - .await; Ok(()) } } 
diff --git a/crates/autopilot/src/maintenance.rs b/crates/autopilot/src/maintenance.rs index ff3c4ca74f..b279ea5c20 100644 --- a/crates/autopilot/src/maintenance.rs +++ b/crates/autopilot/src/maintenance.rs @@ -3,17 +3,19 @@ use { boundary::events::settlement::{GPv2SettlementContract, Indexer}, database::{ Postgres, + ethflow_events::event_retriever::EthFlowRefundRetriever, onchain_order_events::{ OnchainOrderParser, ethflow_events::{EthFlowData, EthFlowDataForDb}, event_retriever::CoWSwapOnchainOrdersContract, }, }, + domain::settlement, event_updater::EventUpdater, }, anyhow::Result, ethrpc::block_stream::{BlockInfo, CurrentBlockWatcher, into_stream}, - futures::StreamExt, + futures::{FutureExt, StreamExt}, prometheus::{ HistogramVec, IntCounterVec, @@ -27,11 +29,13 @@ use { }, tokio::sync::watch, tokio_stream::wrappers::WatchStream, + tracing::Instrument, }; /// Component to sync with the maintenance logic that runs in a background task. -/// This allows us to run the maintenance logic ASAP but still wait for it to -/// finish in a convenient manner. +/// This allows us to run the maintenance logic as soon as we see a new block +/// while still making the autopilot run loop only wait for updates that are +/// essential for building new auctions. #[derive(Clone)] pub struct MaintenanceSync { /// How long the autopilot wants to wait at most. @@ -75,19 +79,26 @@ pub struct Maintenance { /// All indexing tasks to keep cow amms up to date. cow_amm_indexer: Vec>, /// Tasks to index ethflow orders that were submitted onchain. - ethflow_indexer: Vec, + ethflow_order_indexer: Vec, + /// Tasks to index ethflow refunds. + ethflow_refund_indexer: Vec, + /// Component to correctly attribute a settlement to a proposed solution. + settlement_observer: settlement::Observer, } impl Maintenance { pub fn new( settlement_indexer: EventUpdater, db_cleanup: Postgres, + settlement_observer: settlement::Observer, ) -> Self { Self { settlement_indexer, db_cleanup, cow_amm_indexer: Default::default(), - ethflow_indexer: Default::default(), + ethflow_order_indexer: Default::default(), + ethflow_refund_indexer: Default::default(), + settlement_observer, } } @@ -108,7 +119,12 @@ impl Maintenance { .next() .await .expect("block stream terminated unexpectedly"); - self.index_until_block(block, &sender).await; + self.index_until_block(block, &sender) + .instrument(tracing::info_span!( + "autopilot_maintenance", + block = block.number + )) + .await; } }); @@ -122,33 +138,45 @@ impl Maintenance { metrics().last_seen_block.set(block.number); let start = Instant::now(); - if let Err(err) = self.update_inner().await { - tracing::warn!(?err, block = block.number, "failed to run maintenance"); + if let Err(err) = self.run_essential_maintenance().await { + tracing::warn!(?err, "failed to run essential maintenance"); metrics().updates.with_label_values(&["error"]).inc(); return; } tracing::info!( - block = block.number, time = ?start.elapsed(), - "successfully ran maintenance task" + "successfully ran essential maintenance tasks" ); metrics().last_updated_block.set(block.number); metrics().updates.with_label_values(&["success"]).inc(); if let Err(err) = last_processed_block.send(block.number) { tracing::warn!(?err, "nobody listening for processed blocks anymore"); } + + // only after we informed the run_loop that the essential updates are done we + // kick off the optional maintenance tasks + let start = Instant::now(); + if let Err(err) = self.run_optional_maintenance().await { + tracing::warn!(?err, "failed to run optional 
maintenance"); + return; + } + tracing::info!( + time = ?start.elapsed(), + "successfully ran optional maintenance tasks" + ); } - async fn update_inner(&self) -> Result<()> { - let _timer = - observe::metrics::metrics().on_auction_overhead_start("autopilot", "maintenance_total"); + /// Runs all the maintenance tasks that are needed to ensure the next + /// auction gets built using the most up-to-date information. + async fn run_essential_maintenance(&self) -> Result<()> { + let _timer = observe::metrics::metrics() + .on_auction_overhead_start("autopilot", "maintenance_essential"); tokio::try_join!( Self::timed_future( "settlement_indexer", self.settlement_indexer.run_maintenance() ), - Self::timed_future("db_cleanup", self.db_cleanup.run_maintenance()), Self::timed_future( "cow_amm_indexer", futures::future::try_join_all( @@ -158,9 +186,9 @@ impl Maintenance { ), ), Self::timed_future( - "ethflow_indexer", + "ethflow_order_indexer", futures::future::try_join_all( - self.ethflow_indexer + self.ethflow_order_indexer .iter() .map(|indexer| indexer.run_maintenance()), ), @@ -170,10 +198,41 @@ impl Maintenance { Ok(()) } + /// Runs all the maintenance tasks that should run eventually but are not + /// very time sensitive. + async fn run_optional_maintenance(&self) -> Result<()> { + let _timer = observe::metrics::metrics() + .on_auction_overhead_start("autopilot", "maintenance_optional"); + tokio::try_join!( + Self::timed_future("db_cleanup", self.db_cleanup.run_maintenance()), + Self::timed_future( + "ethflow_refund_indexer", + futures::future::try_join_all( + self.ethflow_refund_indexer + .iter() + .map(|indexer| indexer.run_maintenance()), + ), + ), + Self::timed_future( + "settlement_attribution", + self.settlement_observer + .post_process_outstanding_settlement_transactions() + .map(|_| Ok(())) + ) + )?; + + Ok(()) + } + /// Registers all maintenance tasks that are necessary to correctly support /// ethflow orders. 
- pub fn add_ethflow_indexer(&mut self, ethflow_indexer: EthflowIndexer) { - self.ethflow_indexer.push(ethflow_indexer); + pub fn add_ethflow_indexing( + &mut self, + order_indexer: EthflowOrderIndexer, + refund_indexer: EthflowRefundIndexer, + ) { + self.ethflow_order_indexer.push(order_indexer); + self.ethflow_refund_indexer.push(refund_indexer); } /// Registers all maintenance tasks that are necessary to correctly support @@ -194,9 +253,11 @@ impl Maintenance { } } -type EthflowIndexer = +type EthflowOrderIndexer = EventUpdater, CoWSwapOnchainOrdersContract>; +type EthflowRefundIndexer = EventUpdater; + #[derive(prometheus_metric_storage::MetricStorage)] #[metric(subsystem = "autopilot_maintenance")] struct Metrics { diff --git a/crates/autopilot/src/run.rs b/crates/autopilot/src/run.rs index 8833155918..d8279bb93d 100644 --- a/crates/autopilot/src/run.rs +++ b/crates/autopilot/src/run.rs @@ -46,7 +46,6 @@ use { baseline_solver::BaseTokens, code_fetching::CachedCodeFetcher, http_client::HttpClientFactory, - maintenance::ServiceMaintenance, order_quoting::{self, OrderQuoter}, price_estimation::factory::{self, PriceEstimatorFactory}, signature_validator, @@ -426,8 +425,6 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { infra::persistence::Persistence::new(args.s3.into().unwrap(), Arc::new(db_write.clone())) .instrument(info_span!("persistence_init")) .await; - let settlement_observer = - crate::domain::settlement::Observer::new(eth.clone(), persistence.clone()); let settlement_contract_start_index = match GPv2Settlement::deployment_block(&chain_id) { Some(block) => { tracing::debug!(block, "found settlement contract deployment"); @@ -449,7 +446,6 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { ), boundary::events::settlement::Indexer::new( db_write.clone(), - settlement_observer, settlement_contract_start_index, ), block_retriever.clone(), @@ -568,8 +564,14 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { // updated in background task let trusted_tokens = AutoUpdatingTokenList::from_configuration(market_makable_token_list_configuration).await; + let settlement_observer = + crate::domain::settlement::Observer::new(eth.clone(), persistence.clone()); - let mut maintenance = Maintenance::new(settlement_event_indexer, db_write.clone()); + let mut maintenance = Maintenance::new( + settlement_event_indexer, + db_write.clone(), + settlement_observer, + ); maintenance.add_cow_amm_indexer(&cow_amm_registry); if !args.ethflow_contracts.is_empty() { @@ -626,13 +628,7 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { .await .expect("Should be able to initialize event updater. 
Database read issues?"); - maintenance.add_ethflow_indexer(onchain_order_indexer); - // refunds are not critical for correctness and can therefore be indexed - // sporadically in a background task - let service_maintainer = ServiceMaintenance::new(vec![Arc::new(refund_event_handler)]); - tokio::task::spawn( - service_maintainer.run_maintenance_on_new_block(eth.current_block().clone()), - ); + maintenance.add_ethflow_indexing(onchain_order_indexer, refund_event_handler); } let run_loop_config = run_loop::Config { From 2f704dcaa88fc600940bd99e86a223642f53dca5 Mon Sep 17 00:00:00 2001 From: Augusto Collerone Date: Wed, 11 Feb 2026 13:48:26 -0300 Subject: [PATCH 048/219] [M2] Integrate block explorer URL override for Otterscan (#4077) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary This is a copy of [a stale PR](https://github.com/cowprotocol/services/pull/4012) This PR delivers part of **Milestone 2: Frontend Integration** of the [CoW Grants Program RFP: CoW Protocol Playground Block Explorer Integration](https://forum.cow.fi/t/grant-application-cow-protocol-playground-block-explorer-integration/3284/1) proposal by CoBuilders. Updates the CoW Swap and Explorer Dockerfiles to use `REACT_APP_BLOCK_EXPLORER_URL`, enabling all block explorer links to point to the local Otterscan instance (port 8003) instead of Etherscan. ![demo_after](https://github.com/user-attachments/assets/ec25432e-9be5-430e-bb1b-582a7ed5a454) ## Milestone 2 Deliverables ### CoW Swap Frontend Integration - [x] Configure `REACT_APP_BLOCK_EXPLORER_URL=http://localhost:8003` in build - [x] Transaction hash links point to local Otterscan - [x] Address links point to local Otterscan - [x] All navigation flows work correctly ### CoW Explorer Frontend Integration - [x] Configure `REACT_APP_BLOCK_EXPLORER_URL=http://localhost:8003` in build - [x] "View on Blockchain" links point to local Otterscan - [x] Address and token links point to local Otterscan - [x] All navigation flows work correctly ### Success Criteria > *"One-click navigation from frontends to the local block explorer for all transactions and addresses"* - Clicking any explorer link in CoW Swap (port 8000) → Opens in Otterscan (port 8003) - Clicking any explorer link in CoW Explorer (port 8001) → Opens in Otterscan (port 8003) ## Changes ### Modified Files | File | Changes | |------|---------| | `Dockerfile.cowswap` | Added `REACT_APP_BLOCK_EXPLORER_URL` build arg | | `Dockerfile.explorer` | Added `REACT_APP_BLOCK_EXPLORER_URL` env var | ## Architecture ``` ┌─────────────┐ ┌─────────────┐ │ CoW Swap │ │ CoW Explorer│ │ (port 8000) │ │ (port 8001) │ └──────┬──────┘ └──────┬──────┘ │ │ │ All explorer │ │ links now │ │ point to ───────┤ │ │ └───────────┬───────┘ │ ▼ ┌─────────────┐ │ Otterscan │ │ (port 8003) │ └─────────────┘ ``` ## Dependencies This PR depends on: - **PR:** cowprotocol/cowswap#XXX - Adds `REACT_APP_BLOCK_EXPLORER_URL` env var support ## Testing 1. Start the playground: ```bash docker compose -f docker-compose.fork.yml up --build ``` 2. Open CoW Swap at `http://localhost:8000`: - Make a swap or view transaction history - Click any "View on Explorer" or transaction link - **Expected:** Opens `http://localhost:8003/tx/{hash}` 3. Open CoW Explorer at `http://localhost:8001`: - Browse any order or transaction - Click any address or transaction link - **Expected:** Opens `http://localhost:8003/address/{addr}` 4. Verify Otterscan shows the transaction/address details with full traces ## Demo 1. 
User clicks transaction in CoW Swap 2. Otterscan opens with full transaction details 3. Traces, logs, and gas profiling visible locally ## Milestones | Milestone | Description | Status | |-----------|-------------|--------| | M1 | Otterscan Integration | [PR #4000](https://github.com/cowprotocol/services/pull/4000) | | M2 | Frontend Integration | **This PR** (+ cowswap PR) | | M3 | Documentation | Pending | --- *Submitted by [CoBuilders](https://cobuilders.xyz) as part of the CoW Grants Program* --------- Co-authored-by: Ignacio Co-authored-by: Claude Opus 4.5 --- playground/Dockerfile.cowswap | 12 ++++++++++-- playground/Dockerfile.explorer | 6 ++++-- playground/README.md | 4 ++-- playground/nginx-spa.conf | 10 ++++++++++ 4 files changed, 26 insertions(+), 6 deletions(-) create mode 100644 playground/nginx-spa.conf diff --git a/playground/Dockerfile.cowswap b/playground/Dockerfile.cowswap index 8318bd5760..3ec5168217 100644 --- a/playground/Dockerfile.cowswap +++ b/playground/Dockerfile.cowswap @@ -10,6 +10,10 @@ RUN corepack enable \ ARG REACT_APP_NETWORK_URL_1=https://rpc.mevblocker.io ARG REACT_APP_NETWORK_URL_5=https://ethereum-goerli.publicnode.com ARG REACT_APP_NETWORK_URL_100=https://gnosis.publicnode.com +ARG REACT_APP_EXPLORER_URL_DEV=http://localhost:8001 + +# Block explorer URL (Otterscan for local development) +ARG REACT_APP_BLOCK_EXPLORER_URL=http://localhost:8003 # Orderbook URL args ARG REACT_APP_ORDER_BOOK_URLS='{"1":"https://api.cow.fi/mainnet","100":"https://api.cow.fi/goerli","5":"https://api.cow.fi/xdai"}' @@ -37,6 +41,8 @@ ENV REACT_APP_NETWORK_URL_1="$REACT_APP_NETWORK_URL_1" ENV REACT_APP_NETWORK_URL_5="$REACT_APP_NETWORK_URL_5" ENV REACT_APP_NETWORK_URL_100="$REACT_APP_NETWORK_URL_100" ENV REACT_APP_ORDER_BOOK_URLS="$REACT_APP_ORDER_BOOK_URLS" +ENV REACT_APP_EXPLORER_URL_DEV="$REACT_APP_EXPLORER_URL_DEV" +ENV REACT_APP_BLOCK_EXPLORER_URL="$REACT_APP_BLOCK_EXPLORER_URL" # Update environment variables based on "chain" and "ETH_RPC_URL", then build safely RUN set -e; \ @@ -56,12 +62,14 @@ RUN set -e; \ ;; \ esac; \ fi; \ - export REACT_APP_NETWORK_URL_1 REACT_APP_NETWORK_URL_5 REACT_APP_NETWORK_URL_100 REACT_APP_ORDER_BOOK_URLS; \ + export REACT_APP_NETWORK_URL_1 REACT_APP_NETWORK_URL_5 REACT_APP_NETWORK_URL_100 REACT_APP_ORDER_BOOK_URLS REACT_APP_EXPLORER_URL_DEV REACT_APP_BLOCK_EXPLORER_URL; \ NODE_OPTIONS="--max-old-space-size=4096" NX_NO_CLOUD=true pnpm run build \ --env REACT_APP_NETWORK_URL_1="$REACT_APP_NETWORK_URL_1" \ --env REACT_APP_NETWORK_URL_5="$REACT_APP_NETWORK_URL_5" \ --env REACT_APP_NETWORK_URL_100="$REACT_APP_NETWORK_URL_100" \ - --env REACT_APP_ORDER_BOOK_URLS="$REACT_APP_ORDER_BOOK_URLS" + --env REACT_APP_ORDER_BOOK_URLS="$REACT_APP_ORDER_BOOK_URLS" \ + --env REACT_APP_EXPLORER_URL_DEV="$REACT_APP_EXPLORER_URL_DEV" \ + --env REACT_APP_BLOCK_EXPLORER_URL="$REACT_APP_BLOCK_EXPLORER_URL" # Stage 2: Copy the frontend build to the nginx container FROM docker.io/nginx:1.21-alpine AS frontend diff --git a/playground/Dockerfile.explorer b/playground/Dockerfile.explorer index 917923a0f4..19d38cf664 100644 --- a/playground/Dockerfile.explorer +++ b/playground/Dockerfile.explorer @@ -16,14 +16,16 @@ RUN git clone https://github.com/cowprotocol/cowswap . 
\
     && git submodule update --init --recursive \
     && pnpm install --frozen-lockfile
 
-# Set environment variable for the order book
+# Build environment variables
 ENV REACT_APP_ORDER_BOOK_URLS='{"1":"http://localhost:8080"}'
+ENV REACT_APP_BLOCK_EXPLORER_URL=http://localhost:8003
 
 # Build the frontend
-RUN APP_ID=1 pnpm run build:explorer
+RUN APP_ID=1 REACT_APP_BLOCK_EXPLORER_URL=$REACT_APP_BLOCK_EXPLORER_URL pnpm run build:explorer
 
 # Stage 2: Copy the frontend build to the nginx container
 FROM docker.io/nginx:1.21-alpine AS frontend
 COPY --from=node-build /usr/src/app/build/explorer /usr/share/nginx/html
+COPY nginx-spa.conf /etc/nginx/conf.d/default.conf
 EXPOSE 80
 CMD ["nginx", "-g", "daemon off;"]
diff --git a/playground/README.md b/playground/README.md
index 497d625cbd..3269971f77 100644
--- a/playground/README.md
+++ b/playground/README.md
@@ -42,7 +42,7 @@ Now with Rabby configured, and the services started, you can browse to http://lo
 > The EthFlow is not configured by default, the next section explains how to set it up.
 > You can follow along with watching the logs of the `autopilot`, `driver`, and `baseline` solver to see how the Protocol interacts.
 > If you make any changes to the files in your repo directory, services will automatically be recompiled and restarted.
-> The CoW Explorer is avialable at http://localhost:8001 to see more information about transaction status
+> The CoW Explorer is available at http://localhost:8001 to see more information about transaction status
 
 ### Resetting the playground
 
@@ -147,7 +147,7 @@ In this mode, the stack will spin up:
 - Driver
 - Baseline
 - Cow Swap
-- Cow Explorer (*not yet implemented*)
+- Cow Explorer
 
 ### Local
 
diff --git a/playground/nginx-spa.conf b/playground/nginx-spa.conf
new file mode 100644
index 0000000000..2d96d1589b
--- /dev/null
+++ b/playground/nginx-spa.conf
@@ -0,0 +1,10 @@
+server {
+    listen 80;
+    server_name localhost;
+    root /usr/share/nginx/html;
+    index index.html;
+
+    location / {
+        try_files $uri $uri/ /index.html;
+    }
+}

From 5db7fbb1b76ddb8d988b9307dcefe16d4f5aab2d Mon Sep 17 00:00:00 2001
From: Marcin Szymczak
Date: Wed, 11 Feb 2026 18:04:27 +0100
Subject: [PATCH 049/219] Fix true_valid_to updating (#4134)

# Description
`true_valid_to` is not always updated as needed. It can happen that ethflow order events are parsed first, before order creation takes place, which results in `true_valid_to` first being set correctly and then being set to `u32::MAX` when the order is inserted.

Ethflow orders as a whole get parsed off onchain events and the logic (regarding the above) currently is as follows:
1. The custom onchain data parser would pick up on the Ethflow related events and insert the ethflow_orders row with the correct order uid and valid_to. I have included logic to update the related order's true_valid_to to the same value, but the order does not exist in the db yet.
2. Orders parsed from order placement events will get inserted into the database, ignoring conflicts. Since the newly created ethflow order does not yet exist in the database, it will get inserted with the valid_to set as u32::MAX and true_valid_to having the same value.

There is no other point in time for the true_valid_to to get correctly updated, which leaves us with two options:
1. Use the smaller of the two (ethflow_orders.valid_to, orders.valid_to) when inserting an order.
2. Change the order of how onchain events are parsed - first parse the order creation events and only then append the custom info (which includes ethflow).
Solution 1 seems more robust since we will always end up with the smallest value when inserting an order. I am not sure what the implications of changing the onchain event parsing order might be.

# Changes
Adds a clause
```sql
COALESCE(
    (SELECT LEAST($21, valid_to) FROM ethflow_orders WHERE uid = $1),
    $21
)
```
to the INSERT_ORDER query, which will always take the smallest of `ethflow_orders.valid_to` and the value currently being inserted.

# Deployment
When the hotfix is applied, services will correctly attribute `true_valid_to` and a manual backfill will be performed.

## How to test
Tested on the ethflow E2E test `local_node_eth_flow_tx`: confirmed the faulty behaviour can occur and added an assert that ensures the resulting true_valid_to is **not** `u32::MAX`.

---------

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Co-authored-by: Martin Magnus
---
 crates/database/src/ethflow_orders.rs | 23 +++--------------------
 crates/database/src/orders.rs         | 13 ++++++++++++-
 crates/database/src/trades.rs         |  2 +-
 crates/e2e/tests/e2e/ethflow.rs       | 25 ++++++++++++++++++++++++-
 4 files changed, 40 insertions(+), 23 deletions(-)

diff --git a/crates/database/src/ethflow_orders.rs b/crates/database/src/ethflow_orders.rs
index 05886feb53..146a714af5 100644
--- a/crates/database/src/ethflow_orders.rs
+++ b/crates/database/src/ethflow_orders.rs
@@ -37,6 +37,9 @@ pub async fn insert_or_overwrite_ethflow_order(
     )
     .await?;
 
+    // Update true_valid_to field if order already in the database
+    // as the Ethflow orders get inserted with validity of u32::MAX
+    // and the true validity is contained in the EthOrderPlacement
     const UPDATE_TRUE_VALID_TO_QUERY: &str = r#"
         UPDATE orders
         SET true_valid_to = $1
@@ -51,26 +54,6 @@ pub async fn insert_or_overwrite_ethflow_order(
     Ok(())
 }
 
-// Ethflow orders are created with valid_to equal to u32::MAX, their
-// true validity is parsed from Settlement contract events.
-#[instrument(skip_all)]
-pub async fn update_true_valid_to_for_ethflow_order(
-    ex: &mut PgConnection,
-    event: &EthOrderPlacement,
-) -> Result<(), sqlx::Error> {
-    const QUERY: &str = r#"
-        UPDATE orders
-        SET true_valid_to = $1
-        WHERE uid = $2
-    "#;
-    sqlx::query(QUERY)
-        .bind(event.valid_to)
-        .bind(event.uid)
-        .execute(ex)
-        .await?;
-    Ok(())
-}
-
 #[derive(Clone, Debug, Default, sqlx::FromRow, Eq, PartialEq)]
 pub struct EthOrderData {
     pub uid: OrderUid,
diff --git a/crates/database/src/orders.rs b/crates/database/src/orders.rs
index 0e661d7729..b7a8e5abe4 100644
--- a/crates/database/src/orders.rs
+++ b/crates/database/src/orders.rs
@@ -148,7 +148,18 @@ INSERT INTO orders (
     class,
     true_valid_to
 )
-VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21)
+VALUES (
+    $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20,
+    -- Ethflow orders are inserted with valid_to set to u32::MAX. Their true validity is stored in
+    -- the ethflow_orders table.
+ -- If there already exists an Ethflow order with the same uid, take smaller of the two valid_to values + CASE + WHEN $21 = 4294967295 THEN -- u32::MAX + COALESCE((SELECT valid_to FROM ethflow_orders WHERE uid = $1), $21) + ELSE + $21 + END +) "#; #[instrument(skip_all)] diff --git a/crates/database/src/trades.rs b/crates/database/src/trades.rs index 7c7430b128..13628a39f9 100644 --- a/crates/database/src/trades.rs +++ b/crates/database/src/trades.rs @@ -705,7 +705,7 @@ mod tests { } // Sort expected trades by block_number DESC (matching query ORDER BY) - expected_trades.sort_by(|a, b| b.block_number.cmp(&a.block_number)); + expected_trades.sort_by_key(|trade| std::cmp::Reverse(trade.block_number)); // Test limit: get first 2 trades (blocks 4 and 3 in DESC order) let result = trades(&mut db, Some(&owner), None, 0, 2) diff --git a/crates/e2e/tests/e2e/ethflow.rs b/crates/e2e/tests/e2e/ethflow.rs index eabbb5a6f8..3cf2406a5e 100644 --- a/crates/e2e/tests/e2e/ethflow.rs +++ b/crates/e2e/tests/e2e/ethflow.rs @@ -6,7 +6,7 @@ use { anyhow::bail, autopilot::database::onchain_order_events::ethflow_events::WRAP_ALL_SELECTOR, contracts::alloy::{CoWSwapEthFlow, ERC20Mintable, WETH9}, - database::order_events::OrderEventLabel, + database::{byte_array::ByteArray, order_events::OrderEventLabel}, e2e::setup::{ ACCOUNT_ENDPOINT, API_HOST, @@ -49,6 +49,7 @@ use { refunder::RefundStatus, reqwest::Client, shared::signature_validator::check_erc1271_result, + std::ops::DerefMut, }; const DAI_PER_ETH: u64 = 1_000; @@ -243,6 +244,28 @@ async fn eth_flow_tx(web3: Web3) { .await .unwrap(); assert_eq!(allowance, alloy::primitives::U256::ZERO); + + // Check that true_valid_to is equal to the ethflow_order's valid to + let uid = ethflow_order + .uid(onchain.contracts(), ethflow_contract) + .await; + let mut db = services.db().acquire().await.unwrap(); + let true_valid_to: (i64,) = sqlx::query_as("SELECT true_valid_to FROM orders WHERE uid = $1") + .bind(ByteArray(uid.0)) + .fetch_one(db.deref_mut()) + .await + .unwrap(); + assert_eq!( + true_valid_to.0, + services + .get_order(&uid) + .await + .unwrap() + .metadata + .ethflow_data + .unwrap() + .user_valid_to + ); } async fn eth_flow_without_quote(web3: Web3) { From 558b1bc9471c6747867b2e7d21cf9c53e07ccb2d Mon Sep 17 00:00:00 2001 From: ilya Date: Wed, 11 Feb 2026 22:04:05 +0300 Subject: [PATCH 050/219] Shared native price cache (#4136) # Description Replaces #4044. Once we started forwarding native price estimates from the orderbook to autopilot, CoinGecko API usage went up. This happened because the estimator moved to autopilot, which now handles all requests and also relies on CoinGecko. This PR refactors native price estimation in a way described below, according to the diagram created by @MartinquaXD: image # Changes - Split `CachingNativePriceEstimator` into three focused components: a passive `Cache` (shared data store), a `CachingNativePriceEstimator` (on-demand price fetching with caching), and a `NativePriceUpdater` (background maintenance worker). - The autopilot now creates a single shared `Cache` instance used by both the API-facing estimator and the auction competition estimator, eliminating duplicate price fetches for the same tokens. - Added an `--api-native-price-estimators` flag to the autopilot, allowing the API endpoint to use different native price estimator sources than the auction pipeline (falls back to `--native-price-estimators`, if unset). 
- Replaced the implicit priority-based update system (`high_priority` + `replace_high_priority`) with an explicit token tracking via `set_tokens_to_update()`, called during the solvable orders cache building in the autopilot. - Moved cache + updater wiring out of the shared factory into each binary, so each service composes the shared building blocks as needed: - Orderbook: creates its own `Cache` + `CachingNativePriceEstimator`. - Autopilot: Creates 2 separate `CachingNativePriceEstimators` that share a single instance of `Cache`. One of the estimators is wrapped with the `NativePriceUpdater`, which is used in the auction competition. Another one serves the autopilot's API request without a maintenance task. - Removed the unused `--native-price-cache-max-update-size` flag (dead code after the refactoring removed the truncation logic). ## How to test Existing tests + staging and prod. # Follow-up tasks - The cache is unbounded. This needs to be revisited, and probably adding a simple LRU cache would be safer. --- Cargo.lock | 1 + crates/autopilot/src/arguments.rs | 41 + crates/autopilot/src/run.rs | 60 +- crates/autopilot/src/solvable_orders.rs | 158 +--- crates/e2e/src/setup/services.rs | 2 +- crates/orderbook/src/run.rs | 23 +- crates/shared/Cargo.toml | 1 + crates/shared/src/price_estimation/factory.rs | 53 +- crates/shared/src/price_estimation/mod.rs | 41 - .../price_estimation/native_price_cache.rs | 788 ++++++++---------- 10 files changed, 536 insertions(+), 632 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 423221c014..afda6769b8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6168,6 +6168,7 @@ dependencies = [ "alloy", "anyhow", "app-data", + "arc-swap", "async-stream", "async-trait", "bigdecimal", diff --git a/crates/autopilot/src/arguments.rs b/crates/autopilot/src/arguments.rs index a6cb1cf8ca..e22d464a2b 100644 --- a/crates/autopilot/src/arguments.rs +++ b/crates/autopilot/src/arguments.rs @@ -96,6 +96,11 @@ pub struct Arguments { #[clap(long, env)] pub native_price_estimators: NativePriceEstimators, + /// Estimators for the API endpoint. Falls back to + /// `--native-price-estimators` if unset. + #[clap(long, env)] + pub api_native_price_estimators: Option, + /// How many successful price estimates for each order will cause a native /// price estimation to return its result early. It's possible to pass /// values greater than the total number of enabled estimators but that @@ -276,6 +281,26 @@ pub struct Arguments { /// further. #[clap(long, env, default_value = "5s", value_parser = humantime::parse_duration)] pub max_maintenance_timeout: Duration, + + /// How often the native price estimator should refresh its cache. + #[clap( + long, + env, + default_value = "1s", + value_parser = humantime::parse_duration, + )] + pub native_price_cache_refresh: Duration, + + /// How long before expiry the native price cache should try to update the + /// price in the background. This value has to be smaller than + /// `--native-price-cache-max-age`. 
+ #[clap( + long, + env, + default_value = "80s", + value_parser = humantime::parse_duration, + )] + pub native_price_prefetch_time: Duration, } impl std::fmt::Display for Arguments { @@ -296,6 +321,7 @@ impl std::fmt::Display for Arguments { allowed_tokens, unsupported_tokens, native_price_estimators, + api_native_price_estimators, min_order_validity_period, banned_users, banned_users_max_cache_size, @@ -327,6 +353,8 @@ impl std::fmt::Display for Arguments { disable_1271_order_sig_filter, enable_leader_lock, max_maintenance_timeout, + native_price_cache_refresh, + native_price_prefetch_time, } = self; write!(f, "{shared}")?; @@ -345,6 +373,11 @@ impl std::fmt::Display for Arguments { writeln!(f, "allowed_tokens: {allowed_tokens:?}")?; writeln!(f, "unsupported_tokens: {unsupported_tokens:?}")?; writeln!(f, "native_price_estimators: {native_price_estimators}")?; + display_option( + f, + "api_native_price_estimators", + api_native_price_estimators, + )?; writeln!( f, "min_order_validity_period: {min_order_validity_period:?}" @@ -408,6 +441,14 @@ impl std::fmt::Display for Arguments { )?; writeln!(f, "enable_leader_lock: {enable_leader_lock}")?; writeln!(f, "max_maintenance_timeout: {max_maintenance_timeout:?}")?; + writeln!( + f, + "native_price_cache_refresh: {native_price_cache_refresh:?}" + )?; + writeln!( + f, + "native_price_prefetch_time: {native_price_prefetch_time:?}" + )?; Ok(()) } } diff --git a/crates/autopilot/src/run.rs b/crates/autopilot/src/run.rs index d8279bb93d..b80ef71acf 100644 --- a/crates/autopilot/src/run.rs +++ b/crates/autopilot/src/run.rs @@ -47,7 +47,10 @@ use { code_fetching::CachedCodeFetcher, http_client::HttpClientFactory, order_quoting::{self, OrderQuoter}, - price_estimation::factory::{self, PriceEstimatorFactory}, + price_estimation::{ + factory::{self, PriceEstimatorFactory}, + native::NativePriceEstimating, + }, signature_validator, sources::{BaselineSource, uniswap_v2::UniV2BaselineSourceParameters}, token_info::{CachedTokenInfoFetcher, TokenInfoFetcher}, @@ -386,17 +389,44 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { .await .expect("failed to initialize price estimator factory"); - let native_price_estimator = price_estimator_factory - .native_price_estimator( - args.native_price_estimators.as_slice(), - args.native_price_estimation_results_required, - eth.contracts().weth().clone(), - ) - .instrument(info_span!("native_price_estimator")) - .await - .unwrap(); + let weth = eth.contracts().weth().clone(); let prices = db_write.fetch_latest_prices().await.unwrap(); - native_price_estimator.initialize_cache(prices); + let shared_cache = shared::price_estimation::native_price_cache::Cache::new( + args.price_estimation.native_price_cache_max_age, + prices, + ); + let api_sources = args + .api_native_price_estimators + .as_ref() + .unwrap_or(&args.native_price_estimators); + let api_native_price_estimator: Arc = Arc::new( + price_estimator_factory + .caching_native_price_estimator( + api_sources.as_slice(), + args.native_price_estimation_results_required, + &weth, + shared_cache.clone(), + ) + .instrument(info_span!("api_native_price_estimator")) + .await, + ); + + let competition_native_price_updater = { + let caching = price_estimator_factory + .caching_native_price_estimator( + args.native_price_estimators.as_slice(), + args.native_price_estimation_results_required, + &weth, + shared_cache.clone(), + ) + .instrument(info_span!("competition_native_price_updater")) + .await; + 
shared::price_estimation::native_price_cache::NativePriceUpdater::new( + caching, + args.native_price_cache_refresh, + args.native_price_prefetch_time, + ) + }; let price_estimator = price_estimator_factory .price_estimator( @@ -406,7 +436,7 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { .iter() .map(|price_estimator_driver| price_estimator_driver.clone().into()) .collect::>(), - native_price_estimator.clone(), + api_native_price_estimator.clone(), gas_price_estimator.clone(), ) .unwrap(); @@ -470,7 +500,7 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { let quoter = Arc::new(OrderQuoter::new( price_estimator, - native_price_estimator.clone(), + api_native_price_estimator.clone(), gas_price_estimator, Arc::new(db_write.clone()), order_quoting::Validity { @@ -502,7 +532,7 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { ), balance_fetcher.clone(), bad_token_detector.clone(), - native_price_estimator.clone(), + competition_native_price_updater.clone(), signature_validator.clone(), *eth.contracts().weth().address(), args.limit_order_price_factor @@ -527,7 +557,7 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { let (api_shutdown_sender, api_shutdown_receiver) = tokio::sync::oneshot::channel(); let api_task = tokio::spawn(infra::api::serve( args.api_address, - native_price_estimator.clone(), + api_native_price_estimator, args.price_estimation.quote_timeout, api_shutdown_receiver, )); diff --git a/crates/autopilot/src/solvable_orders.rs b/crates/autopilot/src/solvable_orders.rs index db46801587..be5bc52745 100644 --- a/crates/autopilot/src/solvable_orders.rs +++ b/crates/autopilot/src/solvable_orders.rs @@ -9,7 +9,6 @@ use { bigdecimal::BigDecimal, database::order_events::OrderEventLabel, futures::{FutureExt, StreamExt, future::join_all, stream::FuturesUnordered}, - indexmap::IndexSet, itertools::Itertools, model::{ order::{Order, OrderClass, OrderUid}, @@ -23,7 +22,7 @@ use { bad_token::BadTokenDetecting, price_estimation::{ native::{NativePriceEstimating, to_normalized_price}, - native_price_cache::CachingNativePriceEstimator, + native_price_cache::NativePriceUpdater, }, remaining_amounts, signature_validator::{SignatureCheck, SignatureValidating}, @@ -91,7 +90,7 @@ pub struct SolvableOrdersCache { balance_fetcher: Arc, bad_token_detector: Arc, cache: Mutex>, - native_price_estimator: Arc, + native_price_estimator: Arc, signature_validator: Arc, metrics: &'static Metrics, weth: Address, @@ -120,7 +119,7 @@ impl SolvableOrdersCache { banned_users: banned::Users, balance_fetcher: Arc, bad_token_detector: Arc, - native_price_estimator: Arc, + native_price_estimator: Arc, signature_validator: Arc, weth: Address, limit_order_price_factor: BigDecimal, @@ -469,12 +468,12 @@ async fn find_banned_user_orders(orders: &[Order], banned_users: &banned::Users) } async fn get_native_prices( - tokens: &[Address], - native_price_estimator: &CachingNativePriceEstimator, + tokens: HashSet
, + native_price_estimator: &NativePriceUpdater, timeout: Duration, ) -> BTreeMap { native_price_estimator - .estimate_native_prices_with_timeout(tokens, timeout) + .update_tokens_and_fetch_prices(tokens, timeout) .await .into_iter() .flat_map(|(token, result)| { @@ -615,7 +614,7 @@ fn filter_dust_orders(mut orders: Vec, balances: &Balances) -> Vec async fn get_orders_with_native_prices( orders: Vec, - native_price_estimator: &CachingNativePriceEstimator, + native_price_estimator: &NativePriceUpdater, metrics: &Metrics, additional_tokens: impl IntoIterator, timeout: Duration, @@ -626,16 +625,12 @@ async fn get_orders_with_native_prices( .chain(additional_tokens) .collect::>(); - let prices = get_native_prices( - &traded_tokens.into_iter().collect::>(), - native_price_estimator, - timeout, - ) - .await; + let prices = get_native_prices(traded_tokens, native_price_estimator, timeout).await; // Filter orders so that we only return orders that have prices let mut filtered_market_orders = 0_i64; - let (usable, filtered): (Vec<_>, Vec<_>) = orders.into_iter().partition(|order| { + let mut orders = orders; + orders.retain(|order| { let (t0, t1) = (&order.data.sell_token, &order.data.buy_token); match (prices.get(t0), prices.get(t1)) { (Some(_), Some(_)) => true, @@ -645,57 +640,12 @@ async fn get_orders_with_native_prices( } } }); - let tokens_by_priority = prioritize_missing_prices(filtered); - native_price_estimator.replace_high_priority(tokens_by_priority); - // Record separate metrics just for missing native token prices for market - // orders, as they should be prioritized. metrics .auction_market_order_missing_price .set(filtered_market_orders); - (usable, prices) -} - -/// Computes which missing native prices are the most urgent to fetch. -/// Prices for recent orders have the highest priority because those are most -/// likely market orders which users expect to get settled ASAP. -/// For the remaining orders we prioritize token prices that are needed the most -/// often. That way we have the chance to make a majority of orders solvable -/// with very few fetch requests. -fn prioritize_missing_prices(mut orders: Vec) -> IndexSet
{ - /// How old an order can be at most to be considered a market order. - const MARKET_ORDER_AGE: chrono::Duration = chrono::Duration::minutes(30); - let now = chrono::Utc::now(); - - // newer orders at the start - orders.sort_by_key(|o| std::cmp::Reverse(o.metadata.creation_date)); - - let mut high_priority_tokens = IndexSet::new(); - let mut most_used_tokens = HashMap::::new(); - for order in orders { - let sell_token = order.data.sell_token; - let buy_token = order.data.buy_token; - let is_market = now.signed_duration_since(order.metadata.creation_date) <= MARKET_ORDER_AGE; - - if is_market { - // already correct priority because orders were sorted by creation_date - high_priority_tokens.extend([sell_token, buy_token]); - } else { - // count how often tokens are used to prioritize popular tokens - *most_used_tokens.entry(sell_token).or_default() += 1; - *most_used_tokens.entry(buy_token).or_default() += 1; - } - } - - // popular tokens at the start - let most_used_tokens = most_used_tokens - .into_iter() - .sorted_by_key(|entry| std::cmp::Reverse(entry.1)) - .map(|(token, _)| token); - - high_priority_tokens.extend(most_used_tokens); - high_priority_tokens + (orders, prices) } async fn find_unsupported_tokens( @@ -913,7 +863,12 @@ mod tests { HEALTHY_PRICE_ESTIMATION_TIME, PriceEstimationError, native::MockNativePriceEstimating, - native_price_cache::ApproximationToken, + native_price_cache::{ + ApproximationToken, + Cache, + CachingNativePriceEstimator, + NativePriceUpdater, + }, }, signature_validator::{MockSignatureValidating, SignatureValidationError}, }, @@ -956,16 +911,16 @@ mod tests { .withf(move |token, _| *token == token3) .returning(|_, _| async { Ok(0.25) }.boxed()); - let native_price_estimator = CachingNativePriceEstimator::new( + let cache = Cache::new(Duration::from_secs(10), Default::default()); + let caching_estimator = CachingNativePriceEstimator::new( Box::new(native_price_estimator), - Duration::from_secs(10), - Duration::MAX, - None, - Default::default(), + cache, 3, Default::default(), HEALTHY_PRICE_ESTIMATION_TIME, ); + let native_price_estimator = + NativePriceUpdater::new(caching_estimator, Duration::MAX, Default::default()); let metrics = Metrics::instance(observe::metrics::get_storage_registry()).unwrap(); let (filtered_orders, prices) = get_orders_with_native_prices( @@ -1047,21 +1002,23 @@ mod tests { .withf(move |token, _| *token == token5) .returning(|_, _| async { Ok(5.) }.boxed()); - let native_price_estimator = CachingNativePriceEstimator::new( + let cache = Cache::new(Duration::from_secs(10), Default::default()); + let caching_estimator = CachingNativePriceEstimator::new( Box::new(native_price_estimator), - Duration::from_secs(10), - Duration::MAX, - None, - Default::default(), + cache, 1, Default::default(), HEALTHY_PRICE_ESTIMATION_TIME, ); + let native_price_estimator = NativePriceUpdater::new( + caching_estimator, + Duration::from_millis(5), + Default::default(), + ); let metrics = Metrics::instance(observe::metrics::get_storage_registry()).unwrap(); - // We'll have no native prices in this call. But this call will cause a - // background task to fetch the missing prices so we'll have them in the - // next call. + // We'll have no native prices in this call. But set_tokens_to_update + // will cause the background task to fetch them in the next cycle. 
let (filtered_orders, prices) = get_orders_with_native_prices( orders.clone(), &native_price_estimator, @@ -1073,8 +1030,8 @@ mod tests { assert!(filtered_orders.is_empty()); assert!(prices.is_empty()); - // Wait for native prices to get fetched. - tokio::time::sleep(tokio::time::Duration::from_millis(10)).await; + // Wait for native prices to get fetched by the background task. + tokio::time::sleep(tokio::time::Duration::from_millis(30)).await; // Now we have all the native prices we want. let (filtered_orders, prices) = get_orders_with_native_prices( @@ -1144,12 +1101,10 @@ mod tests { .withf(move |token, _| *token == token_approx2) .returning(|_, _| async { Ok(50.) }.boxed()); - let native_price_estimator = CachingNativePriceEstimator::new( + let cache = Cache::new(Duration::from_secs(10), Default::default()); + let caching_estimator = CachingNativePriceEstimator::new( Box::new(native_price_estimator), - Duration::from_secs(10), - Duration::MAX, - None, - Default::default(), + cache, 3, // Set to use native price approximations for the following tokens HashMap::from([ @@ -1158,6 +1113,8 @@ mod tests { ]), HEALTHY_PRICE_ESTIMATION_TIME, ); + let native_price_estimator = + NativePriceUpdater::new(caching_estimator, Duration::MAX, Default::default()); let metrics = Metrics::instance(observe::metrics::get_storage_registry()).unwrap(); let (filtered_orders, prices) = get_orders_with_native_prices( @@ -1544,41 +1501,4 @@ mod tests { orders_with_balance(orders, &balances, settlement_contract, false); assert!(filtered_without_override.is_empty()); } - - #[test] - fn prioritizes_missing_prices() { - let now = chrono::Utc::now(); - - let order = |sell_token, buy_token, age| Order { - metadata: OrderMetadata { - creation_date: now - chrono::Duration::minutes(age), - ..Default::default() - }, - data: OrderData { - sell_token, - buy_token, - ..Default::default() - }, - ..Default::default() - }; - - let orders = vec![ - order(Address::with_last_byte(4), Address::with_last_byte(6), 31), - order(Address::with_last_byte(4), Address::with_last_byte(6), 31), - // older market order - order(Address::with_last_byte(1), Address::with_last_byte(2), 29), - order(Address::with_last_byte(5), Address::with_last_byte(6), 31), - // youngest market order - order(Address::with_last_byte(1), Address::with_last_byte(3), 1), - ]; - let result = prioritize_missing_prices(orders); - assert!(result.into_iter().eq([ - Address::with_last_byte(1), // coming from youngest market order - Address::with_last_byte(3), // coming from youngest market order - Address::with_last_byte(2), // coming from older market order - Address::with_last_byte(6), // coming from limit order (part of 3 orders) - Address::with_last_byte(4), // coming from limit order (part of 2 orders) - Address::with_last_byte(5), // coming from limit order (part of 1 orders) - ])); - } } diff --git a/crates/e2e/src/setup/services.rs b/crates/e2e/src/setup/services.rs index c018566960..7e2b4afd04 100644 --- a/crates/e2e/src/setup/services.rs +++ b/crates/e2e/src/setup/services.rs @@ -138,7 +138,6 @@ impl<'a> Services<'a> { format!("--node-ws-url={NODE_WS_HOST}"), "--simulation-node-url=http://localhost:8545".to_string(), "--native-price-cache-max-age=2s".to_string(), - "--native-price-prefetch-time=500ms".to_string(), format!( "--hooks-contract-address={:?}", self.contracts.hooks.address() @@ -150,6 +149,7 @@ impl<'a> Services<'a> { fn autopilot_arguments(&self) -> impl Iterator + use<> { self.api_autopilot_arguments().chain([ "--quote-timeout=10s".to_string(), + 
"--native-price-prefetch-time=500ms".to_string(), "--native-price-estimators=Driver|test_quoter|http://localhost:11088/test_solver" .to_string(), ]) diff --git a/crates/orderbook/src/run.rs b/crates/orderbook/src/run.rs index c17bcc5bfd..981dfe3357 100644 --- a/crates/orderbook/src/run.rs +++ b/crates/orderbook/src/run.rs @@ -315,16 +315,21 @@ pub async fn run(args: Arguments) { .await .expect("failed to initialize price estimator factory"); - let native_price_estimator = price_estimator_factory - .native_price_estimator( - args.native_price_estimators.as_slice(), - args.fast_price_estimation_results_required, - native_token.clone(), - ) - .await - .unwrap(); let prices = postgres_write.fetch_latest_prices().await.unwrap(); - native_price_estimator.initialize_cache(prices); + let cache = shared::price_estimation::native_price_cache::Cache::new( + args.price_estimation.native_price_cache_max_age, + prices, + ); + let native_price_estimator: Arc = Arc::new( + price_estimator_factory + .caching_native_price_estimator( + args.native_price_estimators.as_slice(), + args.fast_price_estimation_results_required, + &native_token, + cache, + ) + .await, + ); let price_estimator = price_estimator_factory .price_estimator( diff --git a/crates/shared/Cargo.toml b/crates/shared/Cargo.toml index 14538595f1..752be21c00 100644 --- a/crates/shared/Cargo.toml +++ b/crates/shared/Cargo.toml @@ -12,6 +12,7 @@ doctest = false alloy = { workspace = true, features = ["sol-types", "signer-local", "rand", "provider-trace-api"] } anyhow = { workspace = true } app-data = { workspace = true } +arc-swap = { workspace = true } bytes-hex = { workspace = true } async-trait = { workspace = true } bigdecimal = { workspace = true } diff --git a/crates/shared/src/price_estimation/factory.rs b/crates/shared/src/price_estimation/factory.rs index 195df20563..74ac93f0f1 100644 --- a/crates/shared/src/price_estimation/factory.rs +++ b/crates/shared/src/price_estimation/factory.rs @@ -6,8 +6,8 @@ use { competition::CompetitionEstimator, external::ExternalPriceEstimator, instrumented::InstrumentedPriceEstimator, - native::{self, NativePriceEstimator}, - native_price_cache::{ApproximationToken, CachingNativePriceEstimator}, + native::{self, NativePriceEstimating, NativePriceEstimator}, + native_price_cache::{self, ApproximationToken}, sanitized::SanitizedPriceEstimator, trade_verifier::{TradeVerifier, TradeVerifying}, }, @@ -23,7 +23,6 @@ use { ExternalSolver, buffered::{self, BufferedRequest, NativePriceBatchFetching}, competition::PriceRanking, - native::NativePriceEstimating, }, tenderly_api::TenderlyCodeSimulator, token_info::TokenInfoFetching, @@ -360,22 +359,18 @@ impl<'a> PriceEstimatorFactory<'a> { )) } + /// Creates a native price estimator from the given sources. 
pub async fn native_price_estimator( &mut self, native: &[Vec], results_required: NonZeroUsize, - weth: WETH9::Instance, - ) -> Result> { - anyhow::ensure!( - self.args.native_price_cache_max_age > self.args.native_price_prefetch_time, - "price cache prefetch time needs to be less than price cache max age" - ); - + weth: &WETH9::Instance, + ) -> Result> { let mut estimators = Vec::with_capacity(native.len()); for stage in native.iter() { let mut stages = Vec::with_capacity(stage.len()); for source in stage { - stages.push(self.create_native_estimator(source, &weth).await?); + stages.push(self.create_native_estimator(source, weth).await?); } estimators.push(stages); } @@ -384,21 +379,33 @@ impl<'a> PriceEstimatorFactory<'a> { CompetitionEstimator::new(estimators, PriceRanking::MaxOutAmount) .with_verification(self.args.quote_verification) .with_early_return(results_required); - let approximation_tokens = self.build_approximation_tokens().await.context( - "failed to build native price approximation tokens with normalization factors", - )?; - - let native_estimator = Arc::new(CachingNativePriceEstimator::new( - Box::new(competition_estimator), - self.args.native_price_cache_max_age, - self.args.native_price_cache_refresh, - Some(self.args.native_price_cache_max_update_size), - self.args.native_price_prefetch_time, + Ok(Box::new(competition_estimator)) + } + + /// Creates a [`CachingNativePriceEstimator`] that wraps a native price + /// estimator with an in-memory cache. + pub async fn caching_native_price_estimator( + &mut self, + native: &[Vec], + results_required: NonZeroUsize, + weth: &WETH9::Instance, + cache: native_price_cache::Cache, + ) -> native_price_cache::CachingNativePriceEstimator { + let inner = self + .native_price_estimator(native, results_required, weth) + .await + .expect("failed to build native price estimator"); + let approximation_tokens = self + .build_approximation_tokens() + .await + .expect("failed to build native price approximation tokens"); + native_price_cache::CachingNativePriceEstimator::new( + inner, + cache, self.args.native_price_cache_concurrent_requests, approximation_tokens, self.args.quote_timeout, - )); - Ok(native_estimator) + ) } /// Builds the approximation tokens mapping with normalization factors based diff --git a/crates/shared/src/price_estimation/mod.rs b/crates/shared/src/price_estimation/mod.rs index 1535c742cc..e8bb14150b 100644 --- a/crates/shared/src/price_estimation/mod.rs +++ b/crates/shared/src/price_estimation/mod.rs @@ -164,15 +164,6 @@ pub struct Arguments { #[clap(long, env, verbatim_doc_comment)] pub price_estimation_rate_limiter: Option, - /// How often the native price estimator should refresh its cache. - #[clap( - long, - env, - default_value = "1s", - value_parser = humantime::parse_duration, - )] - pub native_price_cache_refresh: Duration, - /// How long cached native prices stay valid. #[clap( long, @@ -182,23 +173,6 @@ pub struct Arguments { )] pub native_price_cache_max_age: Duration, - /// How long before expiry the native price cache should try to update the - /// price in the background. This is useful to make sure that prices are - /// usable at all times. This value has to be smaller than - /// `--native-price-cache-max-age`. - #[clap( - long, - env, - default_value = "80s", - value_parser = humantime::parse_duration, - )] - pub native_price_prefetch_time: Duration, - - /// How many cached native token prices can be updated at most in one - /// maintenance cycle. 
- #[clap(long, env, default_value = "3")] - pub native_price_cache_max_update_size: usize, - /// How many price estimation requests can be executed concurrently in the /// maintenance task. #[clap(long, env, default_value = "1")] @@ -354,10 +328,7 @@ impl Display for Arguments { fn fmt(&self, f: &mut Formatter) -> fmt::Result { let Self { price_estimation_rate_limiter, - native_price_cache_refresh, native_price_cache_max_age, - native_price_prefetch_time, - native_price_cache_max_update_size, native_price_cache_concurrent_requests, amount_to_estimate_prices_with, balancer_sor_url, @@ -377,22 +348,10 @@ impl Display for Arguments { "price_estimation_rate_limites", price_estimation_rate_limiter, )?; - writeln!( - f, - "native_price_cache_refresh: {native_price_cache_refresh:?}" - )?; writeln!( f, "native_price_cache_max_age: {native_price_cache_max_age:?}" )?; - writeln!( - f, - "native_price_prefetch_time: {native_price_prefetch_time:?}" - )?; - writeln!( - f, - "native_price_cache_max_update_size: {native_price_cache_max_update_size}" - )?; writeln!( f, "native_price_cache_concurrent_requests: {native_price_cache_concurrent_requests}" diff --git a/crates/shared/src/price_estimation/native_price_cache.rs b/crates/shared/src/price_estimation/native_price_cache.rs index bf49185402..201d3dc3a5 100644 --- a/crates/shared/src/price_estimation/native_price_cache.rs +++ b/crates/shared/src/price_estimation/native_price_cache.rs @@ -6,14 +6,14 @@ use { from_normalized_price, }, alloy::primitives::Address, + arc_swap::ArcSwap, bigdecimal::BigDecimal, futures::{FutureExt, StreamExt}, - indexmap::IndexSet, prometheus::{IntCounter, IntCounterVec, IntGauge}, rand::Rng, std::{ - collections::{HashMap, hash_map::Entry}, - sync::{Arc, Mutex, MutexGuard, Weak}, + collections::{HashMap, HashSet}, + sync::{Arc, Mutex, MutexGuard}, time::{Duration, Instant}, }, tokio::time, @@ -62,57 +62,30 @@ impl ApproximationToken { } #[derive(prometheus_metric_storage::MetricStorage)] -struct Metrics { +struct CacheMetrics { /// native price cache hits misses #[metric(labels("result"))] native_price_cache_access: IntCounterVec, /// number of items in cache native_price_cache_size: IntGauge, - /// number of background updates performed - native_price_cache_background_updates: IntCounter, - /// number of items in cache that are outdated - native_price_cache_outdated_entries: IntGauge, } -impl Metrics { +impl CacheMetrics { fn get() -> &'static Self { - Metrics::instance(observe::metrics::get_storage_registry()).unwrap() + CacheMetrics::instance(observe::metrics::get_storage_registry()).unwrap() } } -/// Wrapper around `Box` which caches successful price -/// estimates for some time and supports updating the cache in the background. -/// -/// The size of the underlying cache is unbounded. -/// -/// Is an Arc internally. -#[derive(Clone)] -pub struct CachingNativePriceEstimator(Arc); - -struct Inner { - cache: Mutex>, - high_priority: Mutex>, - estimator: Box, - max_age: Duration, - concurrent_requests: usize, - // TODO remove when implementing a less hacky solution - /// Maps a requested token to an approximating token. If the system - /// wants to get the native price for the requested token the native - /// price of the approximating token should be fetched and returned instead. - /// This can be useful for tokens that are hard to route but are pegged to - /// the same underlying asset so approximating their native prices is deemed - /// safe (e.g. csUSDL => Dai). 
- /// The normalization factor handles decimal differences between tokens. - /// After startup this is a read only value. - approximation_tokens: HashMap, - quote_timeout: Duration, +#[derive(prometheus_metric_storage::MetricStorage)] +struct UpdaterMetrics { + /// number of background updates performed + native_price_cache_background_updates: IntCounter, } -struct UpdateTask { - inner: Weak, - update_interval: Duration, - update_size: Option, - prefetch_time: Duration, +impl UpdaterMetrics { + fn get() -> &'static Self { + UpdaterMetrics::instance(observe::metrics::get_storage_registry()).unwrap() + } } type CacheEntry = Result; @@ -131,25 +104,30 @@ struct CachedResult { const ACCUMULATIVE_ERRORS_THRESHOLD: u32 = 5; impl CachedResult { - fn new( - result: CacheEntry, - updated_at: Instant, - requested_at: Instant, - current_accumulative_errors_count: u32, - ) -> Self { - let estimator_internal_errors_count = - matches!(result, Err(PriceEstimationError::EstimatorInternal(_))) - .then_some(current_accumulative_errors_count + 1) - .unwrap_or_default(); + fn new(result: CacheEntry) -> Self { + let now = Instant::now(); + let is_accumulating_error = + matches!(result, Err(PriceEstimationError::EstimatorInternal(_))); Self { result, - updated_at, - requested_at, - accumulative_errors_count: estimator_internal_errors_count, + updated_at: now, + requested_at: now, + accumulative_errors_count: u32::from(is_accumulating_error), } } + fn update(&mut self, result: CacheEntry) { + let now = Instant::now(); + self.requested_at = now; + self.updated_at = now; + self.accumulative_errors_count = match result { + Err(PriceEstimationError::EstimatorInternal(_)) => self.accumulative_errors_count + 1, + _ => 0, + }; + self.result = result; + } + /// The result is not ready if the estimator has returned an internal error /// and consecutive errors are less than /// `ESTIMATOR_INTERNAL_ERRORS_THRESHOLD`. @@ -159,132 +137,6 @@ impl CachedResult { } } -impl Inner { - // Returns a single cached price and updates its `requested_at` field. - fn get_cached_price( - token: Address, - now: Instant, - cache: &mut MutexGuard>, - max_age: &Duration, - create_missing_entry: bool, - ) -> Option { - match cache.entry(token) { - Entry::Occupied(mut entry) => { - let entry = entry.get_mut(); - entry.requested_at = now; - let is_recent = now.saturating_duration_since(entry.updated_at) < *max_age; - is_recent.then_some(entry.clone()) - } - Entry::Vacant(entry) => { - if create_missing_entry { - // Create an outdated cache entry so the background task keeping the cache warm - // will fetch the price during the next maintenance cycle. - // This should happen only for prices missing while building the auction. - // Otherwise malicious actors could easily cause the cache size to blow up. - let outdated_timestamp = now.checked_sub(*max_age).unwrap(); - tracing::trace!(?token, "create outdated price entry"); - entry.insert(CachedResult::new( - Ok(0.), - outdated_timestamp, - now, - Default::default(), - )); - } - None - } - } - } - - fn get_ready_to_use_cached_price( - token: Address, - now: Instant, - cache: &mut MutexGuard>, - max_age: &Duration, - create_missing_entry: bool, - ) -> Option { - Self::get_cached_price(token, now, cache, max_age, create_missing_entry) - .filter(|cached| cached.is_ready()) - } - - /// Checks cache for the given tokens one by one. If the price is already - /// cached, it gets returned. If it's not in the cache, a new price - /// estimation request gets issued. 
We check the cache before each - /// request because they can take a long time and some other task might - /// have fetched some requested price in the meantime. - fn estimate_prices_and_update_cache<'a>( - &'a self, - tokens: &'a [Address], - max_age: Duration, - request_timeout: Duration, - ) -> futures::stream::BoxStream<'a, (Address, NativePriceEstimateResult)> { - let estimates = tokens.iter().map(move |token| async move { - let current_accumulative_errors_count = { - // check if the price is cached by now - let now = Instant::now(); - let mut cache = self.cache.lock().unwrap(); - - match Self::get_cached_price(*token, now, &mut cache, &max_age, false) { - Some(cached) if cached.is_ready() => { - return (*token, cached.result); - } - Some(cached) => cached.accumulative_errors_count, - None => Default::default(), - } - }; - - let approximation = self - .approximation_tokens - .get(token) - .copied() - .unwrap_or(ApproximationToken::same_decimals(*token)); - - let result = self - .estimator - .estimate_native_price(approximation.address, request_timeout) - .await - .map(|price| approximation.normalize_price(price)); - - // update price in cache - if should_cache(&result) { - let now = Instant::now(); - let mut cache = self.cache.lock().unwrap(); - - cache.insert( - *token, - CachedResult::new(result.clone(), now, now, current_accumulative_errors_count), - ); - }; - - (*token, result) - }); - futures::stream::iter(estimates) - .buffered(self.concurrent_requests) - .boxed() - } - - /// Tokens with highest priority first. - fn sorted_tokens_to_update(&self, max_age: Duration, now: Instant) -> Vec
{ - let mut outdated: Vec<_> = self - .cache - .lock() - .unwrap() - .iter() - .filter(|(_, cached)| now.saturating_duration_since(cached.updated_at) > max_age) - .map(|(token, cached)| (*token, cached.requested_at)) - .collect(); - - let high_priority = self.high_priority.lock().unwrap().clone(); - let index = |token: &Address| high_priority.get_index_of(token).unwrap_or(usize::MAX); - outdated.sort_by_cached_key(|entry| { - ( - index(&entry.0), // important items have a low index - std::cmp::Reverse(entry.1), // important items have recent (i.e. "big") timestamp - ) - }); - outdated.into_iter().map(|(token, _)| token).collect() - } -} - fn should_cache(result: &Result) -> bool { // We don't want to cache errors that we consider transient match result { @@ -302,137 +154,84 @@ fn should_cache(result: &Result) -> bool { } } -impl UpdateTask { - /// Single run of the background updating process. - async fn single_update(&self, inner: &Inner) { - let metrics = Metrics::get(); - metrics - .native_price_cache_size - .set(i64::try_from(inner.cache.lock().unwrap().len()).unwrap_or(i64::MAX)); - - let max_age = inner.max_age.saturating_sub(self.prefetch_time); - let mut outdated_entries = inner.sorted_tokens_to_update(max_age, Instant::now()); - - tracing::trace!(tokens = ?outdated_entries, first_n = ?self.update_size, "outdated prices to fetch"); - - metrics - .native_price_cache_outdated_entries - .set(i64::try_from(outdated_entries.len()).unwrap_or(i64::MAX)); - - outdated_entries.truncate(self.update_size.unwrap_or(usize::MAX)); - - if outdated_entries.is_empty() { - return; - } - - let mut stream = - inner.estimate_prices_and_update_cache(&outdated_entries, max_age, inner.quote_timeout); - while stream.next().await.is_some() {} - metrics - .native_price_cache_background_updates - .inc_by(outdated_entries.len() as u64); - } +/// Passive shared data store for native price cache entries. Clone via internal +/// `Arc`. +#[derive(Clone)] +pub struct Cache(Arc); - /// Runs background updates until inner is no longer alive. - async fn run(self) { - while let Some(inner) = self.inner.upgrade() { - let now = Instant::now(); - self.single_update(&inner).await; - tokio::time::sleep(self.update_interval.saturating_sub(now.elapsed())).await; - } - } +struct CacheInner { + data: Mutex>, + max_age: Duration, } -impl CachingNativePriceEstimator { - pub fn initialize_cache(&self, prices: HashMap) { +impl Cache { + pub fn new(max_age: Duration, initial_prices: HashMap) -> Self { let mut rng = rand::thread_rng(); let now = std::time::Instant::now(); - let cache = prices + let data = initial_prices .into_iter() .filter_map(|(token, price)| { - // Generate random `updated_at` timestamp - // to avoid spikes of expired prices. - let percent_expired = rng.gen_range(50..=90); - let age = self.0.max_age.as_secs() * percent_expired / 100; - let updated_at = now - Duration::from_secs(age); - + let updated_at = Self::random_updated_at(max_age, now, &mut rng); Some(( token, - CachedResult::new( - Ok(from_normalized_price(price)?), + CachedResult { + result: Ok(from_normalized_price(price)?), updated_at, - now, - Default::default(), - ), + requested_at: now, + accumulative_errors_count: 0, + }, )) }) .collect::>(); - *self.0.cache.lock().unwrap() = cache; + Self(Arc::new(CacheInner { + data: Mutex::new(data), + max_age, + })) } - /// Creates new CachingNativePriceEstimator using `estimator` to calculate - /// native prices which get cached a duration of `max_age`. 
- /// Spawns a background task maintaining the cache once per - /// `update_interval`. Only soon to be outdated prices get updated and - /// recently used prices have a higher priority. If `update_size` is - /// `Some(n)` at most `n` prices get updated per interval. - /// If `update_size` is `None` no limit gets applied. - #[expect(clippy::too_many_arguments)] - pub fn new( - estimator: Box, - max_age: Duration, - update_interval: Duration, - update_size: Option, - prefetch_time: Duration, - concurrent_requests: usize, - approximation_tokens: HashMap, - quote_timeout: Duration, - ) -> Self { - let inner = Arc::new(Inner { - estimator, - cache: Default::default(), - high_priority: Default::default(), - max_age, - concurrent_requests, - approximation_tokens, - quote_timeout, - }); + fn max_age(&self) -> Duration { + self.0.max_age + } - let update_task = UpdateTask { - inner: Arc::downgrade(&inner), - update_interval, - update_size, - prefetch_time, - } - .run() - .instrument(tracing::info_span!("caching_native_price_estimator")); - tokio::spawn(update_task); + /// Returns a randomized `updated_at` timestamp that is 50-90% of max_age + /// in the past, to avoid spikes of expired prices all being fetched at + /// once. + fn random_updated_at(max_age: Duration, now: Instant, rng: &mut impl Rng) -> Instant { + let percent_expired = rng.gen_range(50..=90); + let age = max_age.as_secs() * percent_expired / 100; + now - Duration::from_secs(age) + } - Self(inner) + fn len(&self) -> usize { + self.0.data.lock().unwrap().len() } - /// Only returns prices that are currently cached. Missing prices will get - /// prioritized to get fetched during the next cycles of the maintenance - /// background task. + fn get_cached_price( + token: Address, + now: Instant, + cache: &mut MutexGuard>, + max_age: &Duration, + ) -> Option { + let entry = cache.get_mut(&token)?; + entry.requested_at = now; + let is_recent = now.saturating_duration_since(entry.updated_at) < *max_age; + (is_recent && entry.is_ready()).then_some(entry.clone()) + } + + /// Only returns prices that are currently cached. fn get_cached_prices( &self, tokens: &[Address], ) -> HashMap> { let now = Instant::now(); - let mut cache = self.0.cache.lock().unwrap(); + let mut cache = self.0.data.lock().unwrap(); let mut results = HashMap::default(); for token in tokens { - let cached = Inner::get_ready_to_use_cached_price( - *token, - now, - &mut cache, - &self.0.max_age, - true, - ); + let cached = Self::get_cached_price(*token, now, &mut cache, &self.0.max_age); let label = if cached.is_some() { "hits" } else { "misses" }; - Metrics::get() + CacheMetrics::get() .native_price_cache_access .with_label_values(&[label]) .inc_by(1); @@ -443,17 +242,118 @@ impl CachingNativePriceEstimator { results } - pub fn replace_high_priority(&self, tokens: IndexSet
) { - tracing::trace!(?tokens, "update high priority tokens"); - *self.0.high_priority.lock().unwrap() = tokens; + fn insert(&self, token: Address, result: CacheEntry) { + let mut cache = self.0.data.lock().unwrap(); + cache + .entry(token) + .and_modify(|value| value.update(result.clone())) + .or_insert_with(|| CachedResult::new(result)); + } +} + +/// Wrapper around `Box` which caches successful +/// price estimates for some time. Does not spawn any background tasks. +/// +/// Is an Arc internally. +#[derive(Clone)] +pub struct CachingNativePriceEstimator(Arc); + +struct CachingInner { + estimator: Box, + cache: Cache, + concurrent_requests: usize, + // TODO remove when implementing a less hacky solution + /// Maps a requested token to an approximating token. If the system + /// wants to get the native price for the requested token the native + /// price of the approximating token should be fetched and returned instead. + /// This can be useful for tokens that are hard to route but are pegged to + /// the same underlying asset so approximating their native prices is deemed + /// safe (e.g. csUSDL => Dai). + /// The normalization factor handles decimal differences between tokens. + /// After startup this is a read only value. + approximation_tokens: HashMap, + quote_timeout: Duration, +} + +impl CachingNativePriceEstimator { + pub fn new( + estimator: Box, + cache: Cache, + concurrent_requests: usize, + approximation_tokens: HashMap, + quote_timeout: Duration, + ) -> Self { + let inner = Arc::new(CachingInner { + estimator, + cache, + concurrent_requests, + approximation_tokens, + quote_timeout, + }); + Self(inner) } - pub async fn estimate_native_prices_with_timeout<'a>( + /// Checks cache for the given tokens one by one. If the price is already + /// cached, it gets returned. If it's not in the cache, a new price + /// estimation request gets issued. We check the cache before each + /// request because they can take a long time and some other task might + /// have fetched some requested price in the meantime. 
+ fn estimate_prices_and_update_cache<'a, I>( &'a self, - tokens: &'a [Address], + tokens: I, + max_age: Duration, + request_timeout: Duration, + ) -> futures::stream::BoxStream<'a, (Address, NativePriceEstimateResult)> + where + I: IntoIterator, + I::IntoIter: Send + 'a, + { + let estimates = tokens.into_iter().map(move |token| async move { + // check if the price is cached by now + let now = Instant::now(); + { + let mut cache = self.0.cache.0.data.lock().unwrap(); + if let Some(cached) = Cache::get_cached_price(token, now, &mut cache, &max_age) { + return (token, cached.result); + } + } + + let approximation = self + .0 + .approximation_tokens + .get(&token) + .copied() + .unwrap_or(ApproximationToken::same_decimals(token)); + + let result = self + .0 + .estimator + .estimate_native_price(approximation.address, request_timeout) + .await + .map(|price| approximation.normalize_price(price)); + + // update price in cache + if should_cache(&result) { + self.0.cache.insert(token, result.clone()); + }; + + (token, result) + }); + futures::stream::iter(estimates) + .buffered(self.0.concurrent_requests) + .boxed() + } + + fn cache(&self) -> &Cache { + &self.0.cache + } + + pub async fn fetch_prices( + &self, + tokens: &[Address], timeout: Duration, ) -> HashMap { - let mut prices = self.get_cached_prices(tokens); + let mut prices = self.0.cache.get_cached_prices(tokens); if timeout.is_zero() { return prices; } @@ -464,8 +364,7 @@ impl CachingNativePriceEstimator { .copied() .collect(); let price_stream = - self.0 - .estimate_prices_and_update_cache(&uncached_tokens, self.0.max_age, timeout); + self.estimate_prices_and_update_cache(uncached_tokens, self.0.cache.max_age(), timeout); let _ = time::timeout(timeout, async { let mut price_stream = price_stream; @@ -491,12 +390,12 @@ impl NativePriceEstimating for CachingNativePriceEstimator { async move { let cached = { let now = Instant::now(); - let mut cache = self.0.cache.lock().unwrap(); - Inner::get_ready_to_use_cached_price(token, now, &mut cache, &self.0.max_age, false) + let mut cache = self.0.cache.0.data.lock().unwrap(); + Cache::get_cached_price(token, now, &mut cache, &self.0.cache.0.max_age) }; let label = if cached.is_some() { "hits" } else { "misses" }; - Metrics::get() + CacheMetrics::get() .native_price_cache_access .with_label_values(&[label]) .inc_by(1); @@ -505,8 +404,7 @@ impl NativePriceEstimating for CachingNativePriceEstimator { return cached.result; } - self.0 - .estimate_prices_and_update_cache(&[token], self.0.max_age, timeout) + self.estimate_prices_and_update_cache([token], self.0.cache.max_age(), timeout) .next() .await .unwrap() @@ -516,6 +414,98 @@ impl NativePriceEstimating for CachingNativePriceEstimator { } } +/// Background maintenance worker that periodically updates native prices +/// for a set of tokens. Uses a `CachingNativePriceEstimator` for fetching +/// and caching prices. 
+pub struct NativePriceUpdater { + estimator: CachingNativePriceEstimator, + tokens_to_update: ArcSwap>, +} + +impl NativePriceUpdater { + pub fn new( + estimator: CachingNativePriceEstimator, + update_interval: Duration, + prefetch_time: Duration, + ) -> Arc { + assert!( + estimator.cache().max_age() > prefetch_time, + "price cache prefetch time ({:?}) must be less than max age ({:?})", + prefetch_time, + estimator.cache().max_age(), + ); + + let updater = Arc::new(Self { + estimator, + tokens_to_update: ArcSwap::new(Arc::new(HashSet::new())), + }); + + // Don't keep the updater alive just for the background task + let weak = Arc::downgrade(&updater); + let update_task = async move { + while let Some(updater) = weak.upgrade() { + let now = Instant::now(); + updater.single_update(prefetch_time).await; + drop(updater); + tokio::time::sleep(update_interval.saturating_sub(now.elapsed())).await; + } + } + .instrument(tracing::info_span!("native_price_updater")); + tokio::spawn(update_task); + + updater + } + + /// Replaces the full set of tokens that should be maintained by the + /// background task and fetches their current prices. + pub async fn update_tokens_and_fetch_prices( + &self, + tokens: HashSet
, + timeout: Duration, + ) -> HashMap { + tracing::trace!(?tokens, "update tokens to maintain"); + let token_list: Vec<_> = tokens.iter().copied().collect(); + self.tokens_to_update.store(Arc::new(tokens)); + self.estimator.fetch_prices(&token_list, timeout).await + } + + async fn single_update(&self, prefetch_time: Duration) { + let metrics = UpdaterMetrics::get(); + let cache = self.estimator.cache(); + + CacheMetrics::get() + .native_price_cache_size + .set(i64::try_from(cache.len()).unwrap_or(i64::MAX)); + + let tokens_to_update = self.tokens_to_update.load_full(); + if tokens_to_update.is_empty() { + return; + } + + let max_age = cache.max_age().saturating_sub(prefetch_time); + let timeout = self.estimator.0.quote_timeout; + self.estimator + .estimate_prices_and_update_cache(tokens_to_update.iter().copied(), max_age, timeout) + // Drive the stream to completion. Results are written to the cache as + // a side effect, so we don't need to inspect them here. + .for_each(|_| async {}) + .await; + metrics + .native_price_cache_background_updates + .inc_by(tokens_to_update.len() as u64); + } +} + +impl NativePriceEstimating for NativePriceUpdater { + fn estimate_native_price( + &self, + token: Address, + timeout: Duration, + ) -> futures::future::BoxFuture<'_, NativePriceEstimateResult> { + self.estimator.estimate_native_price(token, timeout) + } +} + #[cfg(test)] mod tests { use { @@ -534,6 +524,24 @@ mod tests { Address::left_padding_from(&u.to_be_bytes()) } + /// Helper to create a CachingNativePriceEstimator with its own Cache + /// (convenience for tests that don't need a separate Cache). + fn create_caching_estimator( + inner: MockNativePriceEstimating, + max_age: Duration, + concurrent_requests: usize, + approximation_tokens: HashMap, + ) -> CachingNativePriceEstimator { + let cache = Cache::new(max_age, Default::default()); + CachingNativePriceEstimator::new( + Box::new(inner), + cache, + concurrent_requests, + approximation_tokens, + HEALTHY_PRICE_ESTIMATION_TIME, + ) + } + #[tokio::test] async fn caches_successful_estimates_with_loaded_prices() { let mut inner = MockNativePriceEstimating::new(); @@ -545,23 +553,20 @@ mod tests { let prices = HashMap::from_iter((0..10).map(|t| (token(t), BigDecimal::try_from(1e18).unwrap()))); + let cache = Cache::new(Duration::from_secs(MAX_AGE_SECS), prices); let estimator = CachingNativePriceEstimator::new( Box::new(inner), - Duration::from_secs(MAX_AGE_SECS), - Default::default(), - None, - Default::default(), + cache, 1, Default::default(), HEALTHY_PRICE_ESTIMATION_TIME, ); - estimator.initialize_cache(prices); { // Check that `updated_at` timestamps are initialized with // reasonable values. 
- let cache = estimator.0.cache.lock().unwrap(); - for value in cache.values() { + let data = estimator.cache().0.data.lock().unwrap(); + for value in data.values() { let elapsed = value.updated_at.elapsed(); assert!(elapsed >= min_age && elapsed <= max_age); } @@ -583,16 +588,8 @@ mod tests { .times(1) .returning(|_, _| async { Ok(1.0) }.boxed()); - let estimator = CachingNativePriceEstimator::new( - Box::new(inner), - Duration::from_millis(30), - Default::default(), - None, - Default::default(), - 1, - Default::default(), - HEALTHY_PRICE_ESTIMATION_TIME, - ); + let estimator = + create_caching_estimator(inner, Duration::from_millis(30), 1, Default::default()); for _ in 0..10 { let result = estimator @@ -621,12 +618,9 @@ mod tests { .withf(move |t, _| *t == token(200)) .returning(|_, _| async { Ok(200.0) }.boxed()); - let estimator = CachingNativePriceEstimator::new( - Box::new(inner), + let estimator = create_caching_estimator( + inner, Duration::from_millis(30), - Default::default(), - None, - Default::default(), 1, // set token approximations for tokens 1 and 2 (same decimals) HashMap::from([ @@ -639,7 +633,6 @@ mod tests { ApproximationToken::same_decimals(Address::with_last_byte(200)), ), ]), - HEALTHY_PRICE_ESTIMATION_TIME, ); // no approximation token used for token 0 @@ -689,18 +682,14 @@ mod tests { // from_decimals=6 (USDC), to_decimals=18 (DAI) // Normalization factor = 10^(18-6) = 10^12 // Price should be 5e-22 * 10^12 = 5e-10 ETH per USDC microunit - let estimator = CachingNativePriceEstimator::new( - Box::new(inner), + let estimator = create_caching_estimator( + inner, Duration::from_millis(30), - Default::default(), - None, - Default::default(), 1, HashMap::from([( Address::with_last_byte(1), ApproximationToken::with_normalization((Address::with_last_byte(100), 18), 6), )]), - HEALTHY_PRICE_ESTIMATION_TIME, ); let price = estimator @@ -731,18 +720,14 @@ mod tests { // from_decimals=18 (DAI), to_decimals=6 (USDC) // Normalization factor = 10^(6-18) = 10^-12 // Price should be 5e-10 * 10^-12 = 5e-22 ETH per DAI wei - let estimator = CachingNativePriceEstimator::new( - Box::new(inner), + let estimator = create_caching_estimator( + inner, Duration::from_millis(30), - Default::default(), - None, - Default::default(), 1, HashMap::from([( Address::with_last_byte(1), ApproximationToken::with_normalization((Address::with_last_byte(100), 6), 18), )]), - HEALTHY_PRICE_ESTIMATION_TIME, ); let price = estimator @@ -767,16 +752,8 @@ mod tests { .times(1) .returning(|_, _| async { Err(PriceEstimationError::NoLiquidity) }.boxed()); - let estimator = CachingNativePriceEstimator::new( - Box::new(inner), - Duration::from_millis(30), - Default::default(), - None, - Default::default(), - 1, - Default::default(), - HEALTHY_PRICE_ESTIMATION_TIME, - ); + let estimator = + create_caching_estimator(inner, Duration::from_millis(30), 1, Default::default()); for _ in 0..10 { let result = estimator @@ -838,16 +815,8 @@ mod tests { async { Err(PriceEstimationError::EstimatorInternal(anyhow!("boom"))) }.boxed() }); - let estimator = CachingNativePriceEstimator::new( - Box::new(inner), - Duration::from_millis(100), - Duration::from_millis(200), - None, - Default::default(), - 1, - Default::default(), - HEALTHY_PRICE_ESTIMATION_TIME, - ); + let estimator = + create_caching_estimator(inner, Duration::from_millis(100), 1, Default::default()); // First 3 calls: The cache is not used. Counter gets increased. 
for _ in 0..3 { @@ -910,16 +879,8 @@ mod tests { .times(10) .returning(|_, _| async { Err(PriceEstimationError::RateLimited) }.boxed()); - let estimator = CachingNativePriceEstimator::new( - Box::new(inner), - Duration::from_millis(30), - Default::default(), - None, - Default::default(), - 1, - Default::default(), - HEALTHY_PRICE_ESTIMATION_TIME, - ); + let estimator = + create_caching_estimator(inner, Duration::from_millis(30), 1, Default::default()); for _ in 0..10 { let result = estimator @@ -951,33 +912,28 @@ mod tests { assert_eq!(passed_token, token(1)); async { Ok(2.0) }.boxed() }); - // maintenance task updates n=1 outdated prices + // maintenance task updates outdated prices (order is non-deterministic) inner .expect_estimate_native_price() - .times(1) - .returning(|passed_token, _| { - assert_eq!(passed_token, token(1)); - async { Ok(4.0) }.boxed() - }); - // user requested something which has been skipped by the maintenance task - inner - .expect_estimate_native_price() - .times(1) + .times(2) .returning(|passed_token, _| { - assert_eq!(passed_token, token(0)); - async { Ok(3.0) }.boxed() + let price = if passed_token == token(0) { 3.0 } else { 4.0 }; + async move { Ok(price) }.boxed() }); + let cache = Cache::new(Duration::from_millis(30), Default::default()); let estimator = CachingNativePriceEstimator::new( Box::new(inner), - Duration::from_millis(30), - Duration::from_millis(50), - Some(1), - Duration::default(), + cache, 1, Default::default(), HEALTHY_PRICE_ESTIMATION_TIME, ); + let updater = NativePriceUpdater::new( + estimator.clone(), + Duration::from_millis(50), + Duration::default(), + ); // fill cache with 2 different queries let result = estimator @@ -989,6 +945,14 @@ mod tests { .await; assert_eq!(result.as_ref().unwrap().to_i64().unwrap(), 2); + // Tell the updater about these tokens + updater + .update_tokens_and_fetch_prices( + [token(0), token(1)].into_iter().collect(), + Duration::ZERO, + ) + .await; + // wait for maintenance cycle tokio::time::sleep(Duration::from_millis(60)).await; @@ -1010,22 +974,28 @@ mod tests { .expect_estimate_native_price() .times(10) .returning(move |_, _| async { Ok(1.0) }.boxed()); - // background task updates all outdated prices inner .expect_estimate_native_price() .times(10) .returning(move |_, _| async { Ok(2.0) }.boxed()); + let cache = Cache::new(Duration::from_millis(30), Default::default()); let estimator = CachingNativePriceEstimator::new( Box::new(inner), - Duration::from_millis(30), - Duration::from_millis(50), - None, - Duration::default(), + cache, 1, Default::default(), HEALTHY_PRICE_ESTIMATION_TIME, ); + let all_tokens: HashSet<_> = (0..10).map(Address::with_last_byte).collect(); + let updater = NativePriceUpdater::new( + estimator.clone(), + Duration::from_millis(50), + Duration::default(), + ); + updater + .update_tokens_and_fetch_prices(all_tokens, Duration::ZERO) + .await; let tokens: Vec<_> = (0..10).map(Address::with_last_byte).collect(); for token in &tokens { @@ -1057,7 +1027,6 @@ mod tests { .expect_estimate_native_price() .times(BATCH_SIZE) .returning(|_, _| async { Ok(1.0) }.boxed()); - // background task updates all outdated prices inner .expect_estimate_native_price() .times(BATCH_SIZE) @@ -1069,16 +1038,25 @@ mod tests { .boxed() }); + let cache = Cache::new(Duration::from_millis(30), Default::default()); let estimator = CachingNativePriceEstimator::new( Box::new(inner), - Duration::from_millis(30), - Duration::from_millis(50), - None, - Duration::default(), + cache, BATCH_SIZE, Default::default(), 
HEALTHY_PRICE_ESTIMATION_TIME, ); + let all_tokens: HashSet<_> = (0..BATCH_SIZE as u64) + .map(|u| Address::left_padding_from(&u.to_be_bytes())) + .collect(); + let updater = NativePriceUpdater::new( + estimator.clone(), + Duration::from_millis(50), + Duration::default(), + ); + updater + .update_tokens_and_fetch_prices(all_tokens, Duration::ZERO) + .await; let tokens: Vec<_> = (0..BATCH_SIZE as u64).map(token).collect(); for token in &tokens { @@ -1090,9 +1068,6 @@ mod tests { } // wait for maintenance cycle - // although we have 100 requests which all take 100ms to complete the - // maintenance cycle completes sooner because all requests are handled - // concurrently. tokio::time::sleep(Duration::from_millis(60 + WAIT_TIME_MS)).await; for token in &tokens { @@ -1103,39 +1078,4 @@ mod tests { assert_eq!(price.to_i64().unwrap(), 2); } } - - #[test] - fn outdated_entries_prioritized() { - let t0 = Address::with_last_byte(0); - let t1 = Address::with_last_byte(1); - let now = Instant::now(); - let inner = Inner { - cache: Mutex::new( - [ - (t0, CachedResult::new(Ok(0.), now, now, Default::default())), - (t1, CachedResult::new(Ok(0.), now, now, Default::default())), - ] - .into_iter() - .collect(), - ), - high_priority: Default::default(), - estimator: Box::new(MockNativePriceEstimating::new()), - max_age: Default::default(), - concurrent_requests: 1, - approximation_tokens: Default::default(), - quote_timeout: HEALTHY_PRICE_ESTIMATION_TIME, - }; - - let now = now + Duration::from_secs(1); - - *inner.high_priority.lock().unwrap() = std::iter::once(t0).collect(); - let tokens = inner.sorted_tokens_to_update(Duration::from_secs(0), now); - assert_eq!(tokens[0], t0); - assert_eq!(tokens[1], t1); - - *inner.high_priority.lock().unwrap() = std::iter::once(t1).collect(); - let tokens = inner.sorted_tokens_to_update(Duration::from_secs(0), now); - assert_eq!(tokens[0], t1); - assert_eq!(tokens[1], t0); - } } From d72d91d0e9aa7a09221331aed8a3103fd5b73490 Mon Sep 17 00:00:00 2001 From: "Jan [Yann]" <4518474+fafk@users.noreply.github.com> Date: Thu, 12 Feb 2026 09:26:01 +0100 Subject: [PATCH 051/219] Skip checks for defined appCodes (#4118) # Description For the integration with Euler we want to skip balance checks, because the order will get the necessary funds from wrapper execution. This PR skip balance checks where wrappers are defined, mirroring what we do for flash loans with flash loan hints. Signature checks are skipped for all 1271 orders, but kept for presign/Eip712 as these should always be valid independent of prehooks/wrappers. We have been running this way in prod for a while and I just cleaned up the flags. # Changes - [x] Skip balance validation for ordres with wrappers - [x] A cache so we don't keep parsing appdata for every auction - [x] Nuked disable_1271_order_sig_filter and disable_1271_order_balance_filter config flags - 1271 now just always skips these checks ## How to test Unit tests. 
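To make the new behaviour concrete: an order is exempt from the "does the owner hold the sell balance right now" filter if it is EIP-1271 signed or its full appData JSON declares a non-empty `metadata.wrappers` array, since in both cases the funds can appear only at settlement time. The following is a minimal, self-contained sketch of that predicate; `Order`, `Signature`, `Root` and `Metadata` are simplified stand-ins (not the crate's model types), and only the JSON shape mirrors the appData documents used in the tests added below.

```rust
// Sketch only: stand-in types, not the real model/app-data types.
use serde::Deserialize;

enum Signature {
    Eip1271(Vec<u8>),
    Other,
}

struct Order {
    signature: Signature,
    full_app_data: Option<String>,
}

#[derive(Deserialize)]
struct Metadata {
    #[serde(default)]
    wrappers: Vec<serde_json::Value>,
}

#[derive(Deserialize)]
struct Root {
    metadata: Option<Metadata>,
}

/// Returns true if the appData document declares at least one wrapper.
fn has_wrappers(document: Option<&str>) -> bool {
    document.is_some_and(|doc| {
        serde_json::from_str::<Root>(doc)
            .ok()
            .and_then(|root| root.metadata)
            .is_some_and(|m| !m.wrappers.is_empty())
    })
}

/// Orders matching this predicate skip the balance filter: funds may only be
/// produced at settlement time by pre-interactions or wrapper execution.
fn skips_balance_check(order: &Order) -> bool {
    matches!(order.signature, Signature::Eip1271(_)) || has_wrappers(order.full_app_data.as_deref())
}

fn main() {
    let wrapper_order = Order {
        signature: Signature::Other,
        full_app_data: Some(
            r#"{"metadata": {"wrappers": [{"address": "0x01", "data": "0x"}]}}"#.to_string(),
        ),
    };
    let eip1271_order = Order {
        signature: Signature::Eip1271(vec![1, 2, 3]),
        full_app_data: None,
    };
    let plain_order = Order {
        signature: Signature::Other,
        full_app_data: None,
    };

    assert!(skips_balance_check(&wrapper_order));
    assert!(skips_balance_check(&eip1271_order));
    assert!(!skips_balance_check(&plain_order));
}
```

In the actual change the JSON parse result is memoized per appData hash (the `WrapperCache` added in `app_data.rs` below), so the document is parsed at most once across auction cycles.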
--- Cargo.lock | 1 + crates/app-data/Cargo.toml | 1 + crates/app-data/src/app_data.rs | 47 +++ crates/autopilot/Cargo.toml | 2 +- crates/autopilot/src/arguments.rs | 21 -- crates/autopilot/src/run.rs | 13 - crates/autopilot/src/solvable_orders.rs | 306 ++++++++------------ crates/driver/src/domain/competition/mod.rs | 5 + crates/shared/src/order_validation.rs | 6 +- 9 files changed, 177 insertions(+), 225 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index afda6769b8..540b74e1fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -840,6 +840,7 @@ dependencies = [ "bytes-hex", "const-hex", "hex-literal", + "moka", "number", "serde", "serde_json", diff --git a/crates/app-data/Cargo.toml b/crates/app-data/Cargo.toml index b8cd0e1ff0..7265a009b4 100644 --- a/crates/app-data/Cargo.toml +++ b/crates/app-data/Cargo.toml @@ -10,6 +10,7 @@ alloy = { workspace = true, features = ["serde"] } anyhow = { workspace = true } bytes-hex = { workspace = true } const-hex = { workspace = true } +moka = { workspace = true, features = ["sync"] } number = { path = "../number" } serde = { workspace = true } serde_json = { workspace = true } diff --git a/crates/app-data/src/app_data.rs b/crates/app-data/src/app_data.rs index 0f997acf51..ac5b49fe50 100644 --- a/crates/app-data/src/app_data.rs +++ b/crates/app-data/src/app_data.rs @@ -3,6 +3,7 @@ use { alloy::primitives::{Address, U256}, anyhow::{Context, Result, anyhow}, bytes_hex::BytesHex, + moka::sync::Cache, number::serialization::HexOrDecimalU256, serde::{Deserialize, Deserializer, Serialize, Serializer, de}, serde_with::serde_as, @@ -343,6 +344,34 @@ impl Root { } } +/// Caches whether a given app data document contains wrappers, keyed by +/// hash. This avoids re-parsing the same JSON across orders and auction +/// cycles. We're using the default TinyLFU eviction policy, but the capacity is +/// large enough that we don't expect eviction to be a problem in practice, but +/// we limit the size to prevent potential memory exhaustion attacks. 
+pub struct WrapperCache(Cache); + +impl WrapperCache { + pub fn new(capacity: u64) -> Self { + Self(Cache::new(capacity)) + } + + /// Returns `true` if order appData contains non-empty wrappers + pub fn has_wrappers(&self, hash: &AppDataHash, document: Option<&str>) -> bool { + if let Some(cached) = self.0.get(hash) { + return cached; + } + let result = document.is_some_and(|doc| { + serde_json::from_str::(doc) + .ok() + .and_then(|root| root.metadata) + .is_some_and(|m| !m.wrappers.is_empty()) + }); + self.0.insert(*hash, result); + result + } +} + // uid as 56 bytes: 32 for orderDigest, 20 for ownerAddress and 4 for validTo #[derive(Clone, Copy, Eq, Hash, PartialEq, PartialOrd, Ord)] pub struct OrderUid(pub [u8; 56]); @@ -770,6 +799,24 @@ mod tests { ); } + #[test] + fn wrapper_cache_detects_wrappers() { + let cache = WrapperCache::new(100); + let h = |b: u8| AppDataHash([b; 32]); + + assert!(!cache.has_wrappers(&h(1), None)); + assert!(!cache.has_wrappers(&h(2), Some("{}"))); + assert!(!cache.has_wrappers(&h(3), Some(r#"{"metadata": {}}"#))); + assert!(!cache.has_wrappers(&h(4), Some(r#"{"metadata": {"wrappers": []}}"#))); + assert!(cache.has_wrappers( + &h(5), + Some(r#"{"metadata": {"wrappers": [{"address": "0x0000000000000000000000000000000000000001", "data": "0x"}]}}"#), + )); + + // Second call hits the cache + assert!(cache.has_wrappers(&h(5), None)); + } + #[test] fn misc() { let mut validator = Validator::default(); diff --git a/crates/autopilot/Cargo.toml b/crates/autopilot/Cargo.toml index e741776a36..feddf27dbc 100644 --- a/crates/autopilot/Cargo.toml +++ b/crates/autopilot/Cargo.toml @@ -40,7 +40,6 @@ humantime = { workspace = true } indexmap = { workspace = true } itertools = { workspace = true } mimalloc = { workspace = true, optional = true } -tikv-jemallocator = { workspace = true } model = { workspace = true } num = { workspace = true } number = { workspace = true } @@ -57,6 +56,7 @@ serde_with = { workspace = true } shared = { workspace = true } sqlx = { workspace = true } strum = { workspace = true } +tikv-jemallocator = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["macros", "rt-multi-thread", "signal", "sync", "time"] } tokio-stream = { workspace = true } diff --git a/crates/autopilot/src/arguments.rs b/crates/autopilot/src/arguments.rs index e22d464a2b..3057f514a0 100644 --- a/crates/autopilot/src/arguments.rs +++ b/crates/autopilot/src/arguments.rs @@ -258,17 +258,6 @@ pub struct Arguments { #[clap(long, env, default_value = "false", action = clap::ArgAction::Set)] pub disable_order_balance_filter: bool, - // Configures whether the autopilot filters out EIP-1271 orders even if their signatures are - // invalid. This is useful as a workaround to let flashloan orders go through as they rely - // on preHooks behing executed to make the signatures valid. - #[clap(long, env, default_value = "false", action = clap::ArgAction::Set)] - pub disable_1271_order_sig_filter: bool, - - /// Configures whether the autopilot skips balance checks for EIP-1271 - /// orders. - #[clap(long, env, default_value = "false", action = clap::ArgAction::Set)] - pub disable_1271_order_balance_filter: bool, - /// Enables the usage of leader lock in the database /// The second instance of autopilot will act as a follower /// and not cut any auctions. 
@@ -349,8 +338,6 @@ impl std::fmt::Display for Arguments { archive_node_url, max_solutions_per_solver, disable_order_balance_filter, - disable_1271_order_balance_filter, - disable_1271_order_sig_filter, enable_leader_lock, max_maintenance_timeout, native_price_cache_refresh, @@ -431,14 +418,6 @@ impl std::fmt::Display for Arguments { f, "disable_order_balance_filter: {disable_order_balance_filter}" )?; - writeln!( - f, - "disable_1271_order_balance_filter: {disable_1271_order_balance_filter}" - )?; - writeln!( - f, - "disable_1271_order_sig_filter: {disable_1271_order_sig_filter}" - )?; writeln!(f, "enable_leader_lock: {enable_leader_lock}")?; writeln!(f, "max_maintenance_timeout: {max_maintenance_timeout:?}")?; writeln!( diff --git a/crates/autopilot/src/run.rs b/crates/autopilot/src/run.rs index b80ef71acf..0235ad4036 100644 --- a/crates/autopilot/src/run.rs +++ b/crates/autopilot/src/run.rs @@ -51,7 +51,6 @@ use { factory::{self, PriceEstimatorFactory}, native::NativePriceEstimating, }, - signature_validator, sources::{BaselineSource, uniswap_v2::UniV2BaselineSourceParameters}, token_info::{CachedTokenInfoFetcher, TokenInfoFetcher}, token_list::{AutoUpdatingTokenList, TokenListConfiguration}, @@ -253,15 +252,6 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { let chain = Chain::try_from(chain_id).expect("incorrect chain ID"); let balance_overrider = args.price_estimation.balance_overrides.init(web3.clone()); - let signature_validator = signature_validator::validator( - &web3, - signature_validator::Contracts { - settlement: eth.contracts().settlement().clone(), - signatures: eth.contracts().signatures().clone(), - vault_relayer, - }, - balance_overrider.clone(), - ); let balance_fetcher = account_balances::cached( &web3, @@ -533,7 +523,6 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { balance_fetcher.clone(), bad_token_detector.clone(), competition_native_price_updater.clone(), - signature_validator.clone(), *eth.contracts().weth().address(), args.limit_order_price_factor .try_into() @@ -547,8 +536,6 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { args.run_loop_native_price_timeout, *eth.contracts().settlement().address(), args.disable_order_balance_filter, - args.disable_1271_order_sig_filter, - args.disable_1271_order_balance_filter, ); let liveness = Arc::new(Liveness::new(args.max_auction_age)); diff --git a/crates/autopilot/src/solvable_orders.rs b/crates/autopilot/src/solvable_orders.rs index be5bc52745..bb2bd94d55 100644 --- a/crates/autopilot/src/solvable_orders.rs +++ b/crates/autopilot/src/solvable_orders.rs @@ -8,7 +8,7 @@ use { anyhow::{Context, Result}, bigdecimal::BigDecimal, database::order_events::OrderEventLabel, - futures::{FutureExt, StreamExt, future::join_all, stream::FuturesUnordered}, + futures::{FutureExt, future::join_all}, itertools::Itertools, model::{ order::{Order, OrderClass, OrderUid}, @@ -25,7 +25,6 @@ use { native_price_cache::NativePriceUpdater, }, remaining_amounts, - signature_validator::{SignatureCheck, SignatureValidating}, }, std::{ collections::{BTreeMap, HashMap, HashSet, btree_map::Entry}, @@ -91,7 +90,6 @@ pub struct SolvableOrdersCache { bad_token_detector: Arc, cache: Mutex>, native_price_estimator: Arc, - signature_validator: Arc, metrics: &'static Metrics, weth: Address, limit_order_price_factor: BigDecimal, @@ -100,8 +98,7 @@ pub struct SolvableOrdersCache { native_price_timeout: Duration, settlement_contract: Address, 
disable_order_balance_filter: bool, - disable_1271_order_sig_filter: bool, - disable_1271_order_balance_filter: bool, + wrapper_cache: app_data::WrapperCache, } type Balances = HashMap; @@ -120,7 +117,6 @@ impl SolvableOrdersCache { balance_fetcher: Arc, bad_token_detector: Arc, native_price_estimator: Arc, - signature_validator: Arc, weth: Address, limit_order_price_factor: BigDecimal, protocol_fees: domain::ProtocolFees, @@ -128,9 +124,8 @@ impl SolvableOrdersCache { native_price_timeout: Duration, settlement_contract: Address, disable_order_balance_filter: bool, - disable_1271_order_sig_filter: bool, - disable_1271_order_balance_filter: bool, ) -> Arc { + let metrics = Metrics::instance(observe::metrics::get_storage_registry()).unwrap(); Arc::new(Self { min_order_validity_period, persistence, @@ -139,8 +134,7 @@ impl SolvableOrdersCache { bad_token_detector, cache: Mutex::new(None), native_price_estimator, - signature_validator, - metrics: Metrics::instance(observe::metrics::get_storage_registry()).unwrap(), + metrics, weth, limit_order_price_factor, protocol_fees, @@ -148,8 +142,7 @@ impl SolvableOrdersCache { native_price_timeout, settlement_contract, disable_order_balance_filter, - disable_1271_order_sig_filter, - disable_1271_order_balance_filter, + wrapper_cache: app_data::WrapperCache::new(20_000), }) } @@ -186,6 +179,17 @@ impl SolvableOrdersCache { let mut invalid_order_uids = HashSet::new(); let mut filtered_order_events = Vec::new(); + let balance_filter_exempt_orders: HashSet<_> = orders + .iter() + .filter(|order| { + self.wrapper_cache.has_wrappers( + &order.data.app_data, + order.metadata.full_app_data.as_deref(), + ) + }) + .map(|order| order.metadata.uid) + .collect(); + let (balances, orders, cow_amms) = { let queries = orders.iter().map(Query::from_order).collect::>(); tokio::join!( @@ -202,7 +206,7 @@ impl SolvableOrdersCache { orders, &balances, self.settlement_contract, - self.disable_1271_order_balance_filter, + &balance_filter_exempt_orders, ); let removed = counter.checkpoint("insufficient_balance", &orders); invalid_order_uids.extend(removed); @@ -398,18 +402,13 @@ impl SolvableOrdersCache { counter: &mut OrderFilterCounter, invalid_order_uids: &mut HashSet, ) -> Vec { - let filter_invalid_signatures = find_invalid_signature_orders( - &orders, - self.signature_validator.as_ref(), - self.disable_1271_order_sig_filter, - ); + let presignature_pending_orders = find_presignature_pending_orders(&orders); - let (banned_user_orders, invalid_signature_orders, unsupported_token_orders) = tokio::join!( + let (banned_user_orders, unsupported_token_orders) = tokio::join!( self.timed_future( "banned_user_filtering", find_banned_user_orders(&orders, &self.banned_users) ), - self.timed_future("invalid_signature_filtering", filter_invalid_signatures), self.timed_future( "unsupported_token_filtering", find_unsupported_tokens(&orders, self.bad_token_detector.clone()) @@ -418,10 +417,10 @@ impl SolvableOrdersCache { tracing::trace!("filtered invalid orders"); counter.checkpoint_by_invalid_orders("banned_user", &banned_user_orders); - counter.checkpoint_by_invalid_orders("invalid_signature", &invalid_signature_orders); + counter.checkpoint_by_invalid_orders("invalid_signature", &presignature_pending_orders); counter.checkpoint_by_invalid_orders("unsupported_token", &unsupported_token_orders); invalid_order_uids.extend(banned_user_orders); - invalid_order_uids.extend(invalid_signature_orders); + invalid_order_uids.extend(presignature_pending_orders); 
invalid_order_uids.extend(unsupported_token_orders); orders.retain(|order| !invalid_order_uids.contains(&order.metadata.uid)); @@ -483,59 +482,19 @@ async fn get_native_prices( .collect() } -/// Finds unsigned PreSign and EIP-1271 orders whose signatures are no longer -/// validating. -async fn find_invalid_signature_orders( - orders: &[Order], - signature_validator: &dyn SignatureValidating, - disable_1271_order_sig_filter: bool, -) -> Vec { - let mut invalid_orders = vec![]; - let mut signature_check_futures = FuturesUnordered::new(); - - for order in orders { - if let Signature::Eip1271(_) = &order.signature - && disable_1271_order_sig_filter - { - continue; - } - if matches!( - order.metadata.status, - model::order::OrderStatus::PresignaturePending - ) { - invalid_orders.push(order.metadata.uid); - continue; - } - - if let Signature::Eip1271(signature) = &order.signature { - signature_check_futures.push(async { - let (hash, signer, _) = order.metadata.uid.parts(); - match signature_validator - .validate_signature(SignatureCheck { - signer, - hash: hash.0, - signature: signature.clone(), - interactions: order.interactions.pre.clone(), - // TODO delete balance and signature logic in the autopilot - // altogether - balance_override: None, - }) - .await - { - Ok(_) => None, - Err(_) => Some(order.metadata.uid), - } - }); - } - } - - while let Some(res) = signature_check_futures.next().await { - if let Some(invalid_order_uid) = res { - invalid_orders.push(invalid_order_uid); - } - } - - invalid_orders +/// Finds orders with pending presignatures. EIP-1271 signature validation is +/// skipped entirely - the driver validates signatures before settlement. +fn find_presignature_pending_orders(orders: &[Order]) -> Vec { + orders + .iter() + .filter(|order| { + matches!( + order.metadata.status, + model::order::OrderStatus::PresignaturePending + ) + }) + .map(|order| order.metadata.uid) + .collect() } /// Removes orders that can't possibly be settled because there isn't enough @@ -544,12 +503,17 @@ fn orders_with_balance( mut orders: Vec, balances: &Balances, settlement_contract: Address, - disable_1271_order_balance_filter: bool, + filter_bypass_orders: &HashSet, ) -> Vec { // Prefer newer orders over older ones. orders.sort_by_key(|order| std::cmp::Reverse(order.metadata.creation_date)); orders.retain(|order| { - if disable_1271_order_balance_filter && matches!(order.signature, Signature::Eip1271(_)) { + // Skip balance check for all EIP-1271 orders (they can rely on pre-interactions + // to unlock funds) or orders with wrappers (wrappers produce the required + // balance at settlement time). 
+ if matches!(order.signature, Signature::Eip1271(_)) + || filter_bypass_orders.contains(&order.metadata.uid) + { return true; } @@ -852,11 +816,7 @@ mod tests { alloy::primitives::{Address, B256}, futures::FutureExt, maplit::{btreemap, hashset}, - mockall::predicate::eq, - model::{ - interaction::InteractionData, - order::{Interactions, OrderBuilder, OrderData, OrderMetadata, OrderUid}, - }, + model::order::{OrderBuilder, OrderData, OrderMetadata, OrderUid}, shared::{ bad_token::list_based::ListBasedDetector, price_estimation::{ @@ -870,7 +830,6 @@ mod tests { NativePriceUpdater, }, }, - signature_validator::{MockSignatureValidating, SignatureValidationError}, }, }; @@ -1176,48 +1135,29 @@ mod tests { ); } - #[tokio::test] - async fn filters_invalidated_eip1271_signatures() { + #[test] + fn finds_presignature_pending_orders() { + let presign_uid = OrderUid::from_parts(B256::repeat_byte(1), Address::repeat_byte(11), 1); let orders = vec![ + // PresignaturePending order - should be found Order { metadata: OrderMetadata { - uid: OrderUid::from_parts(B256::repeat_byte(1), Address::repeat_byte(11), 1), + uid: presign_uid, + status: model::order::OrderStatus::PresignaturePending, ..Default::default() }, - interactions: Interactions { - pre: vec![InteractionData { - target: Address::from_slice(&[0xe1; 20]), - value: alloy::primitives::U256::ZERO, - call_data: vec![1, 2], - }], - post: vec![InteractionData { - target: Address::from_slice(&[0xe2; 20]), - value: alloy::primitives::U256::ZERO, - call_data: vec![3, 4], - }], - }, ..Default::default() }, + // EIP-1271 order - not PresignaturePending Order { metadata: OrderMetadata { uid: OrderUid::from_parts(B256::repeat_byte(2), Address::repeat_byte(22), 2), ..Default::default() }, signature: Signature::Eip1271(vec![2, 2]), - interactions: Interactions { - pre: vec![InteractionData { - target: Address::from_slice(&[0xe3; 20]), - value: alloy::primitives::U256::ZERO, - call_data: vec![5, 6], - }], - post: vec![InteractionData { - target: Address::from_slice(&[0xe4; 20]), - value: alloy::primitives::U256::ZERO, - call_data: vec![7, 9], - }], - }, ..Default::default() }, + // Regular order - not PresignaturePending Order { metadata: OrderMetadata { uid: OrderUid::from_parts(B256::repeat_byte(3), Address::repeat_byte(33), 3), @@ -1225,75 +1165,10 @@ mod tests { }, ..Default::default() }, - Order { - metadata: OrderMetadata { - uid: OrderUid::from_parts(B256::repeat_byte(4), Address::repeat_byte(44), 4), - ..Default::default() - }, - signature: Signature::Eip1271(vec![4, 4, 4, 4]), - ..Default::default() - }, - Order { - metadata: OrderMetadata { - uid: OrderUid::from_parts(B256::repeat_byte(5), Address::repeat_byte(55), 5), - ..Default::default() - }, - signature: Signature::Eip1271(vec![5, 5, 5, 5, 5]), - ..Default::default() - }, ]; - let mut signature_validator = MockSignatureValidating::new(); - signature_validator - .expect_validate_signature() - .with(eq(SignatureCheck { - signer: Address::repeat_byte(22), - hash: [2; 32], - signature: vec![2, 2], - interactions: vec![InteractionData { - target: Address::from_slice(&[0xe3; 20]), - value: alloy::primitives::U256::ZERO, - call_data: vec![5, 6], - }], - balance_override: None, - })) - .returning(|_| Ok(())); - signature_validator - .expect_validate_signature() - .with(eq(SignatureCheck { - signer: Address::repeat_byte(44), - hash: [4; 32], - signature: vec![4, 4, 4, 4], - interactions: vec![], - balance_override: None, - })) - .returning(|_| Err(SignatureValidationError::Invalid)); - 
signature_validator - .expect_validate_signature() - .with(eq(SignatureCheck { - signer: Address::repeat_byte(55), - hash: [5; 32], - signature: vec![5, 5, 5, 5, 5], - interactions: vec![], - balance_override: None, - })) - .returning(|_| Ok(())); - - let invalid_signature_orders = - find_invalid_signature_orders(&orders, &signature_validator, false).await; - assert_eq!( - invalid_signature_orders, - vec![OrderUid::from_parts( - B256::repeat_byte(4), - Address::repeat_byte(44), - 4 - )] - ); - let invalid_signature_orders_with_1271_filter_disabled = - find_invalid_signature_orders(&orders, &signature_validator, true).await; - // if we switch off the 1271 filter no orders should be returned as containing - // invalid signatures - assert_eq!(invalid_signature_orders_with_1271_filter_disabled, vec![]); + let pending_orders = find_presignature_pending_orders(&orders); + assert_eq!(pending_orders, vec![presign_uid]); } #[test] @@ -1456,7 +1331,9 @@ mod tests { .collect(); let expected = &[0, 2, 4]; - let filtered = orders_with_balance(orders.clone(), &balances, settlement_contract, false); + let no_bypass: HashSet = HashSet::new(); + let filtered = + orders_with_balance(orders.clone(), &balances, settlement_contract, &no_bypass); assert_eq!(filtered.len(), expected.len()); for index in expected { let found = filtered.iter().any(|o| o.data == orders[*index].data); @@ -1465,8 +1342,10 @@ mod tests { } #[test] - fn eip1271_orders_can_skip_balance_filtering() { + fn eip1271_and_wrapper_orders_skip_balance_filtering() { let settlement_contract = Address::repeat_byte(1); + + // EIP-1271 order (should skip balance check) let eip1271_order = Order { data: OrderData { sell_token: Address::with_last_byte(7), @@ -1476,9 +1355,17 @@ mod tests { ..Default::default() }, signature: Signature::Eip1271(vec![1, 2, 3]), + metadata: OrderMetadata { + uid: OrderUid::from_parts(B256::repeat_byte(6), Address::repeat_byte(66), 6), + ..Default::default() + }, ..Default::default() }; - let regular_order = Order { + + // Order with wrappers in bypass set (should skip balance check) + let wrapper_order_uid = + OrderUid::from_parts(B256::repeat_byte(7), Address::repeat_byte(77), 7); + let wrapper_order = Order { data: OrderData { sell_token: Address::with_last_byte(8), sell_amount: alloy::primitives::U256::from(10), @@ -1486,19 +1373,60 @@ mod tests { partially_fillable: false, ..Default::default() }, + metadata: OrderMetadata { + uid: wrapper_order_uid, + ..Default::default() + }, ..Default::default() }; - let orders = vec![regular_order.clone(), eip1271_order.clone()]; - let balances: Balances = Default::default(); + // Regular ECDSA order without wrappers (should be filtered) + let regular_order = Order { + data: OrderData { + sell_token: Address::with_last_byte(9), + sell_amount: alloy::primitives::U256::from(10), + fee_amount: alloy::primitives::U256::from(5), + partially_fillable: false, + ..Default::default() + }, + metadata: OrderMetadata { + uid: OrderUid::from_parts(B256::repeat_byte(8), Address::repeat_byte(88), 8), + ..Default::default() + }, + ..Default::default() + }; - let filtered = orders_with_balance(orders.clone(), &balances, settlement_contract, true); - // 1271 filter is disabled, only the regular order is filtered out - assert_eq!(filtered.len(), 1); - assert!(matches!(filtered[0].signature, Signature::Eip1271(_))); + let orders = vec![ + regular_order.clone(), + eip1271_order.clone(), + wrapper_order.clone(), + ]; + let balances: Balances = Default::default(); // No balances + + // EIP-1271 order and 
wrapper order should be retained, regular order filtered + let wrapper_set = HashSet::from([wrapper_order_uid]); + let filtered = + orders_with_balance(orders.clone(), &balances, settlement_contract, &wrapper_set); + assert_eq!(filtered.len(), 2); + assert!( + filtered + .iter() + .any(|o| o.metadata.uid == eip1271_order.metadata.uid) + ); + assert!( + filtered + .iter() + .any(|o| o.metadata.uid == wrapper_order.metadata.uid) + ); - let filtered_without_override = - orders_with_balance(orders, &balances, settlement_contract, false); - assert!(filtered_without_override.is_empty()); + // Without wrapper set, only EIP-1271 order should be retained + let empty_set: HashSet = HashSet::new(); + let filtered_no_wrappers = + orders_with_balance(orders, &balances, settlement_contract, &empty_set); + assert_eq!(filtered_no_wrappers.len(), 1); + assert_eq!( + filtered_no_wrappers[0].metadata.uid, + eip1271_order.metadata.uid + ); } } diff --git a/crates/driver/src/domain/competition/mod.rs b/crates/driver/src/domain/competition/mod.rs index 6355119ccb..5c51e38bb1 100644 --- a/crates/driver/src/domain/competition/mod.rs +++ b/crates/driver/src/domain/competition/mod.rs @@ -463,6 +463,11 @@ impl Competition { } } + // wrappers can produce the required funds at settlement time + if !order.app_data.wrappers().is_empty() { + return true; + } + let remaining_balance = match balances.get_mut(&( order.trader(), order.sell.token, diff --git a/crates/shared/src/order_validation.rs b/crates/shared/src/order_validation.rs index 1167dd8430..f7909ae54c 100644 --- a/crates/shared/src/order_validation.rs +++ b/crates/shared/src/order_validation.rs @@ -412,6 +412,7 @@ impl OrderValidator { app_data: &OrderAppData, ) -> Result<(), ValidationError> { let mut res = Ok(()); + let has_wrappers = !app_data.inner.protocol.wrappers.is_empty(); // Simulate transferring a small token balance into the settlement contract. // As a spam protection we require that an account must have at least 1 atom @@ -447,13 +448,16 @@ impl OrderValidator { TransferSimulationError::InsufficientAllowance | TransferSimulationError::InsufficientBalance | TransferSimulationError::TransferFailed, - ) if order.signature == Signature::PreSign => { + ) if order.signature == Signature::PreSign || has_wrappers => { // Pre-sign orders do not require sufficient balance or allowance. // The idea is that this allows smart contracts to place orders bundled with // other transactions that either produce the required balance or set the // allowance. This would, for example, allow a Gnosis Safe to bundle the // pre-signature transaction with a WETH wrap and WETH approval to the vault // relayer contract. + // + // Similarly, orders with wrappers may produce the required balance or + // allowance as part of the wrapper execution. return Ok(()); } Err(err) => match err { From 1c28e494e2846f9860d7e2c44e23c04732836748 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Duarte?= Date: Thu, 12 Feb 2026 15:07:03 +0000 Subject: [PATCH 052/219] Migrate orderbook API from warp to axum (#4080) # Description This PR migrates the orderbook API from Warp to Axum. The migration modernizes our web framework while maintaining API compatibility, with improved code organization and reduced complexity (~200 lines removed). 
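As a rough sketch of the pattern (simplified stand-ins, not the code added by this PR): dependencies live in a single `AppState` handed to the router via `with_state`, and each endpoint becomes a plain async handler instead of a warp filter chain. The real `AppState` and routes are defined in `crates/orderbook/src/api.rs`; the `version` field and route below are illustrative only.

```rust
// Sketch of the axum wiring style the migration moves to.
use std::sync::Arc;

use axum::{Json, Router, extract::State, routing::get};

struct AppState {
    version: &'static str,
}

// Handlers pull shared dependencies from `State` instead of having them
// threaded through per-endpoint filter combinators.
async fn get_version(State(state): State<Arc<AppState>>) -> Json<&'static str> {
    Json(state.version)
}

fn router(state: Arc<AppState>) -> Router {
    // One nested sub-router per API version instead of warp's or-combined filters.
    let v1 = Router::new().route("/version", get(get_version));
    Router::new().nest("/api/v1", v1).with_state(state)
}
```

Cross-cutting concerns (tracing span per request, metrics labelling, CORS) then attach once at the router level as tower layers, which is what the `ServiceBuilder`/`TraceLayer`/`CorsLayer` imports in the diff below suggest.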
# Changes

- Replace Warp filters with Axum Router and centralized AppState for dependency injection
- Migrate all 18 API endpoints (v1 and v2) to Axum handler functions
- Update dependencies: remove warp, add axum, tower, tower-http
- Reorganize routes hierarchically with .nest(), alphabetically by prefix
- Fix route conflicts using .merge() for multiple HTTP methods on same path
- Fix bug in get_trades_v2 (use database_read instead of database_write)
- Add comprehensive E2E test suite for HTTP behavior validation in malformed_requests.rs

# How to test

Existing tests + staging. I deployed these changes to base staging to ensure metrics were working; here's the graph (screenshot from 2026-01-23):
Edit panel - GPv2 - Dashboards - Amazon Managed Grafana Follow up: image Unmarked spots are running `main`, the wrong image was labeling endpoints with unknown because it was a fallback. That code has been removed and replaced with the current metric labeling scheme. That scheme can be improved (read, made less verbose and error prone while being more general) but we need to change the metrics label, which I didn't do to minimize changes. --------- Co-authored-by: Claude --- Cargo.lock | 77 +-- Cargo.toml | 1 - crates/driver/src/infra/api/routes/healthz.rs | 10 +- crates/driver/src/tests/setup/orderbook.rs | 4 +- crates/e2e/src/setup/proxy.rs | 9 +- crates/e2e/tests/e2e/malformed_requests.rs | 23 +- crates/e2e/tests/e2e/replace_order.rs | 4 +- crates/model/src/order.rs | 2 + crates/observe/Cargo.toml | 1 - crates/observe/src/distributed_tracing/mod.rs | 1 - .../src/distributed_tracing/tracing_warp.rs | 23 - crates/orderbook/Cargo.toml | 4 +- crates/orderbook/openapi.yml | 16 +- crates/orderbook/src/api.rs | 536 ++++++++++-------- crates/orderbook/src/api/cancel_order.rs | 156 +++-- crates/orderbook/src/api/cancel_orders.rs | 59 +- crates/orderbook/src/api/get_app_data.rs | 66 +-- crates/orderbook/src/api/get_auction.rs | 49 +- crates/orderbook/src/api/get_native_price.rs | 66 +-- crates/orderbook/src/api/get_order_by_uid.rs | 66 +-- crates/orderbook/src/api/get_order_status.rs | 55 +- crates/orderbook/src/api/get_orders_by_tx.rs | 61 +- .../src/api/get_solver_competition.rs | 158 +++--- .../src/api/get_solver_competition_v2.rs | 109 ++-- .../orderbook/src/api/get_token_metadata.rs | 47 +- crates/orderbook/src/api/get_total_surplus.rs | 55 +- crates/orderbook/src/api/get_trades.rs | 131 ++--- crates/orderbook/src/api/get_trades_v2.rs | 189 +++--- crates/orderbook/src/api/get_user_orders.rs | 107 ++-- crates/orderbook/src/api/post_order.rs | 332 +++++------ crates/orderbook/src/api/post_quote.rs | 139 ++--- crates/orderbook/src/api/put_app_data.rs | 112 ++-- crates/orderbook/src/api/version.rs | 17 +- crates/orderbook/src/app_data.rs | 19 +- crates/orderbook/src/run.rs | 52 +- crates/orderbook/src/solver_competition.rs | 2 +- crates/solvers/src/api/routes/healthz.rs | 9 +- playground/docker-compose.non-interactive.yml | 2 +- 38 files changed, 1295 insertions(+), 1474 deletions(-) delete mode 100644 crates/observe/src/distributed_tracing/tracing_warp.rs diff --git a/Cargo.lock b/Cargo.lock index 540b74e1fe..422f11eeb6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3364,30 +3364,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "headers" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06683b93020a07e3dbcf5f8c0f6d40080d725bea7936fc01ad345c01b97dc270" -dependencies = [ - "base64 0.21.7", - "bytes", - "headers-core", - "http 0.2.12", - "httpdate", - "mime", - "sha1", -] - -[[package]] -name = "headers-core" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" -dependencies = [ - "http 0.2.12", -] - [[package]] name = "heck" version = "0.4.1" @@ -4231,16 +4207,6 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" -[[package]] -name = "mime_guess" -version = "2.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" -dependencies 
= [ - "mime", - "unicase", -] - [[package]] name = "minimal-lexical" version = "0.2.1" @@ -4583,7 +4549,6 @@ dependencies = [ "tracing-opentelemetry", "tracing-serde", "tracing-subscriber", - "warp", ] [[package]] @@ -4743,6 +4708,7 @@ dependencies = [ "anyhow", "app-data", "async-trait", + "axum", "bigdecimal", "cached", "chain", @@ -4776,10 +4742,11 @@ dependencies = [ "thiserror 1.0.69", "tikv-jemallocator", "tokio", + "tower 0.4.13", + "tower-http 0.4.4", "tracing", "url", "vergen", - "warp", ] [[package]] @@ -5871,12 +5838,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "scoped-tls" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" - [[package]] name = "scopeguard" version = "1.2.0" @@ -7408,12 +7369,6 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" -[[package]] -name = "unicase" -version = "2.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbc4bc3a9f746d862c45cb89d705aa10f187bb96c76001afab07a0d35ce60142" - [[package]] name = "unicode-bidi" version = "0.3.18" @@ -7567,32 +7522,6 @@ dependencies = [ "try-lock", ] -[[package]] -name = "warp" -version = "0.3.7" -source = "git+https://github.com/cowprotocol/warp.git?rev=586244e#586244eabb564b9f9573436ee0e23edfc73f4861" -dependencies = [ - "bytes", - "futures-channel", - "futures-util", - "headers", - "http 0.2.12", - "hyper 0.14.32", - "log", - "mime", - "mime_guess", - "percent-encoding", - "pin-project", - "scoped-tls", - "serde", - "serde_json", - "serde_urlencoded", - "tokio", - "tokio-util", - "tower-service", - "tracing", -] - [[package]] name = "wasi" version = "0.11.1+wasi-snapshot-preview1" diff --git a/Cargo.toml b/Cargo.toml index 1853a4e068..57ed351635 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -53,7 +53,6 @@ tokio-stream = { version = "0.1.15", features = ["sync"] } tracing = "0.1.41" tracing-subscriber = { version = "0.3.19", features = ["json"] } url = "2.5.0" -warp = { git = 'https://github.com/cowprotocol/warp.git', rev = "586244e", default-features = false } app-data = { path = "crates/app-data" } arc-swap = "1.7.1" async-stream = "0.3.5" diff --git a/crates/driver/src/infra/api/routes/healthz.rs b/crates/driver/src/infra/api/routes/healthz.rs index 157f557a93..62fc74b469 100644 --- a/crates/driver/src/infra/api/routes/healthz.rs +++ b/crates/driver/src/infra/api/routes/healthz.rs @@ -1,9 +1,13 @@ -use axum::{http::StatusCode, response::IntoResponse, routing::get}; +use axum::{ + http::StatusCode, + response::{IntoResponse, Response}, + routing::get, +}; pub(in crate::infra::api) fn healthz(app: axum::Router<()>) -> axum::Router<()> { app.route("/healthz", get(route)) } -async fn route() -> impl IntoResponse { - StatusCode::OK +async fn route() -> Response { + StatusCode::OK.into_response() } diff --git a/crates/driver/src/tests/setup/orderbook.rs b/crates/driver/src/tests/setup/orderbook.rs index 440b6ec9cf..8b3da45a3a 100644 --- a/crates/driver/src/tests/setup/orderbook.rs +++ b/crates/driver/src/tests/setup/orderbook.rs @@ -7,7 +7,7 @@ use { Router, extract::Path, http::StatusCode, - response::IntoResponse, + response::{IntoResponse, Response}, routing::get, }, std::{collections::HashMap, net::SocketAddr}, @@ -59,7 +59,7 @@ impl Orderbook { async fn app_data_handler( Path(app_data): Path, Extension(app_data_storage): Extension>, - ) -> impl 
IntoResponse { + ) -> Response { tracing::debug!("Orderbook received an app_data request: {}", app_data); let app_data_hash = match app_data.parse::() { diff --git a/crates/e2e/src/setup/proxy.rs b/crates/e2e/src/setup/proxy.rs index 9173213c26..85474f8ae3 100644 --- a/crates/e2e/src/setup/proxy.rs +++ b/crates/e2e/src/setup/proxy.rs @@ -12,7 +12,12 @@ //! cluster. use { - axum::{Router, body::Body, http::Request, response::IntoResponse}, + axum::{ + Router, + body::Body, + http::Request, + response::{IntoResponse, Response}, + }, hyper::body::to_bytes, std::{collections::VecDeque, net::SocketAddr, sync::Arc}, tokio::{sync::RwLock, task::JoinHandle}, @@ -109,7 +114,7 @@ async fn handle_request( client: reqwest::Client, state: ProxyState, req: Request, -) -> impl IntoResponse { +) -> Response { let (parts, body) = req.into_parts(); // Convert body to bytes once for reuse across retries diff --git a/crates/e2e/tests/e2e/malformed_requests.rs b/crates/e2e/tests/e2e/malformed_requests.rs index 8dc9c09fab..fb9aff9df4 100644 --- a/crates/e2e/tests/e2e/malformed_requests.rs +++ b/crates/e2e/tests/e2e/malformed_requests.rs @@ -119,13 +119,18 @@ async fn http_validation(web3: Web3) { } // Test malformed auction IDs - let invalid_auction_ids: Vec<(&str, &str)> = vec![ - ("not-a-number", "non-numeric"), - ("-1", "negative number"), - ("99999999999999999999999", "u64 overflow"), - ]; - - for (id, description) in invalid_auction_ids { + // Note: "-1" returns 404 because it doesn't match the u64 route pattern at all, + // while non-numeric strings return 400 as they match the path but fail + // deserialization + for (id, description, expected_status) in [ + ("not-a-number", "non-numeric", StatusCode::NOT_FOUND), + ("-1", "negative number", StatusCode::NOT_FOUND), + ( + "99999999999999999999999", + "u64 overflow", + StatusCode::NOT_FOUND, + ), + ] { let response = client .get(format!("{API_HOST}/api/v1/solver_competition/{id}")) .send() @@ -134,8 +139,8 @@ async fn http_validation(web3: Web3) { assert_eq!( response.status(), - StatusCode::NOT_FOUND, - "Expected 404 for invalid AuctionId ({description}): {id}" + expected_status, + "Expected {expected_status} for invalid AuctionId ({description}): {id}" ); } diff --git a/crates/e2e/tests/e2e/replace_order.rs b/crates/e2e/tests/e2e/replace_order.rs index ef76ccdcc6..c1c8d65cff 100644 --- a/crates/e2e/tests/e2e/replace_order.rs +++ b/crates/e2e/tests/e2e/replace_order.rs @@ -23,7 +23,7 @@ fn parse_order_replacement_error(status: StatusCode, body: &str) -> Option match error.error_type { + StatusCode::BAD_REQUEST => match error.error_type.as_ref() { "InvalidSignature" => Some(OrderReplacementError::InvalidSignature), "OldOrderActivelyBidOn" => Some(OrderReplacementError::OldOrderActivelyBidOn), _ => None, @@ -46,7 +46,7 @@ fn parse_order_cancellation_error( let error: ApiError = serde_json::from_str(body).ok()?; match status { - StatusCode::BAD_REQUEST => match error.error_type { + StatusCode::BAD_REQUEST => match error.error_type.as_ref() { "InvalidSignature" => Some(OrderCancellationError::InvalidSignature), "AlreadyCancelled" => Some(OrderCancellationError::AlreadyCancelled), "OrderFullyExecuted" => Some(OrderCancellationError::OrderFullyExecuted), diff --git a/crates/model/src/order.rs b/crates/model/src/order.rs index e27279e540..cd4085a9fd 100644 --- a/crates/model/src/order.rs +++ b/crates/model/src/order.rs @@ -731,6 +731,8 @@ pub struct OrderMetadata { pub quote: Option, } +pub const ORDER_UID_LIMIT: usize = 1024; + // uid as 56 bytes: 32 for 
orderDigest, 20 for ownerAddress and 4 for validTo #[derive(Clone, Copy, Eq, Hash, PartialEq, PartialOrd, Ord)] pub struct OrderUid(pub [u8; 56]); diff --git a/crates/observe/Cargo.toml b/crates/observe/Cargo.toml index 91578c049f..ee99edca35 100644 --- a/crates/observe/Cargo.toml +++ b/crates/observe/Cargo.toml @@ -26,7 +26,6 @@ tracing = { workspace = true } tracing-opentelemetry = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter", "fmt", "time"] } tracing-serde = { workspace = true } -warp = { workspace = true } jemalloc_pprof = { workspace = true } [lints] diff --git a/crates/observe/src/distributed_tracing/mod.rs b/crates/observe/src/distributed_tracing/mod.rs index 03d8ca7d3c..6bb9302c14 100644 --- a/crates/observe/src/distributed_tracing/mod.rs +++ b/crates/observe/src/distributed_tracing/mod.rs @@ -1,4 +1,3 @@ pub mod request_id; pub mod trace_id_format; pub mod tracing_axum; -pub mod tracing_warp; diff --git a/crates/observe/src/distributed_tracing/tracing_warp.rs b/crates/observe/src/distributed_tracing/tracing_warp.rs deleted file mode 100644 index 7688397081..0000000000 --- a/crates/observe/src/distributed_tracing/tracing_warp.rs +++ /dev/null @@ -1,23 +0,0 @@ -use { - crate::{distributed_tracing::request_id::request_id, tracing::HeaderExtractor}, - opentelemetry::global, - tracing::info, - tracing_opentelemetry::OpenTelemetrySpanExt, - warp::http::HeaderMap, -}; - -pub fn make_span(info: warp::trace::Info) -> tracing::Span { - let headers: &HeaderMap = info.request_headers(); - - // Extract OTEL context from headers - let parent_cx = global::get_text_map_propagator(|prop| prop.extract(&HeaderExtractor(headers))); - - let span = tracing::info_span!("http_request", request_id = %request_id(headers)); - span.set_parent(parent_cx); // sets parent context for distributed trace - { - let _span = span.enter(); - info!(method = %info.method(), path = %info.path(), "HTTP request"); - } - - span -} diff --git a/crates/orderbook/Cargo.toml b/crates/orderbook/Cargo.toml index 1f26181049..936b89986d 100644 --- a/crates/orderbook/Cargo.toml +++ b/crates/orderbook/Cargo.toml @@ -20,6 +20,7 @@ alloy = { workspace = true } anyhow = { workspace = true } app-data = { workspace = true } async-trait = { workspace = true } +axum = { workspace = true } bigdecimal = { workspace = true } cached = { workspace = true } chain = { workspace = true } @@ -52,9 +53,10 @@ strum = { workspace = true } sqlx = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["macros", "rt-multi-thread", "signal", "sync", "time"] } +tower = { workspace = true } +tower-http = { workspace = true, features = ["cors", "trace"] } tracing = { workspace = true } url = { workspace = true } -warp = { workspace = true } [dev-dependencies] mockall = { workspace = true } diff --git a/crates/orderbook/openapi.yml b/crates/orderbook/openapi.yml index 79a07a488a..9434a62d98 100644 --- a/crates/orderbook/openapi.yml +++ b/crates/orderbook/openapi.yml @@ -81,6 +81,8 @@ paths: description: "Forbidden, your account is deny-listed." "404": description: No route was found quoting the order. + "422": + description: Unable to parse request body as valid JSON. "429": description: Too many order placements. "500": @@ -121,6 +123,8 @@ paths: description: Invalid signature. "404": description: One or more orders were not found and no orders were cancelled. + "422": + description: Unable to parse request body as valid JSON. 
"/api/v1/orders/{UID}": get: operationId: getOrder @@ -177,6 +181,8 @@ paths: description: Invalid signature. "404": description: Order was not found. + "422": + description: Unable to parse request body as valid JSON. "/api/v1/orders/{UID}/status": get: operationId: getOrderStatus @@ -437,6 +443,8 @@ paths: $ref: "#/components/schemas/PriceEstimationError" "404": description: No route was found for the specified order. + "422": + description: Unable to parse request body as valid JSON. "429": description: Too many order quotes. "500": @@ -626,6 +634,8 @@ paths: $ref: "#/components/schemas/AppDataHash" "400": description: Error validating full `appData` + "422": + description: Unable to parse request body as valid JSON. "500": description: Error storing the full `appData` /api/v1/app_data: @@ -657,6 +667,8 @@ paths: $ref: "#/components/schemas/AppDataHash" "400": description: Error validating full `appData` + "422": + description: Unable to parse request body as valid JSON. "500": description: Error storing the full `appData` "/api/v1/users/{address}/total_surplus": @@ -1643,7 +1655,9 @@ components: - UnsupportedToken - InvalidAppData - AppDataHashMismatch + - AppDataMismatch - AppdataFromMismatch + - MetadataSerializationFailed - OldOrderActivelyBidOn description: type: string @@ -1676,7 +1690,7 @@ components: enum: - QuoteNotVerified - UnsupportedToken - - ZeroAmount + - NoLiquidity - UnsupportedOrderType description: type: string diff --git a/crates/orderbook/src/api.rs b/crates/orderbook/src/api.rs index 0fa277a975..7123b5a0e0 100644 --- a/crates/orderbook/src/api.rs +++ b/crates/orderbook/src/api.rs @@ -1,23 +1,30 @@ use { - crate::{app_data, database::Postgres, orderbook::Orderbook, quoter::QuoteHandler}, - anyhow::Result, - observe::distributed_tracing::tracing_warp::make_span, - serde::{Deserialize, Serialize, de::DeserializeOwned}, + crate::{ + app_data, + database::Postgres, + orderbook::Orderbook, + quoter::QuoteHandler, + solver_competition::LoadSolverCompetitionError, + }, + axum::{ + Router, + extract::DefaultBodyLimit, + http::{Request, StatusCode}, + middleware::{self, Next}, + response::{IntoResponse, Json, Response}, + }, + hyper::header::USER_AGENT, + observe::distributed_tracing::tracing_axum::{self, record_trace_id}, + serde::{Deserialize, Serialize}, shared::price_estimation::{PriceEstimationError, native::NativePriceEstimating}, std::{ - convert::Infallible, + borrow::Cow, fmt::Debug, sync::Arc, time::{Duration, Instant}, }, - warp::{ - Filter, - Rejection, - Reply, - filters::BoxedFilter, - hyper::StatusCode, - reply::{Json, WithStatus, json, with_status}, - }, + tower::ServiceBuilder, + tower_http::{cors::CorsLayer, trace::TraceLayer}, }; mod cancel_order; @@ -40,126 +47,250 @@ mod post_quote; mod put_app_data; mod version; +const ALLOWED_METHODS: &[axum::http::Method] = &[ + axum::http::Method::GET, + axum::http::Method::POST, + axum::http::Method::DELETE, + axum::http::Method::OPTIONS, + axum::http::Method::PUT, + axum::http::Method::PATCH, + axum::http::Method::HEAD, +]; + +/// Centralized application state shared across all API handlers +pub struct AppState { + pub database_write: Postgres, + pub database_read: Postgres, + pub orderbook: Arc, + pub quotes: QuoteHandler, + pub app_data: Arc, + pub native_price_estimator: Arc, + pub quote_timeout: Duration, +} + +async fn summarize_request( + req: Request, + next: Next, +) -> Response { + let method = req.method().to_string(); + let uri = req.uri().to_string(); + + let user_agent = req + .headers() + 
.get(USER_AGENT) + .map(|user_agent| user_agent.to_str().unwrap_or("invalid (non-ASCII)")) + .unwrap_or("unset") + .to_string(); + + let timer = Instant::now(); + let response = next.run(req).await; + let status = response.status().as_u16(); + + tracing::info!( + method, + uri, + user_agent, + status, + elapsed = ?timer.elapsed(), + "request_summary", + ); + + response +} + +/// Middleware that automatically tracks metrics using Axum's MatchedPath +async fn with_matched_path_metric( + req: Request, + next: Next, +) -> Response { + let metrics = ApiMetrics::instance(observe::metrics::get_storage_registry()).unwrap(); + + // Extract matched path and HTTP method + let matched_path = req + .extensions() + .get::() + .map(|path| path.as_str()) + .unwrap_or("unknown") + .to_string(); + + let response = { + let _timer = metrics + .requests_duration_seconds + .with_label_values(&[&matched_path]) + .start_timer(); + next.run(req).await + }; + let status = response.status(); + + // Track completed requests + metrics + .requests_complete + .with_label_values(&[&matched_path, status.as_str()]) + .inc(); + + // Track rejected requests (4xx and 5xx status codes) + if status.is_client_error() || status.is_server_error() { + metrics + .requests_rejected + .with_label_values(&[status.as_str()]) + .inc(); + } + + response +} + +const MAX_JSON_BODY_PAYLOAD: u64 = 1024 * 16; + pub fn handle_all_routes( database_write: Postgres, database_read: Postgres, orderbook: Arc, - quotes: Arc, + quotes: QuoteHandler, app_data: Arc, native_price_estimator: Arc, quote_timeout: Duration, -) -> impl Filter + Clone { - // Note that we add a string with endpoint's name to all responses. - // This string will be used later to report metrics. - // It is not used to form the actual server response. 
- - let routes = vec![ +) -> Router { + let app_data_size_limit = app_data.size_limit(); + + let state = Arc::new(AppState { + database_write, + database_read, + orderbook, + quotes, + app_data, + native_price_estimator, + quote_timeout, + }); + + let routes = [ + // V1 routes + ( + "/api/v1/account/:owner/orders", + axum::routing::get(get_user_orders::get_user_orders_handler), + ), + ( + "/api/v1/app_data", + axum::routing::put(put_app_data::put_app_data_without_hash) + .layer(DefaultBodyLimit::max(app_data_size_limit)), + ), ( - "v1/create_order", - box_filter(post_order::post_order(orderbook.clone())), + "/api/v1/app_data/:hash", + axum::routing::get(get_app_data::get_app_data_handler).merge( + axum::routing::put(put_app_data::put_app_data_with_hash) + .layer(DefaultBodyLimit::max(app_data_size_limit)), + ), ), ( - "v1/get_order", - box_filter(get_order_by_uid::get_order_by_uid(orderbook.clone())), + "/api/v1/auction", + axum::routing::get(get_auction::get_auction_handler), ), ( - "v1/get_order_status", - box_filter(get_order_status::get_status(orderbook.clone())), + "/api/v1/orders", + axum::routing::post(post_order::post_order_handler) + .merge(axum::routing::delete(cancel_orders::cancel_orders_handler)), ), ( - "v1/get_trades", - box_filter(get_trades::get_trades(database_read.clone())), + "/api/v1/orders/:uid", + axum::routing::get(get_order_by_uid::get_order_by_uid_handler) + .merge(axum::routing::delete(cancel_order::cancel_order_handler)), ), ( - "v2/get_trades", - box_filter(get_trades_v2::get_trades(database_read.clone())), + "/api/v1/orders/:uid/status", + axum::routing::get(get_order_status::get_status_handler), ), ( - "v1/cancel_order", - box_filter(cancel_order::cancel_order(orderbook.clone())), + "/api/v1/quote", + axum::routing::post(post_quote::post_quote_handler), ), + // /solver_competition routes (specific before parameterized) ( - "v1/cancel_orders", - box_filter(cancel_orders::filter(orderbook.clone())), + "/api/v1/solver_competition/latest", + axum::routing::get(get_solver_competition::get_solver_competition_latest_handler), ), ( - "v1/get_user_orders", - box_filter(get_user_orders::get_user_orders(orderbook.clone())), + "/api/v1/solver_competition/by_tx_hash/:tx_hash", + axum::routing::get(get_solver_competition::get_solver_competition_by_hash_handler), ), ( - "v1/get_orders_by_tx", - box_filter(get_orders_by_tx::get_orders_by_tx(orderbook.clone())), + "/api/v1/solver_competition/:auction_id", + axum::routing::get(get_solver_competition::get_solver_competition_by_id_handler), ), - ("v1/post_quote", box_filter(post_quote::post_quote(quotes))), ( - "v1/auction", - box_filter(get_auction::get_auction(orderbook.clone())), + "/api/v1/token/:token/metadata", + axum::routing::get(get_token_metadata::get_token_metadata_handler), ), ( - "v1/solver_competition", - box_filter(get_solver_competition::get(Arc::new( - database_write.clone(), - ))), + "/api/v1/token/:token/native_price", + axum::routing::get(get_native_price::get_native_price_handler), ), ( - "v2/solver_competition", - box_filter(get_solver_competition_v2::get(database_write.clone())), + "/api/v1/trades", + axum::routing::get(get_trades::get_trades_handler), ), ( - "v1/solver_competition/latest", - box_filter(get_solver_competition::get_latest(Arc::new( - database_write.clone(), - ))), + "/api/v1/transactions/:hash/orders", + axum::routing::get(get_orders_by_tx::get_orders_by_tx_handler), ), ( - "v2/solver_competition/latest", - box_filter(get_solver_competition_v2::get_latest( - database_write.clone(), - )), + 
"/api/v1/users/:user/total_surplus", + axum::routing::get(get_total_surplus::get_total_surplus_handler), ), - ("v1/version", box_filter(version::version())), ( - "v1/get_native_price", - box_filter(get_native_price::get_native_price( - native_price_estimator, - quote_timeout, - )), + "/api/v1/version", + axum::routing::get(version::version_handler), ), + // V2 routes + // /solver_competition routes (specific before parameterized) ( - "v1/get_app_data", - get_app_data::get(database_read.clone()).boxed(), + "/api/v2/solver_competition/latest", + axum::routing::get(get_solver_competition_v2::get_solver_competition_latest_handler), ), ( - "v1/put_app_data", - box_filter(put_app_data::filter(app_data)), + "/api/v2/solver_competition/by_tx_hash/:tx_hash", + axum::routing::get(get_solver_competition_v2::get_solver_competition_by_hash_handler), ), ( - "v1/get_total_surplus", - box_filter(get_total_surplus::get(database_read.clone())), + "/api/v2/solver_competition/:auction_id", + axum::routing::get(get_solver_competition_v2::get_solver_competition_by_id_handler), ), ( - "v1/get_token_metadata", - box_filter(get_token_metadata::get_token_metadata(database_read)), + "/api/v2/trades", + axum::routing::get(get_trades_v2::get_trades_handler), ), ]; - finalize_router(routes, "orderbook::api::request_summary") -} - -pub type ApiReply = WithStatus; - -// We turn Rejection into Reply to workaround warp not setting CORS headers on -// rejections. -async fn handle_rejection(err: Rejection) -> Result { - let response = err.default_response(); - + // Initialize metrics let metrics = ApiMetrics::instance(observe::metrics::get_storage_registry()).unwrap(); - metrics - .requests_rejected - .with_label_values(&[response.status().as_str()]) - .inc(); + metrics.reset_requests_rejected(); - Ok(response) + let mut api_router = Router::new(); + for (path, method_router) in routes { + metrics.reset_requests_complete(path); + api_router = api_router.route(path, method_router); + } + let api_router = api_router.with_state(state); + + let cors = CorsLayer::new() + .allow_origin(tower_http::cors::Any) + .allow_methods(ALLOWED_METHODS.to_vec()) + .allow_headers(vec![ + axum::http::header::ORIGIN, + axum::http::header::CONTENT_TYPE, + // Must be lower case due to the HTTP-2 spec + axum::http::HeaderName::from_static("x-auth-token"), + axum::http::HeaderName::from_static("x-appid"), + ]); + + api_router + .layer(DefaultBodyLimit::max(MAX_JSON_BODY_PAYLOAD as usize)) + .layer(cors) + .layer(middleware::from_fn(summarize_request)) + .layer(middleware::from_fn(with_matched_path_metric)) + .layer( + ServiceBuilder::new() + .layer(TraceLayer::new_for_http().make_span_with(tracing_axum::make_span)) + .map_request(record_trace_id), + ) } #[derive(prometheus_metric_storage::MetricStorage, Clone, Debug)] @@ -200,43 +331,38 @@ impl ApiMetrics { } } - fn reset_requests_complete(&self, method: &str) { + fn reset_requests_complete(&self, path: &str) { for status in Self::INITIAL_STATUSES { self.requests_complete - .with_label_values(&[method, status.as_str()]) + .with_label_values(&[path, status.as_str()]) .reset(); } } - - fn on_request_completed(&self, method: &str, status: StatusCode, timer: Instant) { - self.requests_complete - .with_label_values(&[method, status.as_str()]) - .inc(); - self.requests_duration_seconds - .with_label_values(&[method]) - .observe(timer.elapsed().as_secs_f64()); - } } #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] -pub struct Error<'a> { - pub error_type: &'a str, - pub description: &'a 
str, +pub struct Error { + pub error_type: Cow<'static, str>, + pub description: Cow<'static, str>, /// Additional arbitrary data that can be attached to an API error. #[serde(skip_serializing_if = "Option::is_none")] pub data: Option, } -pub fn error(error_type: &str, description: impl AsRef) -> Json { - json(&Error { - error_type, - description: description.as_ref(), +pub fn error(error_type: &'static str, description: impl AsRef) -> Json { + Json(Error { + error_type: error_type.into(), + description: Cow::Owned(description.as_ref().to_owned()), data: None, }) } -pub fn rich_error(error_type: &str, description: impl AsRef, data: impl Serialize) -> Json { +pub fn rich_error( + error_type: &'static str, + description: impl AsRef, + data: impl Serialize, +) -> Json { let data = match serde_json::to_value(&data) { Ok(value) => Some(value), Err(err) => { @@ -245,146 +371,52 @@ pub fn rich_error(error_type: &str, description: impl AsRef, data: impl Ser } }; - json(&Error { - error_type, - description: description.as_ref(), + Json(Error { + error_type: error_type.into(), + description: Cow::Owned(description.as_ref().to_owned()), data, }) } -pub fn internal_error_reply() -> ApiReply { - with_status( - error("InternalServerError", ""), +pub fn internal_error_reply() -> Response { + ( StatusCode::INTERNAL_SERVER_ERROR, + error("InternalServerError", ""), ) + .into_response() } -pub fn convert_json_response(result: Result) -> WithStatus -where - T: Serialize, - E: IntoWarpReply + Debug, -{ - match result { - Ok(response) => with_status(warp::reply::json(&response), StatusCode::OK), - Err(err) => err.into_warp_reply(), - } -} - -pub trait IntoWarpReply { - fn into_warp_reply(self) -> ApiReply; -} +// Newtype wrapper for PriceEstimationError to allow IntoResponse implementation +// (orphan rules prevent implementing IntoResponse directly on external types) +pub(crate) struct PriceEstimationErrorWrapper(pub(crate) PriceEstimationError); -pub async fn response_body(response: warp::hyper::Response) -> Vec { - let mut body = response.into_body(); - let mut result = Vec::new(); - while let Some(bytes) = futures::StreamExt::next(&mut body).await { - result.extend_from_slice(bytes.unwrap().as_ref()); - } - result -} - -const MAX_JSON_BODY_PAYLOAD: u64 = 1024 * 16; - -pub fn extract_payload() --> impl Filter + Clone { - // (rejecting huge payloads)... - extract_payload_with_max_size(MAX_JSON_BODY_PAYLOAD) -} - -pub fn extract_payload_with_max_size( - max_size: u64, -) -> impl Filter + Clone { - warp::body::content_length_limit(max_size).and(warp::body::json()) -} - -pub type BoxedRoute = BoxedFilter<(Box,)>; - -pub fn box_filter(filter: Filter_) -> BoxedFilter<(Box,)> -where - Filter_: Filter + Send + Sync + 'static, - Reply_: Reply + Send + 'static, -{ - filter.map(|a| Box::new(a) as Box).boxed() -} - -/// Sets up basic metrics, cors and proper log tracing for all routes. -/// -/// # Panics -/// -/// This method panics if `routes` is empty. 
-pub fn finalize_router( - routes: Vec<(&'static str, BoxedRoute)>, - log_prefix: &'static str, -) -> impl Filter + Clone { - let metrics = ApiMetrics::instance(observe::metrics::get_storage_registry()).unwrap(); - metrics.reset_requests_rejected(); - for (method, _) in &routes { - metrics.reset_requests_complete(method); - } - - let router = routes - .into_iter() - .fold( - Option::)>>::None, - |router, (method, route)| { - let route = route.map(move |result| (method, result)).untuple_one(); - let next = match router { - Some(router) => router.or(route).unify().boxed(), - None => route.boxed(), - }; - Some(next) - }, - ) - .expect("routes cannot be empty"); - - let instrumented = - warp::any() - .map(Instant::now) - .and(router) - .map(|timer, method, reply: Box| { - let response = reply.into_response(); - metrics.on_request_completed(method, response.status(), timer); - response - }); - - // Final setup - let cors = warp::cors() - .allow_any_origin() - .allow_methods(vec![ - "GET", "POST", "DELETE", "OPTIONS", "PUT", "PATCH", "HEAD", - ]) - .allow_headers(vec!["Origin", "Content-Type", "X-Auth-Token", "X-AppId"]); - - warp::path!("api" / ..) - .and(instrumented) - .recover(handle_rejection) - .with(cors) - .with(warp::log::log(log_prefix)) - .with(warp::trace::trace(make_span)) -} - -impl IntoWarpReply for PriceEstimationError { - fn into_warp_reply(self) -> WithStatus { - match self { - Self::UnsupportedToken { token, reason } => with_status( +impl IntoResponse for PriceEstimationErrorWrapper { + fn into_response(self) -> Response { + match self.0 { + PriceEstimationError::UnsupportedToken { token, reason } => ( + StatusCode::BAD_REQUEST, error( "UnsupportedToken", format!("Token {token:?} is unsupported: {reason:}"), ), + ) + .into_response(), + PriceEstimationError::UnsupportedOrderType(order_type) => ( StatusCode::BAD_REQUEST, - ), - Self::UnsupportedOrderType(order_type) => with_status( error( "UnsupportedOrderType", format!("{order_type} not supported"), ), - StatusCode::BAD_REQUEST, - ), - Self::NoLiquidity | Self::RateLimited | Self::EstimatorInternal(_) => with_status( - error("NoLiquidity", "no route found"), + ) + .into_response(), + PriceEstimationError::NoLiquidity + | PriceEstimationError::RateLimited + | PriceEstimationError::EstimatorInternal(_) => ( StatusCode::NOT_FOUND, - ), - Self::ProtocolInternal(err) => { + error("NoLiquidity", "no route found"), + ) + .into_response(), + PriceEstimationError::ProtocolInternal(err) => { tracing::error!(?err, "PriceEstimationError::Other"); internal_error_reply() } @@ -392,6 +424,36 @@ impl IntoWarpReply for PriceEstimationError { } } +impl IntoResponse for LoadSolverCompetitionError { + fn into_response(self) -> Response { + match self { + err @ LoadSolverCompetitionError::NotFound => { + (StatusCode::NOT_FOUND, error("NotFound", err.to_string())).into_response() + } + LoadSolverCompetitionError::Other(err) => { + tracing::error!(?err, "failed to load solver competition"); + internal_error_reply() + } + } + } +} + +#[cfg(test)] +pub async fn response_body(response: axum::http::Response) -> Vec +where + B: axum::body::HttpBody + Unpin, + B::Data: AsRef<[u8]>, + B::Error: Debug, +{ + let mut body = response.into_body(); + let mut result = Vec::new(); + while let Some(frame) = body.data().await { + let bytes = frame.unwrap(); + result.extend_from_slice(bytes.as_ref()); + } + result +} + #[cfg(test)] mod tests { use {super::*, serde::ser, serde_json::json}; @@ -400,8 +462,8 @@ mod tests { fn rich_errors_skip_unset_data_field() { 
assert_eq!( serde_json::to_value(&Error { - error_type: "foo", - description: "bar", + error_type: "foo".into(), + description: "bar".into(), data: None, }) .unwrap(), @@ -412,8 +474,8 @@ mod tests { ); assert_eq!( serde_json::to_value(Error { - error_type: "foo", - description: "bar", + error_type: "foo".into(), + description: "bar".into(), data: Some(json!(42)), }) .unwrap(), @@ -427,6 +489,8 @@ mod tests { #[tokio::test] async fn rich_errors_handle_serialization_errors() { + use axum::body::HttpBody; + struct AlwaysErrors; impl Serialize for AlwaysErrors { fn serialize(&self, _: S) -> Result @@ -437,16 +501,16 @@ mod tests { } } - let body = warp::hyper::body::to_bytes( - rich_error("foo", "bar", AlwaysErrors) - .into_response() - .into_body(), - ) - .await - .unwrap(); + let response = rich_error("foo", "bar", AlwaysErrors).into_response(); + let mut body = response.into_body(); + let mut bytes = Vec::new(); + while let Some(frame) = body.data().await { + let chunk = frame.unwrap(); + bytes.extend_from_slice(&chunk); + } assert_eq!( - serde_json::from_slice::(&body).unwrap(), + serde_json::from_slice::(&bytes).unwrap(), json!({ "errorType": "foo", "description": "bar", diff --git a/crates/orderbook/src/api/cancel_order.rs b/crates/orderbook/src/api/cancel_order.rs index ae141b299d..a0c331c3e9 100644 --- a/crates/orderbook/src/api/cancel_order.rs +++ b/crates/orderbook/src/api/cancel_order.rs @@ -1,60 +1,84 @@ use { - crate::{ - api::{IntoWarpReply, convert_json_response, extract_payload}, - orderbook::{OrderCancellationError, Orderbook}, + crate::{api::AppState, orderbook::OrderCancellationError}, + axum::{ + Json, + body, + extract::{Path, State}, + http::StatusCode, + response::{IntoResponse, Response}, }, - anyhow::Result, model::order::{CancellationPayload, OrderCancellation, OrderUid}, - std::{convert::Infallible, sync::Arc}, - warp::{Filter, Rejection, hyper::StatusCode, reply::with_status}, + std::{str::FromStr, sync::Arc}, }; -pub fn cancel_order_request() --> impl Filter + Clone { - warp::path!("v1" / "orders" / OrderUid) - .and(warp::delete()) - .and(extract_payload()) - .map(|uid, payload: CancellationPayload| OrderCancellation { - order_uid: uid, - signature: payload.signature, - signing_scheme: payload.signing_scheme, - }) +pub async fn cancel_order_handler( + State(state): State>, + Path(uid): Path, + body: body::Bytes, +) -> Response { + // TODO: remove after all downstream callers have been notified of the status + // code changes + let Ok(uid) = OrderUid::from_str(&uid) else { + return StatusCode::NOT_FOUND.into_response(); + }; + let Ok(payload) = serde_json::from_slice::(&body) else { + return StatusCode::BAD_REQUEST.into_response(); + }; + + let order_cancellation = OrderCancellation { + order_uid: uid, + signature: payload.signature, + signing_scheme: payload.signing_scheme, + }; + state + .orderbook + .cancel_order(order_cancellation) + .await + .map(|_| Json("Cancelled")) + .into_response() } -impl IntoWarpReply for OrderCancellationError { - fn into_warp_reply(self) -> super::ApiReply { +impl IntoResponse for OrderCancellationError { + fn into_response(self) -> Response { match self { - Self::InvalidSignature => with_status( + Self::InvalidSignature => ( + StatusCode::BAD_REQUEST, super::error("InvalidSignature", "Malformed signature"), + ) + .into_response(), + Self::AlreadyCancelled => ( StatusCode::BAD_REQUEST, - ), - Self::AlreadyCancelled => with_status( super::error("AlreadyCancelled", "Order is already cancelled"), + ) + .into_response(), + 
Self::OrderFullyExecuted => ( StatusCode::BAD_REQUEST, - ), - Self::OrderFullyExecuted => with_status( super::error("OrderFullyExecuted", "Order is fully executed"), + ) + .into_response(), + Self::OrderExpired => ( StatusCode::BAD_REQUEST, - ), - Self::OrderExpired => with_status( super::error("OrderExpired", "Order is expired"), - StatusCode::BAD_REQUEST, - ), - Self::OrderNotFound => with_status( - super::error("OrderNotFound", "Order not located in database"), + ) + .into_response(), + Self::OrderNotFound => ( StatusCode::NOT_FOUND, - ), - Self::WrongOwner => with_status( + super::error("OrderNotFound", "Order not located in database"), + ) + .into_response(), + Self::WrongOwner => ( + StatusCode::UNAUTHORIZED, super::error( "WrongOwner", "Signature recovery's owner doesn't match order's", ), - StatusCode::UNAUTHORIZED, - ), - Self::OnChainOrder => with_status( - super::error("OnChainOrder", "On-chain orders must be cancelled on-chain"), + ) + .into_response(), + Self::OnChainOrder => ( StatusCode::BAD_REQUEST, - ), + super::error("OnChainOrder", "On-chain orders must be cancelled on-chain"), + ) + .into_response(), Self::Other(err) => { tracing::error!(?err, "cancel_order"); crate::api::internal_error_reply() @@ -63,22 +87,6 @@ impl IntoWarpReply for OrderCancellationError { } } -pub fn cancel_order_response(result: Result<(), OrderCancellationError>) -> super::ApiReply { - convert_json_response(result.map(|_| "Cancelled")) -} - -pub fn cancel_order( - orderbook: Arc, -) -> impl Filter + Clone { - cancel_order_request().and_then(move |order| { - let orderbook = orderbook.clone(); - async move { - let result = orderbook.cancel_order(order).await; - Result::<_, Infallible>::Ok(cancel_order_response(result)) - } - }) -} - #[cfg(test)] mod tests { use { @@ -86,9 +94,10 @@ mod tests { alloy::primitives::b256, model::signature::{EcdsaSignature, EcdsaSigningScheme}, serde_json::json, - warp::{Reply, test::request}, }; + type Result = std::result::Result, OrderCancellationError>; + #[test] fn cancellation_payload_deserialization() { assert_eq!( @@ -111,57 +120,34 @@ mod tests { ); } - #[tokio::test] - async fn cancel_order_request_ok() { - let filter = cancel_order_request(); - let cancellation = OrderCancellation::default(); - - let request = request() - .path(&format!("/v1/orders/{}", cancellation.order_uid)) - .method("DELETE") - .header("content-type", "application/json") - .json(&CancellationPayload { - signature: cancellation.signature, - signing_scheme: cancellation.signing_scheme, - }); - let result = request.filter(&filter).await.unwrap(); - assert_eq!(result, cancellation); - } - #[test] fn cancel_order_response_ok() { - let response = cancel_order_response(Ok(())).into_response(); + let response = (Result::Ok(Json("Cancelled"))).into_response(); assert_eq!(response.status(), StatusCode::OK); } #[test] fn cancel_order_response_err() { - let response = - cancel_order_response(Err(OrderCancellationError::InvalidSignature)).into_response(); + let response = Result::Err(OrderCancellationError::InvalidSignature).into_response(); assert_eq!(response.status(), StatusCode::BAD_REQUEST); - let response = - cancel_order_response(Err(OrderCancellationError::OrderFullyExecuted)).into_response(); + let response = Result::Err(OrderCancellationError::OrderFullyExecuted).into_response(); assert_eq!(response.status(), StatusCode::BAD_REQUEST); - let response = - cancel_order_response(Err(OrderCancellationError::AlreadyCancelled)).into_response(); + let response = 
Result::Err(OrderCancellationError::AlreadyCancelled).into_response(); assert_eq!(response.status(), StatusCode::BAD_REQUEST); - let response = - cancel_order_response(Err(OrderCancellationError::OrderExpired)).into_response(); + let response = Result::Err(OrderCancellationError::OrderExpired).into_response(); assert_eq!(response.status(), StatusCode::BAD_REQUEST); - let response = - cancel_order_response(Err(OrderCancellationError::WrongOwner)).into_response(); + let response = Result::Err(OrderCancellationError::WrongOwner).into_response(); assert_eq!(response.status(), StatusCode::UNAUTHORIZED); - let response = - cancel_order_response(Err(OrderCancellationError::OrderNotFound)).into_response(); + let response = Result::Err(OrderCancellationError::OrderNotFound).into_response(); assert_eq!(response.status(), StatusCode::NOT_FOUND); - let response = cancel_order_response(Err(OrderCancellationError::Other( - anyhow::Error::msg("test error"), + let response = Result::Err(OrderCancellationError::Other(anyhow::Error::msg( + "test error", ))) .into_response(); assert_eq!(response.status(), StatusCode::INTERNAL_SERVER_ERROR); diff --git a/crates/orderbook/src/api/cancel_orders.rs b/crates/orderbook/src/api/cancel_orders.rs index b252fd8dae..4c6f8e47e5 100644 --- a/crates/orderbook/src/api/cancel_orders.rs +++ b/crates/orderbook/src/api/cancel_orders.rs @@ -1,32 +1,41 @@ use { - crate::{ - api::{convert_json_response, extract_payload}, - orderbook::{OrderCancellationError, Orderbook}, + crate::{api::AppState, orderbook::OrderCancellationError}, + anyhow::anyhow, + axum::{ + Json, + body, + extract::State, + response::{IntoResponse, Response}, }, - anyhow::Result, - model::order::SignedOrderCancellations, - std::{convert::Infallible, sync::Arc}, - warp::{Filter, Rejection}, + hyper::StatusCode, + model::order::{ORDER_UID_LIMIT, SignedOrderCancellations}, + std::sync::Arc, }; -pub fn request() -> impl Filter + Clone { - warp::path!("v1" / "orders") - .and(warp::delete()) - .and(extract_payload()) -} +pub async fn cancel_orders_handler( + State(state): State>, + body: body::Bytes, +) -> Response { + // TODO: remove after all downstream callers have been notified of the status + // code changes + let Ok(cancellations) = serde_json::from_slice::(&body) else { + return StatusCode::BAD_REQUEST.into_response(); + }; -pub fn response(result: Result<(), OrderCancellationError>) -> super::ApiReply { - convert_json_response(result.map(|_| "Cancelled")) -} + // Explicitly limit the number of orders cancelled in a batch as the request + // size limit *does not* provide a proper bound for this + if cancellations.data.order_uids.len() > ORDER_UID_LIMIT { + return Err::<&'static str, _>(OrderCancellationError::Other(anyhow!( + "too many orders ({} > 1024)", + cancellations.data.order_uids.len() + ))) + .into_response(); + } -pub fn filter( - orderbook: Arc, -) -> impl Filter + Clone { - request().and_then(move |cancellations| { - let orderbook = orderbook.clone(); - async move { - let result = orderbook.cancel_orders(cancellations).await; - Result::<_, Infallible>::Ok(response(result)) - } - }) + state + .orderbook + .cancel_orders(cancellations) + .await + .map(|_| Json("Cancelled")) + .into_response() } diff --git a/crates/orderbook/src/api/get_app_data.rs b/crates/orderbook/src/api/get_app_data.rs index 21c3a099e2..68ff733611 100644 --- a/crates/orderbook/src/api/get_app_data.rs +++ b/crates/orderbook/src/api/get_app_data.rs @@ -1,42 +1,38 @@ use { - crate::database::Postgres, - anyhow::Result, + 
crate::api::AppState, app_data::{AppDataDocument, AppDataHash}, - reqwest::StatusCode, - std::convert::Infallible, - warp::{Filter, Rejection, Reply, reply}, + axum::{ + extract::{Path, State}, + http::StatusCode, + response::{IntoResponse, Json, Response}, + }, + std::{str::FromStr, sync::Arc}, }; -pub fn request() -> impl Filter + Clone { - warp::path!("v1" / "app_data" / AppDataHash).and(warp::get()) -} +pub async fn get_app_data_handler( + State(state): State>, + Path(contract_app_data): Path, +) -> Response { + let Ok(contract_app_data) = AppDataHash::from_str(&contract_app_data) else { + return StatusCode::NOT_FOUND.into_response(); + }; -pub fn get( - database: Postgres, -) -> impl Filter,), Error = Rejection> + Clone { - request().and_then(move |contract_app_data: AppDataHash| { - let database = database.clone(); - async move { - let result = database.get_full_app_data(&contract_app_data).await; - Result::<_, Infallible>::Ok(match result { - Ok(Some(response)) => { - let response = reply::with_status( - reply::json(&AppDataDocument { - full_app_data: response, - }), - StatusCode::OK, - ); - Box::new(response) as Box - } - Ok(None) => Box::new(reply::with_status( - "full app data not found", - StatusCode::NOT_FOUND, - )), - Err(err) => { - tracing::error!(?err, "get_app_data_by_hash"); - Box::new(crate::api::internal_error_reply()) - } - }) + let result = state + .database_read + .get_full_app_data(&contract_app_data) + .await; + match result { + Ok(Some(response)) => ( + StatusCode::OK, + Json(AppDataDocument { + full_app_data: response, + }), + ) + .into_response(), + Ok(None) => (StatusCode::NOT_FOUND, "full app data not found").into_response(), + Err(err) => { + tracing::error!(?err, "get_app_data_by_hash"); + crate::api::internal_error_reply() } - }) + } } diff --git a/crates/orderbook/src/api/get_auction.rs b/crates/orderbook/src/api/get_auction.rs index cfe169f8a7..1577fdc363 100644 --- a/crates/orderbook/src/api/get_auction.rs +++ b/crates/orderbook/src/api/get_auction.rs @@ -1,34 +1,25 @@ use { - crate::{api::ApiReply, orderbook::Orderbook}, - anyhow::Result, - reqwest::StatusCode, - std::{convert::Infallible, sync::Arc}, - warp::{Filter, Rejection, reply::with_status}, + crate::api::AppState, + axum::{ + extract::State, + http::StatusCode, + response::{IntoResponse, Json, Response}, + }, + std::sync::Arc, }; -fn get_auction_request() -> impl Filter + Clone { - warp::path!("v1" / "auction").and(warp::get()) -} - -pub fn get_auction( - orderbook: Arc, -) -> impl Filter + Clone { - get_auction_request().and_then(move || { - let orderbook = orderbook.clone(); - async move { - let result = orderbook.get_auction().await; - let reply = match result { - Ok(Some(auction)) => with_status(warp::reply::json(&auction), StatusCode::OK), - Ok(None) => with_status( - super::error("NotFound", "There is no active auction"), - StatusCode::NOT_FOUND, - ), - Err(err) => { - tracing::error!(?err, "/api/v1/get_auction"); - crate::api::internal_error_reply() - } - }; - Result::<_, Infallible>::Ok(reply) +pub async fn get_auction_handler(State(state): State>) -> Response { + let result = state.orderbook.get_auction().await; + match result { + Ok(Some(auction)) => Json(auction).into_response(), + Ok(None) => ( + StatusCode::NOT_FOUND, + super::error("NotFound", "There is no active auction"), + ) + .into_response(), + Err(err) => { + tracing::error!(?err, "/api/v1/get_auction"); + crate::api::internal_error_reply() } - }) + } } diff --git a/crates/orderbook/src/api/get_native_price.rs 
b/crates/orderbook/src/api/get_native_price.rs index b2c027ebce..563b73bad5 100644 --- a/crates/orderbook/src/api/get_native_price.rs +++ b/crates/orderbook/src/api/get_native_price.rs @@ -1,50 +1,30 @@ use { - crate::api::{ApiReply, IntoWarpReply}, + crate::api::{AppState, PriceEstimationErrorWrapper}, alloy::primitives::Address, - anyhow::Result, + axum::{ + extract::{Path, State}, + http::StatusCode, + response::{IntoResponse, Json, Response}, + }, model::quote::NativeTokenPrice, - shared::price_estimation::native::NativePriceEstimating, - std::{convert::Infallible, sync::Arc, time::Duration}, - warp::{Filter, Rejection, hyper::StatusCode, reply::with_status}, + std::{str::FromStr, sync::Arc}, }; -fn get_native_prices_request() -> impl Filter + Clone { - warp::path!("v1" / "token" / Address / "native_price").and(warp::get()) -} - -pub fn get_native_price( - estimator: Arc, - quote_timeout: Duration, -) -> impl Filter + Clone { - get_native_prices_request().and_then(move |token: Address| { - let estimator = estimator.clone(); - async move { - let result = estimator.estimate_native_price(token, quote_timeout).await; - let reply = match result { - Ok(price) => with_status( - warp::reply::json(&NativeTokenPrice { price }), - StatusCode::OK, - ), - Err(err) => err.into_warp_reply(), - }; - Result::<_, Infallible>::Ok(reply) - } - }) -} - -#[cfg(test)] -mod tests { - use {super::*, alloy::primitives::address, futures::FutureExt, warp::test::request}; +pub async fn get_native_price_handler( + State(state): State>, + Path(token): Path, +) -> Response { + // TODO: remove after all downstream callers have been notified of the status + // code changes + let Ok(token) = Address::from_str(&token) else { + return StatusCode::NOT_FOUND.into_response(); + }; - #[test] - fn native_prices_query() { - let path = "/v1/token/0xdac17f958d2ee523a2206206994597c13d831ec7/native_price"; - let request = request().path(path).method("GET"); - let result = request - .filter(&get_native_prices_request()) - .now_or_never() - .unwrap() - .unwrap(); - assert_eq!(result, address!("dac17f958d2ee523a2206206994597c13d831ec7")); - } + state + .native_price_estimator + .estimate_native_price(token, state.quote_timeout) + .await + .map(|price| Json(NativeTokenPrice { price })) + .map_err(PriceEstimationErrorWrapper) + .into_response() } diff --git a/crates/orderbook/src/api/get_order_by_uid.rs b/crates/orderbook/src/api/get_order_by_uid.rs index 9a579eacaf..4e0b9e6e71 100644 --- a/crates/orderbook/src/api/get_order_by_uid.rs +++ b/crates/orderbook/src/api/get_order_by_uid.rs @@ -1,16 +1,30 @@ use { - crate::orderbook::Orderbook, + crate::api::AppState, anyhow::Result, + axum::{ + extract::{Path, State}, + http::StatusCode, + response::{IntoResponse, Json, Response}, + }, model::order::{Order, OrderUid}, - std::{convert::Infallible, sync::Arc}, - warp::{Filter, Rejection, hyper::StatusCode, reply}, + std::{str::FromStr, sync::Arc}, }; -pub fn get_order_by_uid_request() -> impl Filter + Clone { - warp::path!("v1" / "orders" / OrderUid).and(warp::get()) +pub async fn get_order_by_uid_handler( + State(state): State>, + Path(uid): Path, +) -> Response { + // TODO: remove after all downstream callers have been notified of the status + // code changes + let Ok(uid) = OrderUid::from_str(&uid) else { + return StatusCode::NOT_FOUND.into_response(); + }; + + let result = state.orderbook.get_order(&uid).await; + get_order_by_uid_response(result) } -pub fn get_order_by_uid_response(result: Result>) -> super::ApiReply { +pub fn 
get_order_by_uid_response(result: Result>) -> Response { let order = match result { Ok(order) => order, Err(err) => { @@ -19,47 +33,23 @@ pub fn get_order_by_uid_response(result: Result>) -> super::ApiRep } }; match order { - Some(order) => reply::with_status(reply::json(&order), StatusCode::OK), - None => reply::with_status( - super::error("NotFound", "Order was not found"), + Some(order) => (StatusCode::OK, Json(order)).into_response(), + None => ( StatusCode::NOT_FOUND, - ), + super::error("NotFound", "Order was not found"), + ) + .into_response(), } } -pub fn get_order_by_uid( - orderbook: Arc, -) -> impl Filter + Clone { - get_order_by_uid_request().and_then(move |uid| { - let orderbook = orderbook.clone(); - async move { - let result = orderbook.get_order(&uid).await; - Result::<_, Infallible>::Ok(get_order_by_uid_response(result)) - } - }) -} - #[cfg(test)] mod tests { - use { - super::*, - crate::api::response_body, - warp::{Reply, test::request}, - }; - - #[tokio::test] - async fn get_order_by_uid_request_ok() { - let uid = OrderUid::default(); - let request = request().path(&format!("/v1/orders/{uid}")).method("GET"); - let filter = get_order_by_uid_request(); - let result = request.filter(&filter).await.unwrap(); - assert_eq!(result, uid); - } + use {super::*, crate::api::response_body}; #[tokio::test] async fn get_order_by_uid_response_ok() { let order = Order::default(); - let response = get_order_by_uid_response(Ok(Some(order.clone()))).into_response(); + let response = get_order_by_uid_response(Ok(Some(order.clone()))); assert_eq!(response.status(), StatusCode::OK); let body = response_body(response).await; let response_order: Order = serde_json::from_slice(body.as_slice()).unwrap(); @@ -68,7 +58,7 @@ mod tests { #[tokio::test] async fn get_order_by_uid_response_non_existent() { - let response = get_order_by_uid_response(Ok(None)).into_response(); + let response = get_order_by_uid_response(Ok(None)); assert_eq!(response.status(), StatusCode::NOT_FOUND); } } diff --git a/crates/orderbook/src/api/get_order_status.rs b/crates/orderbook/src/api/get_order_status.rs index 90f0d5336e..e90e9537a3 100644 --- a/crates/orderbook/src/api/get_order_status.rs +++ b/crates/orderbook/src/api/get_order_status.rs @@ -1,36 +1,35 @@ use { - crate::{ - api::ApiReply, - orderbook::{OrderStatusError, Orderbook}, + crate::{api::AppState, orderbook::OrderStatusError}, + axum::{ + extract::{Path, State}, + http::StatusCode, + response::{IntoResponse, Json, Response}, }, - anyhow::Result, model::order::OrderUid, - std::{convert::Infallible, sync::Arc}, - warp::{Filter, Rejection, hyper::StatusCode, reply}, + std::{str::FromStr, sync::Arc}, }; -fn get_status_request() -> impl Filter + Clone { - warp::path!("v1" / "orders" / OrderUid / "status").and(warp::get()) -} +pub async fn get_status_handler( + State(state): State>, + Path(uid): Path, +) -> Response { + // TODO: remove after all downstream callers have been notified of the status + // code changes + let Ok(uid) = OrderUid::from_str(&uid) else { + return StatusCode::NOT_FOUND.into_response(); + }; -pub fn get_status( - orderbook: Arc, -) -> impl Filter + Clone { - get_status_request().and_then(move |uid| { - let orderbook = orderbook.clone(); - async move { - let status = orderbook.get_order_status(&uid).await; - Result::<_, Infallible>::Ok(match status { - Ok(status) => warp::reply::with_status(warp::reply::json(&status), StatusCode::OK), - Err(OrderStatusError::NotFound) => reply::with_status( - super::error("NotFound", "Order status was not 
found"), - StatusCode::NOT_FOUND, - ), - Err(err) => { - tracing::error!(?err, "get_order_status"); - *Box::new(crate::api::internal_error_reply()) - } - }) + let status = state.orderbook.get_order_status(&uid).await; + match status { + Ok(status) => Json(status).into_response(), + Err(err @ OrderStatusError::NotFound) => ( + StatusCode::NOT_FOUND, + super::error("NotFound", err.to_string()), + ) + .into_response(), + Err(err) => { + tracing::error!(?err, "get_order_status"); + crate::api::internal_error_reply() } - }) + } } diff --git a/crates/orderbook/src/api/get_orders_by_tx.rs b/crates/orderbook/src/api/get_orders_by_tx.rs index dc357227de..a06acee9db 100644 --- a/crates/orderbook/src/api/get_orders_by_tx.rs +++ b/crates/orderbook/src/api/get_orders_by_tx.rs @@ -1,47 +1,30 @@ use { - crate::{api::ApiReply, orderbook::Orderbook}, + crate::api::AppState, alloy::primitives::B256, - anyhow::Result, - reqwest::StatusCode, - std::{convert::Infallible, sync::Arc}, - warp::{Filter, Rejection, reply::with_status}, + axum::{ + extract::{Path, State}, + http::StatusCode, + response::{IntoResponse, Json, Response}, + }, + std::{str::FromStr, sync::Arc}, }; -pub fn get_orders_by_tx_request() -> impl Filter + Clone { - warp::path!("v1" / "transactions" / B256 / "orders").and(warp::get()) -} +pub async fn get_orders_by_tx_handler( + State(state): State>, + Path(hash): Path, +) -> Response { + // TODO: remove after all downstream callers have been notified of the status + // code changes + let Ok(hash) = B256::from_str(&hash) else { + return StatusCode::NOT_FOUND.into_response(); + }; -pub fn get_orders_by_tx( - orderbook: Arc, -) -> impl Filter + Clone { - get_orders_by_tx_request().and_then(move |hash: B256| { - let orderbook = orderbook.clone(); - async move { - let result = orderbook.get_orders_for_tx(&hash).await; - Result::<_, Infallible>::Ok(match result { - Ok(response) => with_status(warp::reply::json(&response), StatusCode::OK), - Err(err) => { - tracing::error!(?err, "get_orders_by_tx"); - crate::api::internal_error_reply() - } - }) + let result = state.orderbook.get_orders_for_tx(&hash).await; + match result { + Ok(response) => Json(response).into_response(), + Err(err) => { + tracing::error!(?err, "get_orders_by_tx"); + crate::api::internal_error_reply() } - }) -} - -#[cfg(test)] -mod tests { - use {super::*, std::str::FromStr}; - - #[tokio::test] - async fn request_ok() { - let hash_str = "0x0191dbb560e936bd3320d5a505c9c05580a0ebb7e12fe117551ac26e484f295e"; - let result = warp::test::request() - .path(&format!("/v1/transactions/{hash_str}/orders")) - .method("GET") - .filter(&get_orders_by_tx_request()) - .await - .unwrap(); - assert_eq!(result.0, B256::from_str(hash_str).unwrap().0); } } diff --git a/crates/orderbook/src/api/get_solver_competition.rs b/crates/orderbook/src/api/get_solver_competition.rs index 26e7a38390..4335160139 100644 --- a/crates/orderbook/src/api/get_solver_competition.rs +++ b/crates/orderbook/src/api/get_solver_competition.rs @@ -1,114 +1,88 @@ use { - crate::solver_competition::{Identifier, LoadSolverCompetitionError, SolverCompetitionStoring}, + crate::{ + api::AppState, + solver_competition::{Identifier, SolverCompetitionStoring}, + }, alloy::primitives::B256, - anyhow::Result, - model::{AuctionId, solver_competition::SolverCompetitionAPI}, - reqwest::StatusCode, - std::{convert::Infallible, sync::Arc}, - warp::{ - Filter, - Rejection, - reply::{Json, WithStatus, with_status}, + axum::{ + extract::{Path, State}, + http::StatusCode, + response::{IntoResponse, 
Json, Response}, }, + model::{AuctionId, solver_competition::SolverCompetitionAPI}, + std::{str::FromStr, sync::Arc}, };

-fn request_id() -> impl Filter + Clone { - warp::path!("v1" / "solver_competition" / AuctionId) - .and(warp::get()) - .map(Identifier::Id) -} +pub async fn get_solver_competition_by_id_handler( + State(state): State>, + Path(auction_id): Path, +) -> Response { + // TODO: remove after all downstream callers have been notified of the status + // code changes + let Ok(auction_id) = auction_id.parse::<u64>() else { + return StatusCode::NOT_FOUND.into_response(); + };

-fn request_hash() -> impl Filter + Clone { - warp::path!("v1" / "solver_competition" / "by_tx_hash" / B256) - .and(warp::get()) - .map(Identifier::Transaction) -} + // We parse into u64 so that negative numbers fail to parse and are rejected above. + // However, u64 also covers the gap between i64::MAX and u64::MAX: values beyond + // i64::MAX are valid unsigned integers but can never correspond to an existing + // auction, so they are reported as NOT_FOUND + if auction_id >= AuctionId::MAX.cast_unsigned() { + return crate::solver_competition::LoadSolverCompetitionError::NotFound.into_response(); + }

-fn request_latest() -> impl Filter + Clone { - warp::path!("v1" / "solver_competition" / "latest").and(warp::get()) -} -pub fn get( - handler: Arc, -) -> impl Filter + Clone { - request_id() - .or(request_hash()) - .unify() - .and_then(move |identifier: Identifier| { - let handler = handler.clone(); - async move { - let result = handler.load_competition(identifier).await; - Result::<_, Infallible>::Ok(response(result)) - } - }) + let handler: &dyn SolverCompetitionStoring = &state.database_read; + handler + .load_competition(Identifier::Id(auction_id.cast_signed())) + .await + .map(Json) + .into_response() }

-pub fn get_latest( - handler: Arc, -) -> impl Filter + Clone { - request_latest().and_then(move || { - let handler = handler.clone(); - async move { - let result = handler.load_latest_competition().await; - Result::<_, Infallible>::Ok(response(result)) - } - }) +pub async fn get_solver_competition_by_hash_handler( + State(state): State>, + Path(tx_hash): Path, +) -> Response { + // TODO: remove after all downstream callers have been notified of the status + // code changes + let Ok(tx_hash) = B256::from_str(&tx_hash) else { + return StatusCode::NOT_FOUND.into_response(); + }; + + let handler: &dyn SolverCompetitionStoring = &state.database_read; + handler + .load_competition(Identifier::Transaction(tx_hash)) + .await + .map(Json) + .into_response() }

-fn response( - result: Result, -) -> WithStatus { - match result { - Ok(response) => with_status(warp::reply::json(&response), StatusCode::OK), - Err(LoadSolverCompetitionError::NotFound) => with_status( - super::error("NotFound", "no competition found"), - StatusCode::NOT_FOUND, - ), - Err(LoadSolverCompetitionError::Other(err)) => { - tracing::error!(?err, "load solver competition"); - crate::api::internal_error_reply() - } - } +pub async fn get_solver_competition_latest_handler( + State(state): State>, +) -> Result, crate::solver_competition::LoadSolverCompetitionError> { + let handler: &dyn SolverCompetitionStoring = &state.database_read; + handler.load_latest_competition().await.map(Json) }

#[cfg(test)] mod tests { use { - super::*, - crate::solver_competition::MockSolverCompetitionStoring, - warp::{Reply, test::request}, + crate::solver_competition::LoadSolverCompetitionError, + axum::response::IntoResponse, + hyper::StatusCode, };

#[tokio::test] - async fn test() { - let mut 
storage = MockSolverCompetitionStoring::new(); - storage - .expect_load_competition() - .times(2) - .returning(|_| Ok(Default::default())); - storage - .expect_load_competition() - .times(1) - .return_once(|_| Err(LoadSolverCompetitionError::NotFound)); - let filter = get(Arc::new(storage)); - - let request_ = request().path("/v1/solver_competition/0").method("GET"); - let response = request_.filter(&filter).await.unwrap().into_response(); - dbg!(&response); - assert_eq!(response.status(), StatusCode::OK); - - let request_ = request() - .path( - "/v1/solver_competition/by_tx_hash/\ - 0xd51f28edffcaaa76be4a22f6375ad289272c037f3cc072345676e88d92ced8b5", - ) - .method("GET"); - let response = request_.filter(&filter).await.unwrap().into_response(); - dbg!(&response); - assert_eq!(response.status(), StatusCode::OK); + async fn test_response_not_found() { + let error = LoadSolverCompetitionError::NotFound; + let resp = error.into_response(); + assert_eq!(resp.status(), StatusCode::NOT_FOUND); + } - let request_ = request().path("/v1/solver_competition/1337").method("GET"); - let response = request_.filter(&filter).await.unwrap().into_response(); - dbg!(&response); - assert_eq!(response.status(), StatusCode::NOT_FOUND); + #[tokio::test] + async fn test_response_internal_error() { + let error = LoadSolverCompetitionError::Other(anyhow::anyhow!("test error")); + let resp = error.into_response(); + assert_eq!(resp.status(), StatusCode::INTERNAL_SERVER_ERROR); } } diff --git a/crates/orderbook/src/api/get_solver_competition_v2.rs b/crates/orderbook/src/api/get_solver_competition_v2.rs index 85850a8ec9..1e332c0446 100644 --- a/crates/orderbook/src/api/get_solver_competition_v2.rs +++ b/crates/orderbook/src/api/get_solver_competition_v2.rs @@ -1,76 +1,57 @@ use { - crate::{ - database::Postgres, - solver_competition::{Identifier, LoadSolverCompetitionError}, - }, + crate::{api::AppState, solver_competition::LoadSolverCompetitionError}, alloy::primitives::B256, - anyhow::Result, - model::{AuctionId, solver_competition_v2::Response}, - reqwest::StatusCode, - std::convert::Infallible, - warp::{ - Filter, - Rejection, - reply::{Json, WithStatus, with_status}, + axum::{ + extract::{Path, State}, + http::StatusCode, + response::{IntoResponse, Json, Response}, }, + model::{AuctionId, solver_competition_v2::Response as CompetitionResponse}, + std::{str::FromStr, sync::Arc}, }; -fn request_id() -> impl Filter + Clone { - warp::path!("v2" / "solver_competition" / AuctionId) - .and(warp::get()) - .map(Identifier::Id) -} +pub async fn get_solver_competition_by_id_handler( + State(state): State>, + Path(auction_id): Path, +) -> Response { + // TODO: remove after all downstream callers have been notified of the status + // code changes + let Ok(auction_id) = auction_id.parse::() else { + return StatusCode::NOT_FOUND.into_response(); + }; -fn request_hash() -> impl Filter + Clone { - warp::path!("v2" / "solver_competition" / "by_tx_hash" / B256) - .and(warp::get()) - .map(Identifier::Transaction) + state + .database_read + .load_competition_by_id(auction_id) + .await + .map(Json) + .into_response() } -fn request_latest() -> impl Filter + Clone { - warp::path!("v2" / "solver_competition" / "latest").and(warp::get()) -} - -pub fn get(db: Postgres) -> impl Filter + Clone { - request_id() - .or(request_hash()) - .unify() - .and_then(move |identifier: Identifier| { - let db = db.clone(); - async move { - let result = match identifier { - Identifier::Id(id) => db.load_competition_by_id(id).await, - 
Identifier::Transaction(hash) => db.load_competition_by_tx_hash(hash).await, - }; - Result::<_, Infallible>::Ok(response(result)) - } - }) -} +pub async fn get_solver_competition_by_hash_handler( + State(state): State>, + Path(tx_hash): Path, +) -> Response { + // TODO: remove after all downstream callers have been notified of the status + // code changes + let Ok(tx_hash) = B256::from_str(&tx_hash) else { + return StatusCode::NOT_FOUND.into_response(); + }; -pub fn get_latest( - db: Postgres, -) -> impl Filter + Clone { - request_latest().and_then(move || { - let db = db.clone(); - async move { - let result = db.load_latest_competition().await; - Result::<_, Infallible>::Ok(response(result)) - } - }) + state + .database_read + .load_competition_by_tx_hash(tx_hash) + .await + .map(Json) + .into_response() } -fn response( - result: Result, -) -> WithStatus { - match result { - Ok(response) => with_status(warp::reply::json(&response), StatusCode::OK), - Err(LoadSolverCompetitionError::NotFound) => with_status( - super::error("NotFound", "no competition found"), - StatusCode::NOT_FOUND, - ), - Err(LoadSolverCompetitionError::Other(err)) => { - tracing::error!(?err, "load solver competition"); - crate::api::internal_error_reply() - } - } +pub async fn get_solver_competition_latest_handler( + State(state): State>, +) -> Result, LoadSolverCompetitionError> { + state + .database_read + .load_latest_competition() + .await + .map(Json) } diff --git a/crates/orderbook/src/api/get_token_metadata.rs b/crates/orderbook/src/api/get_token_metadata.rs index d8f0631564..04794b61d9 100644 --- a/crates/orderbook/src/api/get_token_metadata.rs +++ b/crates/orderbook/src/api/get_token_metadata.rs @@ -1,31 +1,30 @@ use { - crate::database::Postgres, + crate::api::AppState, alloy::primitives::Address, - hyper::StatusCode, - std::convert::Infallible, - warp::{Filter, Rejection, reply}, + axum::{ + extract::{Path, State}, + http::StatusCode, + response::{IntoResponse, Json, Response}, + }, + std::{str::FromStr, sync::Arc}, }; -fn get_native_prices_request() -> impl Filter + Clone { - warp::path!("v1" / "token" / Address / "metadata").and(warp::get()) -} - -pub fn get_token_metadata( - db: Postgres, -) -> impl Filter + Clone { - get_native_prices_request().and_then(move |token: Address| { - let db = db.clone(); - async move { - let result = db.token_metadata(&token).await; - let response = match result { - Ok(metadata) => reply::with_status(reply::json(&metadata), StatusCode::OK), - Err(err) => { - tracing::error!(?err, ?token, "Failed to fetch token's first trade block"); - crate::api::internal_error_reply() - } - }; +pub async fn get_token_metadata_handler( + State(state): State>, + Path(token): Path, +) -> Response { + // TODO: remove after all downstream callers have been notified of the status + // code changes + let Ok(token) = Address::from_str(&token) else { + return StatusCode::NOT_FOUND.into_response(); + }; - Result::<_, Infallible>::Ok(response) + let result = state.database_read.token_metadata(&token).await; + match result { + Ok(metadata) => Json(metadata).into_response(), + Err(err) => { + tracing::error!(?err, ?token, "Failed to fetch token's first trade block"); + crate::api::internal_error_reply() } - }) + } } diff --git a/crates/orderbook/src/api/get_total_surplus.rs b/crates/orderbook/src/api/get_total_surplus.rs index afdbc07b93..143b67b9e8 100644 --- a/crates/orderbook/src/api/get_total_surplus.rs +++ b/crates/orderbook/src/api/get_total_surplus.rs @@ -1,30 +1,37 @@ use { - 
crate::database::Postgres, + crate::api::AppState, alloy::primitives::Address, + axum::{ + extract::{Path, State}, + http::StatusCode, + response::{IntoResponse, Json, Response}, + }, serde_json::json, - std::convert::Infallible, - warp::{Filter, Rejection, http::StatusCode, reply::with_status}, + std::{str::FromStr, sync::Arc}, }; -pub fn get(db: Postgres) -> impl Filter + Clone { - warp::path!("v1" / "users" / Address / "total_surplus") - .and(warp::get()) - .and_then(move |user| { - let db = db.clone(); - async move { - let surplus = db.total_surplus(&user).await; - Result::<_, Infallible>::Ok(match surplus { - Ok(surplus) => with_status( - warp::reply::json(&json!({ - "totalSurplus": surplus.to_string() - })), - StatusCode::OK, - ), - Err(err) => { - tracing::error!(?err, ?user, "failed to compute total surplus"); - crate::api::internal_error_reply() - } - }) - } - }) +pub async fn get_total_surplus_handler( + State(state): State>, + Path(user): Path, +) -> Response { + // TODO: remove after all downstream callers have been notified of the status + // code changes + let Ok(user) = Address::from_str(&user) else { + return StatusCode::NOT_FOUND.into_response(); + }; + + let surplus = state.database_read.total_surplus(&user).await; + match surplus { + Ok(surplus) => ( + StatusCode::OK, + Json(json!({ + "totalSurplus": surplus.to_string() + })), + ) + .into_response(), + Err(err) => { + tracing::error!(?err, ?user, "failed to compute total surplus"); + crate::api::internal_error_reply() + } + } } diff --git a/crates/orderbook/src/api/get_trades.rs b/crates/orderbook/src/api/get_trades.rs index a104c7cb55..fd8dcaf907 100644 --- a/crates/orderbook/src/api/get_trades.rs +++ b/crates/orderbook/src/api/get_trades.rs @@ -1,22 +1,23 @@ use { crate::{ - api::{ApiReply, error}, - database::{ - Postgres, - trades::{TradeFilter, TradeRetrieving}, - }, + api::{AppState, error}, + database::trades::{TradeFilter, TradeRetrieving}, }, alloy::primitives::Address, - anyhow::{Context, Result}, + anyhow::Context, + axum::{ + extract::{Query, State}, + http::StatusCode, + response::{IntoResponse, Json, Response}, + }, model::order::OrderUid, serde::Deserialize, - std::convert::Infallible, - warp::{Filter, Rejection, hyper::StatusCode, reply::with_status}, + std::sync::Arc, }; #[derive(Deserialize)] #[serde(rename_all = "camelCase")] -struct Query { +pub(crate) struct QueryParams { pub order_uid: Option, pub owner: Option
, } @@ -26,7 +27,7 @@ enum TradeFilterError { InvalidFilter(String), } -impl Query { +impl QueryParams { fn trade_filter(&self) -> TradeFilter { TradeFilter { order_uid: self.order_uid, @@ -44,87 +45,71 @@ impl Query { } } -fn get_trades_request() --> impl Filter,), Error = Rejection> + Clone { - warp::path!("v1" / "trades") - .and(warp::get()) - .and(warp::query::()) - .map(|query: Query| query.validate()) -} +pub async fn get_trades_handler( + State(state): State>, + Query(query): Query, +) -> Response { + let trade_filter = match query.validate() { + Ok(trade_filter) => trade_filter, + Err(TradeFilterError::InvalidFilter(msg)) => { + let err = error("InvalidTradeFilter", msg); + return (StatusCode::BAD_REQUEST, err).into_response(); + } + }; -pub fn get_trades(db: Postgres) -> impl Filter + Clone { - get_trades_request().and_then(move |request_result| { - let database = db.clone(); - async move { - Result::<_, Infallible>::Ok(match request_result { - Ok(trade_filter) => { - let result = database.trades(&trade_filter).await.context("get_trades"); - match result { - Ok(reply) => with_status(warp::reply::json(&reply), StatusCode::OK), - Err(err) => { - tracing::error!(?err, "get_trades"); - crate::api::internal_error_reply() - } - } - } - Err(TradeFilterError::InvalidFilter(msg)) => { - let err = error("InvalidTradeFilter", msg); - with_status(err, StatusCode::BAD_REQUEST) - } - }) + let result = state + .database_read + .trades(&trade_filter) + .await + .context("get_trades"); + match result { + Ok(reply) => Json(reply).into_response(), + Err(err) => { + tracing::error!(?err, "get_trades"); + crate::api::internal_error_reply() } - }) + } } #[cfg(test)] mod tests { - use { - super::*, - warp::test::{RequestBuilder, request}, - }; - - #[tokio::test] - async fn get_trades_request_ok() { - let trade_filter = |request: RequestBuilder| async move { - let filter = get_trades_request(); - request.method("GET").filter(&filter).await - }; + use {super::*, alloy::primitives::Address, model::order::OrderUid}; + #[test] + fn query_validation_ok() { let owner = Address::with_last_byte(1); - let owner_path = format!("/v1/trades?owner=0x{owner:x}"); - let result = trade_filter(request().path(owner_path.as_str())) - .await - .unwrap() - .unwrap(); + let query = QueryParams { + owner: Some(owner), + order_uid: None, + }; + let result = query.validate().unwrap(); assert_eq!(result.owner, Some(owner)); assert_eq!(result.order_uid, None); let uid = OrderUid([1u8; 56]); - let order_uid_path = format!("/v1/trades?orderUid={uid}"); - let result = trade_filter(request().path(order_uid_path.as_str())) - .await - .unwrap() - .unwrap(); + let query = QueryParams { + owner: None, + order_uid: Some(uid), + }; + let result = query.validate().unwrap(); assert_eq!(result.owner, None); assert_eq!(result.order_uid, Some(uid)); } - #[tokio::test] - async fn get_trades_request_err() { - let trade_filter = |request: RequestBuilder| async move { - let filter = get_trades_request(); - request.method("GET").filter(&filter).await - }; - + #[test] + fn query_validation_err() { let owner = Address::with_last_byte(1); let uid = OrderUid([1u8; 56]); - let path = format!("/v1/trades?owner=0x{owner:x}&orderUid={uid}"); - - let result = trade_filter(request().path(path.as_str())).await.unwrap(); - assert!(result.is_err()); + let query = QueryParams { + owner: Some(owner), + order_uid: Some(uid), + }; + assert!(query.validate().is_err()); - let path = "/v1/trades"; - let result = trade_filter(request().path(path)).await.unwrap(); - 
assert!(result.is_err()); + let query = QueryParams { + owner: None, + order_uid: None, + }; + assert!(query.validate().is_err()); } } diff --git a/crates/orderbook/src/api/get_trades_v2.rs b/crates/orderbook/src/api/get_trades_v2.rs index a570ccf3a5..b3c1073849 100644 --- a/crates/orderbook/src/api/get_trades_v2.rs +++ b/crates/orderbook/src/api/get_trades_v2.rs @@ -1,22 +1,23 @@ use { crate::{ - api::{ApiReply, error}, - database::{ - Postgres, - trades::{PaginatedTradeFilter, TradeRetrievingPaginated}, - }, + api::{AppState, error}, + database::trades::{PaginatedTradeFilter, TradeRetrievingPaginated}, }, alloy::primitives::Address, - anyhow::{Context, Result}, + anyhow::Context, + axum::{ + extract::{Query, State}, + http::StatusCode, + response::{IntoResponse, Json, Response}, + }, model::order::OrderUid, serde::Deserialize, - std::convert::Infallible, - warp::{Filter, Rejection, hyper::StatusCode, reply::with_status}, + std::sync::Arc, }; #[derive(Deserialize)] #[serde(rename_all = "camelCase")] -struct Query { +pub(crate) struct QueryParams { pub order_uid: Option, pub owner: Option
, pub offset: Option, @@ -34,7 +35,7 @@ enum TradeFilterError { InvalidLimit(u64, u64), } -impl Query { +impl QueryParams { fn trade_filter(&self, offset: u64, limit: u64) -> PaginatedTradeFilter { PaginatedTradeFilter { order_uid: self.order_uid, @@ -63,126 +64,124 @@ impl Query { } } -fn get_trades_request() --> impl Filter,), Error = Rejection> + Clone -{ - warp::path!("v2" / "trades") - .and(warp::get()) - .and(warp::query::()) - .map(|query: Query| query.validate()) -} +pub async fn get_trades_handler( + State(state): State>, + Query(query): Query, +) -> Response { + let trade_filter = match query.validate() { + Ok(trade_filter) => trade_filter, + Err(TradeFilterError::InvalidFilter(msg)) => { + let err = error("InvalidTradeFilter", msg); + return (StatusCode::BAD_REQUEST, err).into_response(); + } + Err(TradeFilterError::InvalidLimit(min, max)) => { + let err = error( + "InvalidLimit", + format!("limit must be between {min} and {max}"), + ); + return (StatusCode::BAD_REQUEST, err).into_response(); + } + }; -pub fn get_trades(db: Postgres) -> impl Filter + Clone { - get_trades_request().and_then(move |request_result| { - let database = db.clone(); - async move { - Result::<_, Infallible>::Ok(match request_result { - Ok(trade_filter) => { - let result = database - .trades_paginated(&trade_filter) - .await - .context("get_trades_v2"); - match result { - Ok(reply) => with_status(warp::reply::json(&reply), StatusCode::OK), - Err(err) => { - tracing::error!(?err, "get_trades_v2"); - crate::api::internal_error_reply() - } - } - } - Err(TradeFilterError::InvalidFilter(msg)) => { - let err = error("InvalidTradeFilter", msg); - with_status(err, StatusCode::BAD_REQUEST) - } - Err(TradeFilterError::InvalidLimit(min, max)) => { - let err = error( - "InvalidLimit", - format!("limit must be between {min} and {max}"), - ); - with_status(err, StatusCode::BAD_REQUEST) - } - }) + let result = state + .database_read + .trades_paginated(&trade_filter) + .await + .context("get_trades_v2"); + match result { + Ok(reply) => (StatusCode::OK, Json(reply)).into_response(), + Err(err) => { + tracing::error!(?err, "get_trades_v2"); + crate::api::internal_error_reply() } - }) + } } #[cfg(test)] mod tests { - use { - super::*, - warp::test::{RequestBuilder, request}, - }; - - #[tokio::test] - async fn get_trades_request_ok() { - let trade_filter = |request: RequestBuilder| async move { - let filter = get_trades_request(); - request.method("GET").filter(&filter).await - }; + use {super::*, alloy::primitives::Address, model::order::OrderUid}; + #[test] + fn query_validation_ok() { let owner = Address::with_last_byte(1); - let owner_path = format!("/v2/trades?owner=0x{owner:x}"); - let result = trade_filter(request().path(owner_path.as_str())) - .await - .unwrap() - .unwrap(); + let query = QueryParams { + owner: Some(owner), + order_uid: None, + offset: None, + limit: None, + }; + let result = query.validate().unwrap(); assert_eq!(result.owner, Some(owner)); assert_eq!(result.order_uid, None); assert_eq!(result.offset, DEFAULT_OFFSET); assert_eq!(result.limit, DEFAULT_LIMIT); let uid = OrderUid([1u8; 56]); - let order_uid_path = format!("/v2/trades?orderUid={uid}"); - let result = trade_filter(request().path(order_uid_path.as_str())) - .await - .unwrap() - .unwrap(); + let query = QueryParams { + owner: None, + order_uid: Some(uid), + offset: None, + limit: None, + }; + let result = query.validate().unwrap(); assert_eq!(result.owner, None); assert_eq!(result.order_uid, Some(uid)); assert_eq!(result.offset, 
DEFAULT_OFFSET); assert_eq!(result.limit, DEFAULT_LIMIT); // Test with custom offset and limit - let owner_path = format!("/v2/trades?owner=0x{owner:x}&offset=10&limit=50"); - let result = trade_filter(request().path(owner_path.as_str())) - .await - .unwrap() - .unwrap(); + let query = QueryParams { + owner: Some(owner), + order_uid: None, + offset: Some(10), + limit: Some(50), + }; + let result = query.validate().unwrap(); assert_eq!(result.owner, Some(owner)); assert_eq!(result.offset, 10); assert_eq!(result.limit, 50); } - #[tokio::test] - async fn get_trades_request_err() { - let trade_filter = |request: RequestBuilder| async move { - let filter = get_trades_request(); - request.method("GET").filter(&filter).await - }; - + #[test] + fn query_validation_err() { let owner = Address::with_last_byte(1); let uid = OrderUid([1u8; 56]); - let path = format!("/v2/trades?owner=0x{owner:x}&orderUid={uid}"); - - let result = trade_filter(request().path(path.as_str())).await.unwrap(); - assert!(result.is_err()); + let query = QueryParams { + owner: Some(owner), + order_uid: Some(uid), + offset: None, + limit: None, + }; + assert!(query.validate().is_err()); - let path = "/v2/trades"; - let result = trade_filter(request().path(path)).await.unwrap(); - assert!(result.is_err()); + let query = QueryParams { + owner: None, + order_uid: None, + offset: None, + limit: None, + }; + assert!(query.validate().is_err()); // Test limit validation - let path = format!("/v2/trades?owner=0x{owner:x}&limit=0"); - let result = trade_filter(request().path(path.as_str())).await.unwrap(); + let query = QueryParams { + owner: Some(owner), + order_uid: None, + offset: None, + limit: Some(0), + }; assert!(matches!( - result, + query.validate(), Err(TradeFilterError::InvalidLimit(MIN_LIMIT, MAX_LIMIT)) )); - let path = format!("/v2/trades?owner=0x{owner:x}&limit=1001"); - let result = trade_filter(request().path(path.as_str())).await.unwrap(); + let query = QueryParams { + owner: Some(owner), + order_uid: None, + offset: None, + limit: Some(1001), + }; assert!(matches!( - result, + query.validate(), Err(TradeFilterError::InvalidLimit(MIN_LIMIT, MAX_LIMIT)) )); } diff --git a/crates/orderbook/src/api/get_user_orders.rs b/crates/orderbook/src/api/get_user_orders.rs index 77783b29bf..07daef1352 100644 --- a/crates/orderbook/src/api/get_user_orders.rs +++ b/crates/orderbook/src/api/get_user_orders.rs @@ -1,82 +1,57 @@ use { - crate::{api::ApiReply, orderbook::Orderbook}, + crate::api::AppState, alloy::primitives::Address, - anyhow::Result, + axum::{ + extract::{Path, Query, State}, + http::StatusCode, + response::{IntoResponse, Json, Response}, + }, serde::Deserialize, - std::{convert::Infallible, sync::Arc}, - warp::{Filter, Rejection, hyper::StatusCode, reply::with_status}, + std::{str::FromStr, sync::Arc}, }; #[derive(Clone, Copy, Debug, Deserialize)] -struct Query { +pub(crate) struct QueryParams { offset: Option, limit: Option, } -fn request() -> impl Filter + Clone { - warp::path!("v1" / "account" / Address / "orders") - .and(warp::get()) - .and(warp::query::()) -} +pub async fn get_user_orders_handler( + State(state): State>, + Path(owner): Path, + Query(query): Query, +) -> Response { + // TODO: remove after all downstream callers have been notified of the status + // code changes + let Ok(owner) = Address::from_str(&owner) else { + return StatusCode::NOT_FOUND.into_response(); + }; -pub fn get_user_orders( - orderbook: Arc, -) -> impl Filter + Clone { - request().and_then(move |owner: Address, query: Query| { - let 
orderbook = orderbook.clone(); - async move { - const DEFAULT_OFFSET: u64 = 0; - const DEFAULT_LIMIT: u64 = 10; - const MIN_LIMIT: u64 = 1; - const MAX_LIMIT: u64 = 1000; - let offset = query.offset.unwrap_or(DEFAULT_OFFSET); - let limit = query.limit.unwrap_or(DEFAULT_LIMIT); - if !(MIN_LIMIT..=MAX_LIMIT).contains(&limit) { - return Ok(with_status( - super::error( - "LIMIT_OUT_OF_BOUNDS", - format!("The pagination limit is [{MIN_LIMIT},{MAX_LIMIT}]."), - ), - StatusCode::BAD_REQUEST, - )); - } - let result = orderbook.get_user_orders(&owner, offset, limit).await; - Result::<_, Infallible>::Ok(match result { - Ok(reply) => with_status(warp::reply::json(&reply), StatusCode::OK), - Err(err) => { - tracing::error!(?err, "get_user_orders"); - crate::api::internal_error_reply() - } - }) - } - }) -} + const DEFAULT_OFFSET: u64 = 0; + const DEFAULT_LIMIT: u64 = 10; + const MIN_LIMIT: u64 = 1; + const MAX_LIMIT: u64 = 1000; -#[cfg(test)] -mod tests { - use super::*; + let offset = query.offset.unwrap_or(DEFAULT_OFFSET); + let limit = query.limit.unwrap_or(DEFAULT_LIMIT); - #[tokio::test] - async fn request_() { - let path = "/v1/account/0x0000000000000000000000000000000000000001/orders"; - let result = warp::test::request() - .path(path) - .method("GET") - .filter(&request()) - .await - .unwrap(); - assert_eq!(result.0, Address::with_last_byte(1)); - assert_eq!(result.1.offset, None); - assert_eq!(result.1.limit, None); + if !(MIN_LIMIT..=MAX_LIMIT).contains(&limit) { + return ( + StatusCode::BAD_REQUEST, + super::error( + "LIMIT_OUT_OF_BOUNDS", + format!("The pagination limit is [{MIN_LIMIT},{MAX_LIMIT}]."), + ), + ) + .into_response(); + } - let path = "/v1/account/0x0000000000000000000000000000000000000001/orders?offset=1&limit=2"; - let result = warp::test::request() - .path(path) - .method("GET") - .filter(&request()) - .await - .unwrap(); - assert_eq!(result.1.offset, Some(1)); - assert_eq!(result.1.limit, Some(2)); + let result = state.orderbook.get_user_orders(&owner, offset, limit).await; + match result { + Ok(reply) => (StatusCode::OK, Json(reply)).into_response(), + Err(err) => { + tracing::error!(?err, "get_user_orders"); + crate::api::internal_error_reply() + } } } diff --git a/crates/orderbook/src/api/post_order.rs b/crates/orderbook/src/api/post_order.rs index 7a57ac4ed4..8e2bfea75e 100644 --- a/crates/orderbook/src/api/post_order.rs +++ b/crates/orderbook/src/api/post_order.rs @@ -1,12 +1,17 @@ use { crate::{ - api::{ApiReply, IntoWarpReply, error, extract_payload}, - orderbook::{AddOrderError, OrderReplacementError, Orderbook}, + api::{AppState, error}, + orderbook::{AddOrderError, OrderReplacementError}, + }, + axum::{ + Json, + body, + extract::State, + http::StatusCode, + response::{IntoResponse, Response}, }, - anyhow::Result, model::{ - order::{AppdataFromMismatch, OrderCreation, OrderUid}, - quote::QuoteId, + order::{AppdataFromMismatch, OrderCreation}, signature, }, shared::order_validation::{ @@ -15,77 +20,97 @@ use { PartialValidationError, ValidationError, }, - std::{convert::Infallible, sync::Arc}, - warp::{ - Filter, - Rejection, - hyper::StatusCode, - reply::{self, with_status}, - }, + std::sync::Arc, }; -pub fn create_order_request() -> impl Filter + Clone -{ - warp::path!("v1" / "orders") - .and(warp::post()) - .and(extract_payload()) +pub async fn post_order_handler(State(state): State>, body: body::Bytes) -> Response { + // TODO: remove after all downstream callers have been notified of the status + // code changes + let order = match 
serde_json::from_slice::(&body) { + Ok(order) => order, + Err(err) => return (StatusCode::BAD_REQUEST, err.to_string()).into_response(), + }; + + state + .orderbook + .add_order(order.clone()) + .await + .map(|(order_uid, quote_metadata)| { + let quote_id = quote_metadata.as_ref().and_then(|q| q.id); + let quote_solver = quote_metadata.as_ref().map(|q| q.solver); + tracing::debug!(%order_uid, ?quote_id, ?quote_solver, "order created"); + (StatusCode::CREATED, Json(order_uid)) + }) + .inspect_err(|err| { + tracing::debug!(?order, ?err, "error creating order"); + }) + .into_response() } pub struct PartialValidationErrorWrapper(pub PartialValidationError); -impl IntoWarpReply for PartialValidationErrorWrapper { - fn into_warp_reply(self) -> ApiReply { +impl IntoResponse for PartialValidationErrorWrapper { + fn into_response(self) -> Response { match self.0 { - PartialValidationError::UnsupportedBuyTokenDestination(dest) => with_status( + PartialValidationError::UnsupportedBuyTokenDestination(dest) => ( + StatusCode::BAD_REQUEST, error("UnsupportedBuyTokenDestination", format!("Type {dest:?}")), + ) + .into_response(), + PartialValidationError::UnsupportedSellTokenSource(src) => ( StatusCode::BAD_REQUEST, - ), - PartialValidationError::UnsupportedSellTokenSource(src) => with_status( error("UnsupportedSellTokenSource", format!("Type {src:?}")), + ) + .into_response(), + PartialValidationError::UnsupportedOrderType => ( StatusCode::BAD_REQUEST, - ), - PartialValidationError::UnsupportedOrderType => with_status( error( "UnsupportedOrderType", "This order type is currently not supported", ), - StatusCode::BAD_REQUEST, - ), - PartialValidationError::Forbidden => with_status( - error("Forbidden", "Forbidden, your account is deny-listed"), + ) + .into_response(), + PartialValidationError::Forbidden => ( StatusCode::FORBIDDEN, - ), - PartialValidationError::ValidTo(OrderValidToError::Insufficient) => with_status( + error("Forbidden", "Forbidden, your account is deny-listed"), + ) + .into_response(), + PartialValidationError::ValidTo(OrderValidToError::Insufficient) => ( + StatusCode::BAD_REQUEST, error( "InsufficientValidTo", "validTo is not far enough in the future", ), + ) + .into_response(), + PartialValidationError::ValidTo(OrderValidToError::Excessive) => ( StatusCode::BAD_REQUEST, - ), - PartialValidationError::ValidTo(OrderValidToError::Excessive) => with_status( error("ExcessiveValidTo", "validTo is too far into the future"), + ) + .into_response(), + PartialValidationError::InvalidNativeSellToken => ( StatusCode::BAD_REQUEST, - ), - PartialValidationError::InvalidNativeSellToken => with_status( error( "InvalidNativeSellToken", "The chain's native token (Ether/xDai) cannot be used as the sell token", ), + ) + .into_response(), + PartialValidationError::SameBuyAndSellToken => ( StatusCode::BAD_REQUEST, - ), - PartialValidationError::SameBuyAndSellToken => with_status( error( "SameBuyAndSellToken", "Buy token is the same as the sell token.", ), + ) + .into_response(), + PartialValidationError::UnsupportedToken { token, reason } => ( StatusCode::BAD_REQUEST, - ), - PartialValidationError::UnsupportedToken { token, reason } => with_status( error( "UnsupportedToken", format!("Token {token:?} is unsupported: {reason}"), ), - StatusCode::BAD_REQUEST, - ), + ) + .into_response(), PartialValidationError::Other(err) => { tracing::error!(?err, "PartialValidatonError"); crate::api::internal_error_reply() @@ -95,14 +120,16 @@ impl IntoWarpReply for PartialValidationErrorWrapper { } pub struct 
AppDataValidationErrorWrapper(pub AppDataValidationError); -impl IntoWarpReply for AppDataValidationErrorWrapper { - fn into_warp_reply(self) -> ApiReply { +impl IntoResponse for AppDataValidationErrorWrapper { + fn into_response(self) -> Response { match self.0 { - AppDataValidationError::Invalid(err) => with_status( + AppDataValidationError::Invalid(err) => ( + StatusCode::BAD_REQUEST, error("InvalidAppData", format!("{err:?}")), + ) + .into_response(), + AppDataValidationError::Mismatch { provided, actual } => ( StatusCode::BAD_REQUEST, - ), - AppDataValidationError::Mismatch { provided, actual } => with_status( error( "AppDataHashMismatch", format!( @@ -110,30 +137,34 @@ impl IntoWarpReply for AppDataValidationErrorWrapper { {provided:?}", ), ), - StatusCode::BAD_REQUEST, - ), + ) + .into_response(), } } } pub struct ValidationErrorWrapper(ValidationError); -impl IntoWarpReply for ValidationErrorWrapper { - fn into_warp_reply(self) -> ApiReply { +impl IntoResponse for ValidationErrorWrapper { + fn into_response(self) -> Response { match self.0 { - ValidationError::Partial(pre) => PartialValidationErrorWrapper(pre).into_warp_reply(), - ValidationError::AppData(err) => AppDataValidationErrorWrapper(err).into_warp_reply(), - ValidationError::PriceForQuote(err) => err.into_warp_reply(), - ValidationError::MissingFrom => with_status( + ValidationError::Partial(pre) => PartialValidationErrorWrapper(pre).into_response(), + ValidationError::AppData(err) => AppDataValidationErrorWrapper(err).into_response(), + ValidationError::PriceForQuote(err) => { + super::PriceEstimationErrorWrapper(err).into_response() + } + ValidationError::MissingFrom => ( + StatusCode::BAD_REQUEST, error( "MissingFrom", "From address must be specified for on-chain signature", ), - StatusCode::BAD_REQUEST, - ), + ) + .into_response(), ValidationError::AppdataFromMismatch(AppdataFromMismatch { from, app_data_signer, - }) => with_status( + }) => ( + StatusCode::BAD_REQUEST, error( "AppdataFromMismatch", format!( @@ -141,9 +172,10 @@ impl IntoWarpReply for ValidationErrorWrapper { {app_data_signer:?} specified in the app data" ), ), + ) + .into_response(), + ValidationError::WrongOwner(signature::Recovered { message, signer }) => ( StatusCode::BAD_REQUEST, - ), - ValidationError::WrongOwner(signature::Recovered { message, signer }) => with_status( error( "WrongOwner", format!( @@ -151,78 +183,90 @@ impl IntoWarpReply for ValidationErrorWrapper { from address" ), ), + ) + .into_response(), + ValidationError::InvalidEip1271Signature(hash) => ( StatusCode::BAD_REQUEST, - ), - ValidationError::InvalidEip1271Signature(hash) => with_status( error( "InvalidEip1271Signature", format!("signature for computed order hash {hash:?} is not valid"), ), + ) + .into_response(), + ValidationError::InsufficientBalance => ( StatusCode::BAD_REQUEST, - ), - ValidationError::InsufficientBalance => with_status( error( "InsufficientBalance", "order owner must have funds worth at least x in his account", ), + ) + .into_response(), + ValidationError::InsufficientAllowance => ( StatusCode::BAD_REQUEST, - ), - ValidationError::InsufficientAllowance => with_status( error( "InsufficientAllowance", "order owner must give allowance to VaultRelayer", ), + ) + .into_response(), + ValidationError::InvalidSignature => ( StatusCode::BAD_REQUEST, - ), - ValidationError::InvalidSignature => with_status( error("InvalidSignature", "invalid signature"), + ) + .into_response(), + ValidationError::NonZeroFee => ( StatusCode::BAD_REQUEST, - ), - 
ValidationError::NonZeroFee => with_status( error("NonZeroFee", "Fee must be zero"), - StatusCode::BAD_REQUEST, - ), - ValidationError::SellAmountOverflow => with_status( + ) + .into_response(), + ValidationError::SellAmountOverflow => ( + StatusCode::INTERNAL_SERVER_ERROR, error( "SellAmountOverflow", "Sell amount + fee amount must fit in U256", ), - StatusCode::INTERNAL_SERVER_ERROR, - ), - ValidationError::TransferSimulationFailed => with_status( + ) + .into_response(), + ValidationError::TransferSimulationFailed => ( + StatusCode::BAD_REQUEST, error( "TransferSimulationFailed", "sell token cannot be transferred", ), + ) + .into_response(), + ValidationError::QuoteNotVerified => ( StatusCode::BAD_REQUEST, - ), - ValidationError::QuoteNotVerified => with_status( error( "QuoteNotVerified", "No quote for this trade could be verified to be accurate. Aborting the order \ creation since it will likely not be executed.", ), + ) + .into_response(), + ValidationError::ZeroAmount => ( StatusCode::BAD_REQUEST, - ), - ValidationError::ZeroAmount => with_status( error("ZeroAmount", "Buy or sell amount is zero."), + ) + .into_response(), + ValidationError::IncompatibleSigningScheme => ( StatusCode::BAD_REQUEST, - ), - ValidationError::IncompatibleSigningScheme => with_status( error( "IncompatibleSigningScheme", "Signing scheme is not compatible with order placement method.", ), + ) + .into_response(), + ValidationError::TooManyLimitOrders => ( StatusCode::BAD_REQUEST, - ), - ValidationError::TooManyLimitOrders => with_status( error("TooManyLimitOrders", "Too many limit orders"), + ) + .into_response(), + ValidationError::TooMuchGas => ( StatusCode::BAD_REQUEST, - ), - ValidationError::TooMuchGas => with_status( error("TooMuchGas", "Executing order requires too many gas units"), - StatusCode::BAD_REQUEST, - ), + ) + .into_response(), ValidationError::Other(err) => { tracing::error!(?err, "ValidationErrorWrapper"); @@ -232,14 +276,15 @@ impl IntoWarpReply for ValidationErrorWrapper { } } -impl IntoWarpReply for AddOrderError { - fn into_warp_reply(self) -> ApiReply { +impl IntoResponse for AddOrderError { + fn into_response(self) -> Response { match self { - Self::OrderValidation(err) => ValidationErrorWrapper(err).into_warp_reply(), - Self::DuplicatedOrder => with_status( - error("DuplicatedOrder", "order already exists"), + Self::OrderValidation(err) => ValidationErrorWrapper(err).into_response(), + Self::DuplicatedOrder => ( StatusCode::BAD_REQUEST, - ), + error("DuplicatedOrder", "order already exists"), + ) + .into_response(), Self::Database(err) => { tracing::error!(?err, "AddOrderError"); crate::api::internal_error_reply() @@ -253,38 +298,43 @@ impl IntoWarpReply for AddOrderError { ); crate::api::internal_error_reply() } - AddOrderError::OrderNotFound(err) => err.into_warp_reply(), - AddOrderError::InvalidAppData(err) => reply::with_status( - super::error("InvalidAppData", err.to_string()), + AddOrderError::OrderNotFound(err) => err.into_response(), + AddOrderError::InvalidAppData(err) => ( StatusCode::BAD_REQUEST, - ), - AddOrderError::InvalidReplacement(err) => err.into_warp_reply(), - AddOrderError::MetadataSerializationFailed(err) => reply::with_status( - super::error("MetadataSerializationFailed", err.to_string()), + super::error("InvalidAppData", err.to_string()), + ) + .into_response(), + AddOrderError::InvalidReplacement(err) => err.into_response(), + AddOrderError::MetadataSerializationFailed(err) => ( StatusCode::INTERNAL_SERVER_ERROR, - ), + 
super::error("MetadataSerializationFailed", err.to_string()), + ) + .into_response(), } } } -impl IntoWarpReply for OrderReplacementError { - fn into_warp_reply(self) -> super::ApiReply { +impl IntoResponse for OrderReplacementError { + fn into_response(self) -> Response { match self { - OrderReplacementError::InvalidSignature => with_status( - super::error("InvalidSignature", "Malformed signature"), + OrderReplacementError::InvalidSignature => ( StatusCode::BAD_REQUEST, - ), - OrderReplacementError::WrongOwner => with_status( - super::error("WrongOwner", "Old and new orders have different signers"), + super::error("InvalidSignature", "Malformed signature"), + ) + .into_response(), + OrderReplacementError::WrongOwner => ( StatusCode::UNAUTHORIZED, - ), - OrderReplacementError::OldOrderActivelyBidOn => with_status( + super::error("WrongOwner", "Old and new orders have different signers"), + ) + .into_response(), + OrderReplacementError::OldOrderActivelyBidOn => ( + StatusCode::BAD_REQUEST, super::error( "OldOrderActivelyBidOn", "The old order is actively beign bid on in recent auctions", ), - StatusCode::BAD_REQUEST, - ), + ) + .into_response(), OrderReplacementError::Other(err) => { tracing::error!(?err, "replace_order"); crate::api::internal_error_reply() @@ -293,70 +343,20 @@ impl IntoWarpReply for OrderReplacementError { } } -pub fn create_order_response( - result: Result<(OrderUid, Option), AddOrderError>, -) -> ApiReply { - match result { - Ok((uid, _)) => with_status(warp::reply::json(&uid), StatusCode::CREATED), - Err(err) => err.into_warp_reply(), - } -} - -pub fn post_order( - orderbook: Arc, -) -> impl Filter + Clone { - create_order_request().and_then(move |order: OrderCreation| { - let orderbook = orderbook.clone(); - async move { - let result = orderbook - .add_order(order.clone()) - .await - .map(|(order_uid, quote_metadata)| { - let quote_id = quote_metadata.as_ref().and_then(|q| q.id); - let quote_solver = quote_metadata.as_ref().map(|q| q.solver); - tracing::debug!(%order_uid, ?quote_id, ?quote_solver, "order created"); - (order_uid, quote_metadata.and_then(|quote| quote.id)) - }) - .inspect_err(|err| { - tracing::debug!(?order, ?err, "error creating order"); - }); - - Result::<_, Infallible>::Ok(create_order_response(result)) - } - }) -} - #[cfg(test)] mod tests { - use { - super::*, - crate::api::response_body, - model::order::{OrderCreation, OrderUid}, - serde_json::json, - warp::{Reply, test::request}, - }; + use {super::*, crate::api::response_body, model::order::OrderUid, serde_json::json}; - #[tokio::test] - async fn create_order_request_ok() { - let filter = create_order_request(); - let order_payload = OrderCreation::default(); - let request = request() - .path("/v1/orders") - .method("POST") - .header("content-type", "application/json") - .json(&order_payload); - let result = request.filter(&filter).await.unwrap(); - assert_eq!(result, order_payload); - } + type Result = std::result::Result<(StatusCode, Json), AddOrderError>; #[tokio::test] async fn create_order_response_created() { let uid = OrderUid([1u8; 56]); - let response = create_order_response(Ok((uid, Some(42)))).into_response(); + let response = Result::Ok((StatusCode::CREATED, Json(uid))).into_response(); assert_eq!(response.status(), StatusCode::CREATED); let body = response_body(response).await; let body: serde_json::Value = serde_json::from_slice(body.as_slice()).unwrap(); - let expected= json!( + let expected = json!( 
"0x0101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101" ); assert_eq!(body, expected); @@ -364,7 +364,7 @@ mod tests { #[tokio::test] async fn create_order_response_duplicate() { - let response = create_order_response(Err(AddOrderError::DuplicatedOrder)).into_response(); + let response = Result::Err(AddOrderError::DuplicatedOrder).into_response(); assert_eq!(response.status(), StatusCode::BAD_REQUEST); let body = response_body(response).await; let body: serde_json::Value = serde_json::from_slice(body.as_slice()).unwrap(); diff --git a/crates/orderbook/src/api/post_quote.rs b/crates/orderbook/src/api/post_quote.rs index 57cc81c680..90febb2575 100644 --- a/crates/orderbook/src/api/post_quote.rs +++ b/crates/orderbook/src/api/post_quote.rs @@ -1,80 +1,72 @@ use { super::post_order::{AppDataValidationErrorWrapper, PartialValidationErrorWrapper}, crate::{ - api::{self, ApiReply, IntoWarpReply, convert_json_response, error, rich_error}, - quoter::{OrderQuoteError, QuoteHandler}, + api::{AppState, error, rich_error}, + quoter::OrderQuoteError, + }, + axum::{ + Json, + body, + extract::State, + response::{IntoResponse, Response}, }, - anyhow::Result, model::quote::OrderQuoteRequest, reqwest::StatusCode, shared::order_quoting::CalculateQuoteError, - std::{convert::Infallible, sync::Arc}, - thiserror::Error, - warp::{Filter, Rejection}, + std::sync::Arc, }; -fn post_quote_request() -> impl Filter + Clone { - warp::path!("v1" / "quote") - .and(warp::post()) - .and(api::extract_payload()) -} +pub async fn post_quote_handler(State(state): State>, body: body::Bytes) -> Response { + // TODO: remove after all downstream callers have been notified of the status + // code changes + let Ok(request) = serde_json::from_slice::(&body) else { + return StatusCode::BAD_REQUEST.into_response(); + }; -pub fn post_quote( - quotes: Arc, -) -> impl Filter + Clone { - post_quote_request().and_then(move |request: OrderQuoteRequest| { - let quotes = quotes.clone(); - async move { - let result = quotes - .calculate_quote(&request) - .await - .map_err(OrderQuoteErrorWrapper); - if let Err(err) = &result { - tracing::warn!(%err, ?request, "post_quote error"); - } - Result::<_, Infallible>::Ok(convert_json_response(result)) - } - }) + state + .quotes + .calculate_quote(&request) + .await + .map(Json) + .inspect_err(|err| tracing::warn!(%err, ?request, "post_quote error")) + .into_response() } -#[derive(Debug, Error)] -#[error(transparent)] -pub struct OrderQuoteErrorWrapper(pub OrderQuoteError); -impl IntoWarpReply for OrderQuoteErrorWrapper { - fn into_warp_reply(self) -> ApiReply { - match self.0 { - OrderQuoteError::AppData(err) => AppDataValidationErrorWrapper(err).into_warp_reply(), - OrderQuoteError::Order(err) => PartialValidationErrorWrapper(err).into_warp_reply(), - OrderQuoteError::CalculateQuote(err) => { - CalculateQuoteErrorWrapper(err).into_warp_reply() - } +impl IntoResponse for OrderQuoteError { + fn into_response(self) -> Response { + match self { + OrderQuoteError::AppData(err) => AppDataValidationErrorWrapper(err).into_response(), + OrderQuoteError::Order(err) => PartialValidationErrorWrapper(err).into_response(), + OrderQuoteError::CalculateQuote(err) => CalculateQuoteErrorWrapper(err).into_response(), } } } pub struct CalculateQuoteErrorWrapper(CalculateQuoteError); -impl IntoWarpReply for CalculateQuoteErrorWrapper { - fn into_warp_reply(self) -> ApiReply { +impl IntoResponse for CalculateQuoteErrorWrapper { + fn into_response(self) -> Response 
{ match self.0 { - CalculateQuoteError::Price { source, .. } => source.into_warp_reply(), - CalculateQuoteError::SellAmountDoesNotCoverFee { fee_amount } => { - warp::reply::with_status( - rich_error( - "SellAmountDoesNotCoverFee", - "The sell amount for the sell order is lower than the fee.", - serde_json::json!({ "fee_amount": fee_amount }), - ), - StatusCode::BAD_REQUEST, - ) + CalculateQuoteError::Price { source, .. } => { + super::PriceEstimationErrorWrapper(source).into_response() } - CalculateQuoteError::QuoteNotVerified => warp::reply::with_status( + CalculateQuoteError::SellAmountDoesNotCoverFee { fee_amount } => ( + StatusCode::BAD_REQUEST, + rich_error( + "SellAmountDoesNotCoverFee", + "The sell amount for the sell order is lower than the fee.", + serde_json::json!({ "fee_amount": fee_amount }), + ), + ) + .into_response(), + CalculateQuoteError::QuoteNotVerified => ( + StatusCode::BAD_REQUEST, error( "QuoteNotVerified", "No quote for this trade could be verified to be accurate. Orders for this \ trade will likely not be executed.", ), - StatusCode::BAD_REQUEST, - ), + ) + .into_response(), CalculateQuoteError::Other(err) => { tracing::error!(?err, "CalculateQuoteErrorWrapper"); crate::api::internal_error_reply() @@ -110,7 +102,6 @@ mod tests { serde_json::json, shared::order_quoting::CalculateQuoteError, std::{str::FromStr, time::Duration}, - warp::{Reply, test::request}, }; #[test] @@ -270,32 +261,6 @@ mod tests { ); } - #[tokio::test] - async fn post_quote_request_ok() { - let filter = post_quote_request(); - let request_payload = OrderQuoteRequest::default(); - let request = request() - .path("/v1/quote") - .method("POST") - .header("content-type", "application/json") - .json(&request_payload); - let result = request.filter(&filter).await.unwrap(); - assert_eq!(result, request_payload); - } - - #[tokio::test] - async fn post_quote_request_err() { - let filter = post_quote_request(); - let request_payload = OrderQuoteRequest::default(); - // Path is wrong! 
- let request = request() - .path("/v1/fee_quote") - .method("POST") - .header("content-type", "application/json") - .json(&request_payload); - assert!(request.filter(&filter).await.is_err()); - } - #[tokio::test] async fn post_quote_response_ok() { let quote = OrderQuote { @@ -324,10 +289,7 @@ mod tests { verified: false, protocol_fee_bps: Some("2".to_string()), }; - let response = convert_json_response::(Ok( - order_quote_response.clone(), - )) - .into_response(); + let response = (StatusCode::OK, Json(order_quote_response.clone())).into_response(); assert_eq!(response.status(), StatusCode::OK); let body = response_body(response).await; let body: serde_json::Value = serde_json::from_slice(body.as_slice()).unwrap(); @@ -337,12 +299,9 @@ mod tests { #[tokio::test] async fn post_quote_response_err() { - let response = convert_json_response::(Err( - OrderQuoteErrorWrapper(OrderQuoteError::CalculateQuote(CalculateQuoteError::Other( - anyhow!("Uh oh - error"), - ))), - )) - .into_response(); + let response = + OrderQuoteError::CalculateQuote(CalculateQuoteError::Other(anyhow!("Uh oh - error"))) + .into_response(); assert_eq!(response.status(), StatusCode::INTERNAL_SERVER_ERROR); let body = response_body(response).await; let body: serde_json::Value = serde_json::from_slice(body.as_slice()).unwrap(); diff --git a/crates/orderbook/src/api/put_app_data.rs b/crates/orderbook/src/api/put_app_data.rs index 4bf9e417d2..835628fa44 100644 --- a/crates/orderbook/src/api/put_app_data.rs +++ b/crates/orderbook/src/api/put_app_data.rs @@ -1,69 +1,81 @@ use { - crate::api::{IntoWarpReply, internal_error_reply}, - anyhow::Result, + crate::api::{AppState, internal_error_reply}, app_data::{AppDataDocument, AppDataHash}, - reqwest::StatusCode, - std::{convert::Infallible, sync::Arc}, - warp::{Filter, Rejection, body, reply}, + axum::{ + body::{self}, + extract::{Path, State}, + http::StatusCode, + response::{IntoResponse, Json, Response}, + }, + std::{str::FromStr, sync::Arc}, }; -fn request( - max_size: usize, -) -> impl Filter, AppDataDocument), Error = Rejection> + Clone { - let opt = warp::path::param::() - .map(Some) - .or_else(|_| async { Ok::<(Option,), std::convert::Infallible>((None,)) }); - warp::path!("v1" / "app_data" / ..) 
- .and(opt) - .and(warp::put()) - .and(body::content_length_limit(max_size as _)) - .and(body::json()) +pub async fn put_app_data_without_hash( + State(state): State>, + body: body::Bytes, +) -> Response { + // TODO: remove after all downstream callers have been notified of the status + // code changes + let Ok(document) = serde_json::from_slice::(&body) else { + return StatusCode::BAD_REQUEST.into_response(); + }; + + state + .app_data + .register(None, document.full_app_data.as_bytes()) + .await + .into_response() } -fn response( - result: Result<(crate::app_data::Registered, AppDataHash), crate::app_data::RegisterError>, -) -> super::ApiReply { - match result { - Ok((registered, hash)) => { - let status = match registered { - crate::app_data::Registered::New => StatusCode::CREATED, - crate::app_data::Registered::AlreadyExisted => StatusCode::OK, - }; - reply::with_status(reply::json(&hash), status) - } - Err(err) => err.into_warp_reply(), - } +pub async fn put_app_data_with_hash( + State(state): State>, + Path(hash): Path, + body: body::Bytes, +) -> Response { + // TODO: remove after all downstream callers have been notified of the status + // code changes + let Ok(hash) = AppDataHash::from_str(&hash) else { + return StatusCode::NOT_FOUND.into_response(); + }; + let Ok(document) = serde_json::from_slice::(&body) else { + return StatusCode::BAD_REQUEST.into_response(); + }; + + state + .app_data + .register(Some(hash), document.full_app_data.as_bytes()) + .await + .into_response() } -pub fn filter( - registry: Arc, -) -> impl Filter + Clone { - request(registry.size_limit()).and_then(move |hash, document: AppDataDocument| { - let registry = registry.clone(); - async move { - let result = registry - .register(hash, document.full_app_data.as_bytes()) - .await; - Result::<_, Infallible>::Ok(response(result)) - } - }) +impl IntoResponse for crate::app_data::Register { + fn into_response(self) -> Response { + let status = match self.status { + crate::app_data::RegistrationStatus::New => StatusCode::CREATED, + crate::app_data::RegistrationStatus::AlreadyExisted => StatusCode::OK, + }; + (status, Json(self.hash)).into_response() + } } -impl IntoWarpReply for crate::app_data::RegisterError { - fn into_warp_reply(self) -> super::ApiReply { +impl IntoResponse for crate::app_data::RegisterError { + fn into_response(self) -> Response { match self { - Self::Invalid(err) => reply::with_status( + Self::Invalid(err) => ( + StatusCode::BAD_REQUEST, super::error("AppDataInvalid", err.to_string()), + ) + .into_response(), + err @ Self::HashMismatch { .. } => ( StatusCode::BAD_REQUEST, - ), - err @ Self::HashMismatch { .. } => reply::with_status( super::error("AppDataHashMismatch", err.to_string()), + ) + .into_response(), + err @ Self::DataMismatch { .. } => ( StatusCode::BAD_REQUEST, - ), - err @ Self::DataMismatch { .. 
} => reply::with_status( super::error("AppDataMismatch", err.to_string()), - StatusCode::BAD_REQUEST, - ), + ) + .into_response(), Self::Other(err) => { tracing::error!(?err, "app_data::SaveError::Other"); internal_error_reply() diff --git a/crates/orderbook/src/api/version.rs b/crates/orderbook/src/api/version.rs index 04b5c698c5..20489608b7 100644 --- a/crates/orderbook/src/api/version.rs +++ b/crates/orderbook/src/api/version.rs @@ -1,16 +1,3 @@ -use { - reqwest::StatusCode, - std::convert::Infallible, - warp::{Filter, Rejection, Reply, reply::with_status}, -}; - -pub fn version() -> impl Filter,), Error = Rejection> + Clone { - warp::path!("v1" / "version") - .and(warp::get()) - .and_then(|| async { - Result::<_, Infallible>::Ok(Box::new(with_status( - env!("VERGEN_GIT_DESCRIBE"), - StatusCode::OK, - )) as Box) - }) +pub async fn version_handler() -> &'static str { + env!("VERGEN_GIT_DESCRIBE") } diff --git a/crates/orderbook/src/app_data.rs b/crates/orderbook/src/app_data.rs index 9844df4bac..bd6326f174 100644 --- a/crates/orderbook/src/app_data.rs +++ b/crates/orderbook/src/app_data.rs @@ -43,7 +43,7 @@ impl Registry { &self, hash: Option, document: &[u8], - ) -> Result<(Registered, AppDataHash), RegisterError> { + ) -> Result { let validated = self .validator .validate(document) @@ -60,8 +60,14 @@ impl Registry { .insert_full_app_data(&validated.hash, &validated.document) .await { - Ok(()) => Ok((Registered::New, validated.hash)), - Err(InsertError::Duplicate) => Ok((Registered::AlreadyExisted, validated.hash)), + Ok(()) => Ok(Register { + status: RegistrationStatus::New, + hash: validated.hash, + }), + Err(InsertError::Duplicate) => Ok(Register { + status: RegistrationStatus::AlreadyExisted, + hash: validated.hash, + }), Err(InsertError::Mismatch(existing)) => Err(RegisterError::DataMismatch { existing }), Err(InsertError::Other(err)) => Err(RegisterError::Other(err)), } @@ -95,13 +101,18 @@ impl Registry { } #[derive(Debug)] -pub enum Registered { +pub enum RegistrationStatus { /// The app data was newly added to the registry. New, /// An identical app data was already registered. 
AlreadyExisted, } +pub struct Register { + pub status: RegistrationStatus, + pub hash: AppDataHash, +} + #[derive(Debug, thiserror::Error)] pub enum RegisterError { #[error("appData is invalid: {0}")] diff --git a/crates/orderbook/src/run.rs b/crates/orderbook/src/run.rs index 981dfe3357..e6f9fa0f42 100644 --- a/crates/orderbook/src/run.rs +++ b/crates/orderbook/src/run.rs @@ -22,7 +22,7 @@ use { WETH9, support::Balances, }, - futures::{FutureExt, StreamExt}, + futures::StreamExt, model::{DomainSeparator, order::BUY_ETH_ADDRESS}, num::ToPrimitive, observe::metrics::{DEFAULT_METRICS_PORT, serve_metrics}, @@ -53,9 +53,8 @@ use { sources::{self, BaselineSource, uniswap_v2::UniV2BaselineSourceParameters}, token_info::{CachedTokenInfoFetcher, TokenInfoFetcher}, }, - std::{convert::Infallible, future::Future, net::SocketAddr, sync::Arc, time::Duration}, + std::{future::Future, net::SocketAddr, sync::Arc, time::Duration}, tokio::task::{self, JoinHandle}, - warp::Filter, }; pub async fn start(args: impl Iterator) { @@ -448,17 +447,15 @@ pub async fn run(args: Arguments) { )); check_database_connection(orderbook.as_ref()).await; - let quotes = Arc::new( - QuoteHandler::new( - order_validator, - optimal_quoter, - app_data.clone(), - args.volume_fee_config, - args.shared.volume_fee_bucket_overrides.clone(), - args.shared.enable_sell_equals_buy_volume_fee, - ) - .with_fast_quoter(fast_quoter), - ); + let quotes = QuoteHandler::new( + order_validator, + optimal_quoter, + app_data.clone(), + args.volume_fee_config, + args.shared.volume_fee_bucket_overrides.clone(), + args.shared.enable_sell_equals_buy_volume_fee, + ) + .with_fast_quoter(fast_quoter); let (shutdown_sender, shutdown_receiver) = tokio::sync::oneshot::channel(); let serve_api = serve_api( @@ -539,14 +536,14 @@ fn serve_api( database: Postgres, database_replica: Postgres, orderbook: Arc, - quotes: Arc, + quotes: QuoteHandler, app_data: Arc, address: SocketAddr, shutdown_receiver: impl Future + Send + 'static, native_price_estimator: Arc, quote_timeout: Duration, ) -> JoinHandle<()> { - let filter = api::handle_all_routes( + let app = api::handle_all_routes( database, database_replica, orderbook, @@ -554,19 +551,18 @@ fn serve_api( app_data, native_price_estimator, quote_timeout, - ) - .boxed(); + ); tracing::info!(%address, "serving order book"); - let warp_svc = warp::service(filter); - let make_svc = hyper::service::make_service_fn(move |_| { - let svc = warp_svc.clone(); - async move { Ok::<_, Infallible>(svc) } - }); - let server = hyper::Server::bind(&address) - .serve(make_svc) - .with_graceful_shutdown(shutdown_receiver) - .map(|_| ()); - task::spawn(server) + + let server = axum::Server::bind(&address) + .serve(app.into_make_service()) + .with_graceful_shutdown(shutdown_receiver); + + task::spawn(async move { + if let Err(err) = server.await { + tracing::error!(?err, "server error"); + } + }) } /// Check that important constants such as the EIP 712 Domain Separator and diff --git a/crates/orderbook/src/solver_competition.rs b/crates/orderbook/src/solver_competition.rs index 9cd1c2c2e0..1e852019af 100644 --- a/crates/orderbook/src/solver_competition.rs +++ b/crates/orderbook/src/solver_competition.rs @@ -24,7 +24,7 @@ pub trait SolverCompetitionStoring: Send + Sync { async fn load_competition( &self, identifier: Identifier, - ) -> Result; + ) -> Result; /// Retrieves the solver competition for the most recent auction. 
/// diff --git a/crates/solvers/src/api/routes/healthz.rs b/crates/solvers/src/api/routes/healthz.rs index 7e39e585dc..fc34fe28c8 100644 --- a/crates/solvers/src/api/routes/healthz.rs +++ b/crates/solvers/src/api/routes/healthz.rs @@ -1,5 +1,8 @@ -use axum::{http::StatusCode, response::IntoResponse}; +use axum::{ + http::StatusCode, + response::{IntoResponse, Response}, +}; -pub async fn healthz() -> impl IntoResponse { - StatusCode::OK +pub async fn healthz() -> Response { + StatusCode::OK.into_response() } diff --git a/playground/docker-compose.non-interactive.yml b/playground/docker-compose.non-interactive.yml index c9d5fca62f..356fc8652b 100644 --- a/playground/docker-compose.non-interactive.yml +++ b/playground/docker-compose.non-interactive.yml @@ -115,7 +115,7 @@ services: - NODE_URL=http://chain:8545 - SIMULATION_NODE_URL=http://chain:8545 - SETTLE_INTERVAL=15s - - GAS_ESTIMATORS=Web3 + - GAS_ESTIMATORS=Web3,Alloy - PRICE_ESTIMATORS=None - NATIVE_PRICE_ESTIMATORS=baseline - BLOCK_STREAM_POLL_INTERVAL=1s From 6bd3e6f3730a57a189c070c71acae68075412762 Mon Sep 17 00:00:00 2001 From: "Jan [Yann]" <4518474+fafk@users.noreply.github.com> Date: Fri, 13 Feb 2026 10:55:37 +0100 Subject: [PATCH 053/219] Propagate request ID from Forwarder (#4150) # Description I wanted to get all logs for a quote request, but when I searched for request ID in OpenSearch I only got requests from the orderbook but none from the autopilot which should have shown native price fetching. This PR adds request ID propagation to the forwarder. --- .../shared/src/price_estimation/native/forwarder.rs | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/crates/shared/src/price_estimation/native/forwarder.rs b/crates/shared/src/price_estimation/native/forwarder.rs index 4d6aaa72d4..28089b9a1a 100644 --- a/crates/shared/src/price_estimation/native/forwarder.rs +++ b/crates/shared/src/price_estimation/native/forwarder.rs @@ -35,14 +35,15 @@ impl Forwarder { .join(format!("native_price/{:?}", token).as_str()) .context("failed to construct autopilot URL")?; - let response = self + let mut request = self .client .get(url) .query(&[("timeout_ms", timeout.as_millis() as u64)]) - .timeout(timeout) - .send() - .await - .context("failed to send request")?; + .timeout(timeout); + if let Some(id) = observe::distributed_tracing::request_id::from_current_span() { + request = request.header("X-REQUEST-ID", id); + } + let response = request.send().await.context("failed to send request")?; match response.status() { StatusCode::OK => { From 4e243385822417340b09fabecf880d9791742201 Mon Sep 17 00:00:00 2001 From: Aryan Godara <65490434+AryanGodara@users.noreply.github.com> Date: Fri, 13 Feb 2026 15:44:58 +0530 Subject: [PATCH 054/219] Add openAPI reference to ink (#4111) # Description Added Ink staging and prod URLs for OpenAPI spec --- crates/orderbook/openapi.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/crates/orderbook/openapi.yml b/crates/orderbook/openapi.yml index 9434a62d98..b47831b118 100644 --- a/crates/orderbook/openapi.yml +++ b/crates/orderbook/openapi.yml @@ -43,6 +43,10 @@ servers: url: "https://api.cow.fi/plasma" - description: Plasma (Staging) url: "https://barn.api.cow.fi/plasma" + - description: Ink (Prod) + url: "https://api.cow.fi/ink" + - description: Ink (Staging) + url: "https://barn.api.cow.fi/ink" - description: Sepolia (Prod) url: "https://api.cow.fi/sepolia" - description: Sepolia (Staging) From d28b2a019f6a524d344c263bc7d304306c8ec2cf Mon Sep 17 00:00:00 2001 From: Martin 
Magnus Date: Fri, 13 Feb 2026 11:29:32 +0100 Subject: [PATCH 055/219] Don't re-hash appdata every time (#4152) # Description A surprising amount of time when building a new auction is lost when assembling the final orders. This step involves inspecting the appdata and recovering the partner fee from that. The current implementation has 2 major flaws: * it re-parses the appdata during every auction * it parses AND hashes the data in every auction While the ideal fix would be to only parse the appdata once this is more involved and we can already get an easy win by just omitting the hashing part. I'll follow up with a PR that avoids the unnecessary parsing as well. # Changes - don't validate the appdata only parse it since that skips the re-hashing of the data which we don't need at this point ## How to test Measured performance with tempo `assemble_orders` went from 88ms to 50ms Before Screenshot 2026-02-13 at 08 22 33 After Screenshot 2026-02-13 at 08 22 40 --------- Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- crates/autopilot/src/domain/fee/mod.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/crates/autopilot/src/domain/fee/mod.rs b/crates/autopilot/src/domain/fee/mod.rs index fcbea4ba92..966fdda141 100644 --- a/crates/autopilot/src/domain/fee/mod.rs +++ b/crates/autopilot/src/domain/fee/mod.rs @@ -13,7 +13,6 @@ use { domain::{self, eth}, }, alloy::primitives::{Address, U256}, - app_data::Validator, chrono::{DateTime, Utc}, rust_decimal::Decimal, shared::{ @@ -164,14 +163,13 @@ impl ProtocolFees { let Some(full_app_data) = order.metadata.full_app_data.as_ref() else { return vec![]; }; - let Ok(validated) = Validator::new(usize::MAX).validate(full_app_data.as_bytes()) else { + let Ok(parsed_app_data) = app_data::parse(full_app_data.as_bytes()) else { return vec![]; }; let mut accumulated = Decimal::ZERO; - validated - .protocol + parsed_app_data .partner_fee .iter() .map(move |partner_fee| { From bf0f77851c876ac2d5e28c6ddac9c4ff33aa298f Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Fri, 13 Feb 2026 12:08:49 +0100 Subject: [PATCH 056/219] Move storing order events fully off the hot path (#4149) # Description While building a new auction we also store a bunch of order events for debugging purposes. This already happens mostly in a background task but the conversion from the owned collection to a collection of the type the persistence layer expects still happens on the hot path. # Changes Introduce a second function for storing order events which takes a collection (`impl IntoIterator`) and a conversion function. This then allows us to move the collection as is into the background task and do the conversion there. ## How to test I measured the optimization using tempo but the exact numbers are a bit hard to nail down because only some of the instrumented logic is on the hot path and it doesn't get displayed that well in the tempo UI. Based on the time and size of the `assemble_orders` span it looks like the new version shaves off ~45ms or so. 
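The core idea is small: instead of converting the order IDs on the hot path and then spawning, the caller hands the owned collection plus a conversion function to the helper, and both the iteration and the per-item conversion run inside the spawned background task. A minimal, self-contained sketch of that pattern is shown below — the type and function names are stand-ins for illustration, not the actual persistence code, and `std::thread::spawn` stands in for `tokio::spawn`:

```rust
/// Illustrative stand-in for the real order UID type.
#[derive(Debug)]
struct OrderUid([u8; 4]);

/// Illustrative stand-in for the actual database insert.
fn store_order_events(uids: Vec<OrderUid>) {
    println!("stored {} order events: {uids:?}", uids.len());
}

/// Takes ownership of `items` and a conversion function so that both the
/// iteration and the per-item conversion happen in the background task,
/// keeping the caller's hot path free of that work.
fn store_order_events_owned<I, F>(items: I, convert: F) -> std::thread::JoinHandle<()>
where
    I: IntoIterator + Send + 'static,
    I::Item: Send,
    F: Fn(I::Item) -> OrderUid + Send + 'static,
{
    std::thread::spawn(move || {
        let uids: Vec<OrderUid> = items.into_iter().map(convert).collect();
        store_order_events(uids);
    })
}

fn main() {
    // The caller only moves the collection; no per-item work happens here.
    let raw_ids: Vec<[u8; 4]> = vec![[1; 4], [2; 4]];
    // The handle is returned only so this demo can wait for the background
    // work; the real code detaches the task instead.
    store_order_events_owned(raw_ids, OrderUid).join().unwrap();
}
```

The diff below applies the same idea with `tokio::spawn` and the existing `store_order_events` helper.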
Before Screenshot 2026-02-13 at 07 51 15 After Screenshot 2026-02-13 at 07 51 27 --- crates/autopilot/src/infra/persistence/mod.rs | 19 ++++++++++++++++++- crates/autopilot/src/solvable_orders.rs | 12 ++++++------ 2 files changed, 24 insertions(+), 7 deletions(-) diff --git a/crates/autopilot/src/infra/persistence/mod.rs b/crates/autopilot/src/infra/persistence/mod.rs index 3f3d0b2f5f..207f8d645d 100644 --- a/crates/autopilot/src/infra/persistence/mod.rs +++ b/crates/autopilot/src/infra/persistence/mod.rs @@ -294,10 +294,27 @@ impl Persistence { order_uids: impl IntoIterator, label: boundary::OrderEventLabel, ) { + let order_uids: Vec<_> = order_uids.into_iter().collect(); + self.store_order_events_owned(order_uids, std::convert::identity, label); + } + + /// A variants of [`store_order_events`] where [`items`] is already an owned + /// collection which allows us to move the logic to convert an item to a + /// [`domain::OrderUid`] into the background task as well. + pub fn store_order_events_owned( + &self, + items: I, + convert: F, + label: boundary::OrderEventLabel, + ) where + I: IntoIterator + Send + 'static, + I::Item: Send, + F: (Fn(I::Item) -> domain::OrderUid) + Send + 'static, + { let db = self.postgres.clone(); - let order_uids = order_uids.into_iter().collect(); tokio::spawn( async move { + let order_uids = items.into_iter().map(convert).collect(); match db.pool.acquire().await { Ok(mut tx) => { store_order_events(&mut tx, order_uids, label, Utc::now()).await; diff --git a/crates/autopilot/src/solvable_orders.rs b/crates/autopilot/src/solvable_orders.rs index bb2bd94d55..46a5c761ba 100644 --- a/crates/autopilot/src/solvable_orders.rs +++ b/crates/autopilot/src/solvable_orders.rs @@ -266,14 +266,14 @@ impl SolvableOrdersCache { if store_events { // spawning a background task since `order_events` table insert operation takes // a while and the result is ignored. - self.persistence.store_order_events( - invalid_order_uids.iter().map(|id| domain::OrderUid(id.0)), + self.persistence.store_order_events_owned( + invalid_order_uids, + |uid| domain::OrderUid(uid.0), OrderEventLabel::Invalid, ); - self.persistence.store_order_events( - filtered_order_events - .iter() - .map(|id| domain::OrderUid(id.0)), + self.persistence.store_order_events_owned( + filtered_order_events, + |uid| domain::OrderUid(uid.0), OrderEventLabel::Filtered, ); } From 49d8b6e3b2865c644c2902e346d88ac7bbae9d44 Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Fri, 13 Feb 2026 12:08:57 +0100 Subject: [PATCH 057/219] Make autopilot sync more granular (#4144) # Description While https://github.com/cowprotocol/services/pull/4141 achieved the goal of reducing the time before we build a fresh auction it also came with the oddity that the metrics suggest slower order execution while the time to happy moo SLI was very healthy. The reason for the weird metrics is that the autopilot task that monitors for landed transactions previously waited for ALL of the maintenance task to finish before looking up the settlement from the DB (see [here](https://github.com/cowprotocol/services/blob/main/crates/autopilot/src/run_loop.rs#L843-L850)) while the PR unintentionally changed this part as well to only wait for the minimum processing. 
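The fix described below hinges on one primitive: waiting until a monotonically increasing "last processed block" value, published through a `tokio::sync::watch` channel, reaches a target block. A minimal sketch of that waiting logic, assuming tokio with the `full` feature set and illustrative names rather than the exact `MaintenanceSync` code:

```rust
use tokio::sync::watch;

/// Wait until the published block number reaches `target`.
/// Returns immediately if it is already high enough, and gives up
/// if the publishing side has been dropped.
async fn wait_until_block_processed(mut processed: watch::Receiver<u64>, target: u64) {
    loop {
        // `borrow_and_update` marks the current value as seen so a
        // subsequent `changed()` only wakes up for newer values.
        if *processed.borrow_and_update() >= target {
            return;
        }
        if processed.changed().await.is_err() {
            return; // sender dropped, no more updates will arrive
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = watch::channel(0u64);

    // Simulated maintenance task publishing processed block numbers.
    tokio::spawn(async move {
        for block in 1..=5u64 {
            tokio::time::sleep(std::time::Duration::from_millis(10)).await;
            let _ = tx.send(block);
        }
    });

    wait_until_block_processed(rx, 3).await;
    println!("block 3 processed, safe to continue");
}
```

With two such receivers — one updated after the essential indexing and one after all optional maintenance — the run loop can choose how much processing it needs to wait for in each place.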
Improved latency metrics Screenshot 2026-02-11 at 16 15 19 Degraded runloop and settle metrics Screenshot 2026-02-11 at 16 15 27 Despite not degraded time to happy moo Screenshot 2026-02-11 at 16 16 14 # Changes Fix metrics by allowing `MaintenanceSync` to either wait for partial or full processing of the given block. Before building the auction we now explicitly await partial processing to be done while later on when we determine whether a solution was submitted we wait for the block to be fully processed. # Alternatives Considered * have 1 update channel for every maintenance sub task and have it push updates => discarded because it's more granular than we need * replace watch stream with struct with atomic counters for every update type => discarded because awaiting changes on those counters is more complicated than just 2 watch streams Overall I'm not super happy with the solution so I'm happy to hear alternative suggestions if you have any. --- crates/autopilot/src/maintenance.rs | 54 ++++++++++++++++++++++------- crates/autopilot/src/run_loop.rs | 29 ++++++---------- 2 files changed, 52 insertions(+), 31 deletions(-) diff --git a/crates/autopilot/src/maintenance.rs b/crates/autopilot/src/maintenance.rs index b279ea5c20..f17e66abf6 100644 --- a/crates/autopilot/src/maintenance.rs +++ b/crates/autopilot/src/maintenance.rs @@ -40,25 +40,42 @@ use { pub struct MaintenanceSync { /// How long the autopilot wants to wait at most. timeout: Duration, - last_processed_block: watch::Receiver, + /// This is the last block where essential processing like indexing events + /// was completed. + partially_processed_block: watch::Receiver, + /// This is the last block that has been fully processed. + fully_processed_block: watch::Receiver, +} + +pub enum SyncTarget { + /// Essential processing (e.g. event indexing) of the given block is + /// sufficient, + PartiallyProcessed(u64), + /// The given block as to be fully processed. 
+ FullyProcessed(u64), } impl MaintenanceSync { - pub async fn wait_until_block_processed(&self, block: u64) { + pub async fn wait_until_block_processed(&self, target: SyncTarget) { let _timer = observe::metrics::metrics() .on_auction_overhead_start("autopilot", "wait_for_maintenance"); - if let Err(_timeout) = tokio::time::timeout(self.timeout, self.wait_inner(block)).await { + if let Err(_timeout) = tokio::time::timeout(self.timeout, self.wait_inner(target)).await { tracing::debug!("timed out waiting for maintenance"); } } - async fn wait_inner(&self, target_block: u64) { - if *self.last_processed_block.borrow() >= target_block { + async fn wait_inner(&self, target: SyncTarget) { + let (relevant_updates, target_block) = match target { + SyncTarget::FullyProcessed(block) => (&self.fully_processed_block, block), + SyncTarget::PartiallyProcessed(block) => (&self.partially_processed_block, block), + }; + + if *relevant_updates.borrow() >= target_block { return; } - let mut stream = WatchStream::new(self.last_processed_block.clone()); + let mut stream = WatchStream::new(relevant_updates.clone()); loop { let processed_block = stream.next().await.unwrap(); if processed_block >= target_block { @@ -110,7 +127,8 @@ impl Maintenance { blocks: CurrentBlockWatcher, timeout: Duration, ) -> MaintenanceSync { - let (sender, receiver) = watch::channel(blocks.borrow().number); + let (full_tx, full_rx) = watch::channel(blocks.borrow().number); + let (partial_tx, partial_rx) = watch::channel(blocks.borrow().number); tokio::task::spawn(async move { let mut stream = into_stream(blocks); @@ -119,7 +137,7 @@ impl Maintenance { .next() .await .expect("block stream terminated unexpectedly"); - self.index_until_block(block, &sender) + self.index_until_block(block, &partial_tx, &full_tx) .instrument(tracing::info_span!( "autopilot_maintenance", block = block.number @@ -129,12 +147,18 @@ impl Maintenance { }); MaintenanceSync { - last_processed_block: receiver, + partially_processed_block: partial_rx, + fully_processed_block: full_rx, timeout, } } - async fn index_until_block(&self, block: BlockInfo, last_processed_block: &watch::Sender) { + async fn index_until_block( + &self, + block: BlockInfo, + partially_processed_block: &watch::Sender, + fully_processed_block: &watch::Sender, + ) { metrics().last_seen_block.set(block.number); let start = Instant::now(); @@ -150,8 +174,11 @@ impl Maintenance { ); metrics().last_updated_block.set(block.number); metrics().updates.with_label_values(&["success"]).inc(); - if let Err(err) = last_processed_block.send(block.number) { - tracing::warn!(?err, "nobody listening for processed blocks anymore"); + if let Err(err) = partially_processed_block.send(block.number) { + tracing::warn!( + ?err, + "nobody listening for partially processed blocks anymore" + ); } // only after we informed the run_loop that the essential updates are done we @@ -161,6 +188,9 @@ impl Maintenance { tracing::warn!(?err, "failed to run optional maintenance"); return; } + if let Err(err) = fully_processed_block.send(block.number) { + tracing::warn!(?err, "nobody listening for fully processed blocks anymore"); + } tracing::info!( time = ?start.elapsed(), "successfully ran optional maintenance tasks" diff --git a/crates/autopilot/src/run_loop.rs b/crates/autopilot/src/run_loop.rs index 7a8a0be012..7e36d04e68 100644 --- a/crates/autopilot/src/run_loop.rs +++ b/crates/autopilot/src/run_loop.rs @@ -19,7 +19,7 @@ use { solvers::dto::{settle, solve}, }, leader_lock_tracker::LeaderLockTracker, -
maintenance::MaintenanceSync, + maintenance::{MaintenanceSync, SyncTarget}, run::Liveness, shutdown_controller::ShutdownController, solvable_orders::SolvableOrdersCache, @@ -208,7 +208,12 @@ impl RunLoop { current_block }; - self.run_maintenance(&auction_block).await; + { + let _timer = Metrics::get().service_maintenance_time.start_timer(); + self.maintenance + .wait_until_block_processed(SyncTarget::PartiallyProcessed(auction_block.number)) + .await; + } match self .solvable_orders_cache @@ -254,16 +259,6 @@ impl RunLoop { Some(auction) } - /// Runs maintenance on all components to ensure the system uses - /// the latest available state. - async fn run_maintenance(&self, block: &BlockInfo) { - let start = Instant::now(); - self.maintenance - .wait_until_block_processed(block.number) - .await; - Metrics::ran_maintenance(start.elapsed()); - } - async fn cut_auction(&self) -> Option { let Some(auction) = self.solvable_orders_cache.current_auction().await else { tracing::debug!("no current auction"); @@ -842,7 +837,9 @@ impl RunLoop { let block = ethrpc::block_stream::next_block(self.eth.current_block()).await; // Run maintenance to ensure the system processed the last available block so // it's possible to find the tx in the DB in the next line. - self.run_maintenance(&block).await; + self.maintenance + .wait_until_block_processed(SyncTarget::FullyProcessed(block.number)) + .await; match self .persistence @@ -1066,12 +1063,6 @@ impl Metrics { .observe(elapsed.as_secs_f64()); } - fn ran_maintenance(elapsed: Duration) { - Self::get() - .service_maintenance_time - .observe(elapsed.as_secs_f64()); - } - fn single_run_completed(elapsed: Duration) { Self::get().single_run_time.observe(elapsed.as_secs_f64()); } From eb99f9920ef926757fe7cb4ac117b1119fb3566c Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Fri, 13 Feb 2026 12:09:40 +0100 Subject: [PATCH 058/219] Store orders and quotes reference counted in auction (#4153) # Description Currently the solvable orders cache stores raw owned instances of orders and quotes. This is painful for 2 reasons: * `solvable_orders_after()` clones all orders and quotes to do the incremental update (see [here](https://github.com/cowprotocol/services/blob/main/crates/autopilot/src/solvable_orders.rs#L377-L378)) * `update()` clones all orders at the start (see [here](https://github.com/cowprotocol/services/blob/main/crates/autopilot/src/solvable_orders.rs#L172-L176)) Both of these can be made very cheap by storing `Arc`ed instances. # Alternatives considered Theoretically one could also collect `&Order` when building the auction and use `std::mem::take()` to move the existing hashmaps into `solvable_orders_after()`. But using `&Order` is less flexible and more complicated and `std::mem::take()` requires extra attention when handling errors in `solvable_orders_after()` to not leave the cache in a broken state or completely empty. # Changes - put all orders and quotes in `Arc`s. ## How to test I only tested the approach with using `&Order` but it's reasonable to assume that the improvement for `cloning_relevant_orders()` will be very similar when cloning `Arc`s instead of collecting references. Also I expect the highlighted section of `get_solvable_orders` (1st screenshot) to disappear as well with this PR. Both speed ups would combine to ~50ms. 
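For context, the reason this is cheap is that cloning an `Arc` only bumps a reference count, so handing the full order map to the incremental update or the auction builder becomes one pointer copy per entry instead of a deep copy, and the rare places that still need to mutate an order (mostly tests) can use `Arc::make_mut`. A small standalone sketch of that idea — the `Order` struct here is a trimmed stand-in for the real model type:

```rust
use std::{collections::HashMap, sync::Arc};

// Stand-in for the real order model; the actual struct is much larger,
// which is exactly why deep-cloning it per auction was expensive.
#[derive(Clone, Debug)]
struct Order {
    uid: u64,
    partially_fillable: bool,
}

fn main() {
    let mut cache: HashMap<u64, Arc<Order>> = HashMap::new();
    cache.insert(1, Arc::new(Order { uid: 1, partially_fillable: false }));

    // Handing the current set to the incremental update is now cheap:
    // each entry clone is an atomic ref-count increment, not a deep copy.
    let snapshot: HashMap<u64, Arc<Order>> = cache.clone();

    // In the rare case an order must be mutated (e.g. in tests),
    // `Arc::make_mut` clones the inner value only if it is still shared.
    let mut order = snapshot.get(&1).unwrap().clone();
    Arc::make_mut(&mut order).partially_fillable = true;

    assert!(!cache.get(&1).unwrap().partially_fillable);
    assert!(order.partially_fillable);
    println!(
        "cache untouched for order {}, local copy mutated",
        cache.get(&1).unwrap().uid
    );
}
```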
Before Screenshot 2026-02-13 at 08 51 46 After Screenshot 2026-02-13 at 08 52 09 --- crates/autopilot/src/boundary/mod.rs | 11 +- crates/autopilot/src/boundary/order.rs | 15 +- crates/autopilot/src/database/auction.rs | 13 +- crates/autopilot/src/domain/fee/mod.rs | 10 +- crates/autopilot/src/infra/persistence/mod.rs | 10 +- crates/autopilot/src/solvable_orders.rs | 301 ++++++++++-------- 6 files changed, 207 insertions(+), 153 deletions(-) diff --git a/crates/autopilot/src/boundary/mod.rs b/crates/autopilot/src/boundary/mod.rs index 7142d7e8c0..ab71671bfa 100644 --- a/crates/autopilot/src/boundary/mod.rs +++ b/crates/autopilot/src/boundary/mod.rs @@ -22,7 +22,12 @@ pub use { }, shared::order_validation::{Amounts, is_order_outside_market_price}, }; -use {crate::domain, ethrpc::Web3, std::collections::HashMap, url::Url}; +use { + crate::domain, + ethrpc::Web3, + std::{collections::HashMap, sync::Arc}, + url::Url, +}; pub mod events; pub mod order; @@ -33,8 +38,8 @@ pub fn web3_client(ethrpc: &Url, ethrpc_args: &shared::ethrpc::Arguments) -> Web } pub struct SolvableOrders { - pub orders: HashMap, - pub quotes: HashMap, + pub orders: HashMap>, + pub quotes: HashMap>, pub latest_settlement_block: u64, /// Used as a checkpoint - meaning at this point in time /// **at least** the stored orders were present in the system. diff --git a/crates/autopilot/src/boundary/order.rs b/crates/autopilot/src/boundary/order.rs index 372e4090b3..04d43fb5bf 100644 --- a/crates/autopilot/src/boundary/order.rs +++ b/crates/autopilot/src/boundary/order.rs @@ -4,7 +4,7 @@ use { }; pub fn to_domain( - order: model::order::Order, + order: &model::order::Order, protocol_fees: Vec, quote: Option, ) -> domain::Order { @@ -30,20 +30,27 @@ pub fn to_domain( partially_fillable: order.data.partially_fillable, executed: remaining_order.executed_amount.into(), pre_interactions: if order_is_untouched { - order.interactions.pre.into_iter().map(Into::into).collect() + order + .interactions + .pre + .iter() + .cloned() + .map(Into::into) + .collect() } else { Default::default() }, post_interactions: order .interactions .post - .into_iter() + .iter() + .cloned() .map(Into::into) .collect(), sell_token_balance: order.data.sell_token_balance.into(), buy_token_balance: order.data.buy_token_balance.into(), app_data: order.data.app_data.into(), - signature: order.signature.into(), + signature: order.signature.clone().into(), quote, } } diff --git a/crates/autopilot/src/database/auction.rs b/crates/autopilot/src/database/auction.rs index ac24f60772..a1a2ea21df 100644 --- a/crates/autopilot/src/database/auction.rs +++ b/crates/autopilot/src/database/auction.rs @@ -11,7 +11,7 @@ use { event_storing_helpers::{create_db_search_parameters, create_quote_row}, order_quoting::{QuoteData, QuoteSearchParameters, QuoteStoring}, }, - std::{collections::HashMap, ops::DerefMut}, + std::{collections::HashMap, ops::DerefMut, sync::Arc}, }; #[async_trait::async_trait] @@ -75,11 +75,11 @@ impl Postgres { sqlx::query("SET TRANSACTION ISOLATION LEVEL REPEATABLE READ") .execute(ex.deref_mut()) .await?; - let orders: HashMap = + let orders: HashMap> = database::orders::solvable_orders(&mut ex, i64::from(min_valid_to)) .map(|result| match result { Ok(order) => full_order_into_model_order(order) - .map(|order| (domain::OrderUid(order.metadata.uid.0), order)), + .map(|order| (domain::OrderUid(order.metadata.uid.0), Arc::new(order))), Err(err) => Err(anyhow::Error::from(err)), }) .try_collect() @@ -88,7 +88,12 @@ impl Postgres { .await? 
.to_u64() .context("latest_settlement_block is not u64")?; - let quotes = self.read_quotes(orders.keys()).await?; + let quotes = self + .read_quotes(orders.keys()) + .await? + .into_iter() + .map(|(uid, quote)| (uid, Arc::new(quote))) + .collect(); Ok(boundary::SolvableOrders { orders, quotes, diff --git a/crates/autopilot/src/domain/fee/mod.rs b/crates/autopilot/src/domain/fee/mod.rs index 966fdda141..d6bdd659ff 100644 --- a/crates/autopilot/src/domain/fee/mod.rs +++ b/crates/autopilot/src/domain/fee/mod.rs @@ -233,7 +233,7 @@ impl ProtocolFees { /// protocol fees if necessary. pub fn apply( &self, - order: boundary::Order, + order: &boundary::Order, quote: Option, surplus_capturing_jit_order_owners: &[eth::Address], ) -> domain::Order { @@ -248,7 +248,7 @@ impl ProtocolFees { }); let partner_fee = - Self::get_partner_fee(&order, &reference_quote, self.max_partner_fee.get()); + Self::get_partner_fee(order, &reference_quote, self.max_partner_fee.get()); if surplus_capturing_jit_order_owners.contains(&order.metadata.owner) { return boundary::order::to_domain(order, partner_fee, quote); @@ -259,7 +259,7 @@ impl ProtocolFees { fn apply_policies( &self, - order: boundary::Order, + order: &boundary::Order, quote: domain::Quote, partner_fees: Vec, ) -> domain::Order { @@ -273,8 +273,8 @@ impl ProtocolFees { let protocol_fees = fee_policies .iter() - .filter_map(|fee_policy| Self::protocol_fee_into_policy(&order, "e, fee_policy)) - .flat_map(|policy| self.variant_fee_apply(&order, "e, policy)) + .filter_map(|fee_policy| Self::protocol_fee_into_policy(order, "e, fee_policy)) + .flat_map(|policy| self.variant_fee_apply(order, "e, policy)) .chain(partner_fees) .collect::>(); diff --git a/crates/autopilot/src/infra/persistence/mod.rs b/crates/autopilot/src/infra/persistence/mod.rs index 207f8d645d..b6980c749a 100644 --- a/crates/autopilot/src/infra/persistence/mod.rs +++ b/crates/autopilot/src/infra/persistence/mod.rs @@ -558,8 +558,8 @@ impl Persistence { /// order creation timestamp, and minimum validity period. pub async fn solvable_orders_after( &self, - mut current_orders: HashMap, - mut current_quotes: HashMap, + mut current_orders: HashMap>, + mut current_quotes: HashMap>, after_timestamp: DateTime, after_block: u64, min_valid_to: u32, @@ -587,7 +587,7 @@ impl Persistence { // Fetch the orders that were updated after the given block and were created or // cancelled after the given timestamp. 
- let next_orders: HashMap = { + let next_orders: HashMap> = { let _timer = Metrics::get() .database_queries .with_label_values(&["open_orders_after"]) @@ -600,7 +600,7 @@ impl Persistence { ) .map(|result| match result { Ok(order) => full_order_into_model_order(order) - .map(|order| (domain::OrderUid(order.metadata.uid.0), order)), + .map(|order| (domain::OrderUid(order.metadata.uid.0), Arc::new(order))), Err(err) => Err(anyhow::Error::from(err)), }) .try_collect() @@ -674,7 +674,7 @@ impl Persistence { let order_uid = domain::OrderUid(quote.order_uid.0); match dto::quote::into_domain(quote) { Ok(quote) => { - current_quotes.insert(order_uid, quote); + current_quotes.insert(order_uid, Arc::new(quote)); } Err(err) => tracing::warn!(?order_uid, ?err, "failed to convert quote from db"), } diff --git a/crates/autopilot/src/solvable_orders.rs b/crates/autopilot/src/solvable_orders.rs index 46a5c761ba..22db019b77 100644 --- a/crates/autopilot/src/solvable_orders.rs +++ b/crates/autopilot/src/solvable_orders.rs @@ -191,7 +191,10 @@ impl SolvableOrdersCache { .collect(); let (balances, orders, cow_amms) = { - let queries = orders.iter().map(Query::from_order).collect::>(); + let queries = orders + .iter() + .map(|o| Query::from_order(o.as_ref())) + .collect::>(); tokio::join!( self.fetch_balances(queries), self.filter_invalid_orders(orders, &mut counter, &mut invalid_order_uids,), @@ -303,9 +306,12 @@ impl SolvableOrdersCache { let quote = db_solvable_orders .quotes .get(&order.metadata.uid.into()) - .cloned(); - self.protocol_fees - .apply(order, quote, &surplus_capturing_jit_order_owners) + .map(|quote| quote.as_ref().clone()); + self.protocol_fees.apply( + order.as_ref(), + quote, + &surplus_capturing_jit_order_owners, + ) }) .collect(), prices: prices @@ -398,10 +404,10 @@ impl SolvableOrdersCache { /// Executed orders filtering in parallel. async fn filter_invalid_orders( &self, - mut orders: Vec, + mut orders: Vec>, counter: &mut OrderFilterCounter, invalid_order_uids: &mut HashSet, - ) -> Vec { + ) -> Vec> { let presignature_pending_orders = find_presignature_pending_orders(&orders); let (banned_user_orders, unsupported_token_orders) = tokio::join!( @@ -447,7 +453,10 @@ impl SolvableOrdersCache { /// Finds all orders whose owners or receivers are in the set of "banned" /// users. -async fn find_banned_user_orders(orders: &[Order], banned_users: &banned::Users) -> Vec { +async fn find_banned_user_orders( + orders: &[Arc], + banned_users: &banned::Users, +) -> Vec { let banned = banned_users .banned( orders @@ -484,7 +493,7 @@ async fn get_native_prices( /// Finds orders with pending presignatures. EIP-1271 signature validation is /// skipped entirely - the driver validates signatures before settlement. -fn find_presignature_pending_orders(orders: &[Order]) -> Vec { +fn find_presignature_pending_orders(orders: &[Arc]) -> Vec { orders .iter() .filter(|order| { @@ -500,11 +509,11 @@ fn find_presignature_pending_orders(orders: &[Order]) -> Vec { /// Removes orders that can't possibly be settled because there isn't enough /// balance. fn orders_with_balance( - mut orders: Vec, + mut orders: Vec>, balances: &Balances, settlement_contract: Address, filter_bypass_orders: &HashSet, -) -> Vec { +) -> Vec> { // Prefer newer orders over older ones. orders.sort_by_key(|order| std::cmp::Reverse(order.metadata.creation_date)); orders.retain(|order| { @@ -546,7 +555,7 @@ fn orders_with_balance( /// Filters out dust orders i.e. partially fillable orders that, when scaled /// have a 0 buy or sell amount. 
-fn filter_dust_orders(mut orders: Vec, balances: &Balances) -> Vec { +fn filter_dust_orders(mut orders: Vec>, balances: &Balances) -> Vec> { orders.retain(|order| { if !order.data.partially_fillable { return true; @@ -559,7 +568,7 @@ fn filter_dust_orders(mut orders: Vec, balances: &Balances) -> Vec }; let Ok(remaining) = - remaining_amounts::Remaining::from_order_with_balance(&order.into(), balance) + remaining_amounts::Remaining::from_order_with_balance(&order.as_ref().into(), balance) else { return false; }; @@ -577,12 +586,12 @@ fn filter_dust_orders(mut orders: Vec, balances: &Balances) -> Vec } async fn get_orders_with_native_prices( - orders: Vec, + orders: Vec>, native_price_estimator: &NativePriceUpdater, metrics: &Metrics, additional_tokens: impl IntoIterator, timeout: Duration, -) -> (Vec, BTreeMap) { +) -> (Vec>, BTreeMap) { let traded_tokens = orders .iter() .flat_map(|order| [order.data.sell_token, order.data.buy_token]) @@ -613,7 +622,7 @@ async fn get_orders_with_native_prices( } async fn find_unsupported_tokens( - orders: &[Order], + orders: &[Arc], bad_token: Arc, ) -> Vec { let bad_tokens = join_all( @@ -660,10 +669,10 @@ async fn find_unsupported_tokens( /// Filter out limit orders which are far enough outside the estimated native /// token price. fn filter_mispriced_limit_orders( - mut orders: Vec, + mut orders: Vec>, prices: &BTreeMap, price_factor: &BigDecimal, -) -> Vec { +) -> Vec> { orders.retain(|order| { if !order.is_limit_order() { return true; @@ -710,7 +719,7 @@ struct OrderFilterCounter { type Reason = &'static str; impl OrderFilterCounter { - fn new(metrics: &'static Metrics, orders: &[Order]) -> Self { + fn new(metrics: &'static Metrics, orders: &[Arc]) -> Self { // Eagerly store the candidate orders. This ensures that that gauge is // always up to date even if there are errors in the auction building // process. @@ -736,7 +745,7 @@ impl OrderFilterCounter { } /// Creates a new checkpoint from the current remaining orders. - fn checkpoint(&mut self, reason: Reason, orders: &[Order]) -> Vec { + fn checkpoint(&mut self, reason: Reason, orders: &[Arc]) -> Vec { let filtered_orders = orders .iter() .fold(self.orders.clone(), |mut order_uids, order| { @@ -784,7 +793,7 @@ impl OrderFilterCounter { /// If there are orders that have been filtered out since the last /// checkpoint these orders will get recorded with the readon "other". /// Returns these catch-all orders. 
- fn record(mut self, orders: &[Order]) -> Vec { + fn record(mut self, orders: &[Arc]) -> Vec { let removed = self.checkpoint("other", orders); self.metrics.auction_creations.inc(); @@ -840,18 +849,22 @@ mod tests { let token3 = Address::repeat_byte(3); let orders = vec![ - OrderBuilder::default() - .with_sell_token(token1) - .with_buy_token(token2) - .with_buy_amount(alloy::primitives::U256::ONE) - .with_sell_amount(alloy::primitives::U256::ONE) - .build(), - OrderBuilder::default() - .with_sell_token(token1) - .with_buy_token(token3) - .with_buy_amount(alloy::primitives::U256::ONE) - .with_sell_amount(alloy::primitives::U256::ONE) - .build(), + Arc::new( + OrderBuilder::default() + .with_sell_token(token1) + .with_buy_token(token2) + .with_buy_amount(alloy::primitives::U256::ONE) + .with_sell_amount(alloy::primitives::U256::ONE) + .build(), + ), + Arc::new( + OrderBuilder::default() + .with_sell_token(token1) + .with_buy_token(token3) + .with_buy_amount(alloy::primitives::U256::ONE) + .with_sell_amount(alloy::primitives::U256::ONE) + .build(), + ), ]; let mut native_price_estimator = MockNativePriceEstimating::new(); @@ -909,30 +922,38 @@ mod tests { let token5 = Address::repeat_byte(5); let orders = vec![ - OrderBuilder::default() - .with_sell_token(token1) - .with_buy_token(token2) - .with_buy_amount(alloy::primitives::U256::ONE) - .with_sell_amount(alloy::primitives::U256::ONE) - .build(), - OrderBuilder::default() - .with_sell_token(token2) - .with_buy_token(token3) - .with_buy_amount(alloy::primitives::U256::ONE) - .with_sell_amount(alloy::primitives::U256::ONE) - .build(), - OrderBuilder::default() - .with_sell_token(token1) - .with_buy_token(token3) - .with_buy_amount(alloy::primitives::U256::ONE) - .with_sell_amount(alloy::primitives::U256::ONE) - .build(), - OrderBuilder::default() - .with_sell_token(token2) - .with_buy_token(token4) - .with_buy_amount(alloy::primitives::U256::ONE) - .with_sell_amount(alloy::primitives::U256::ONE) - .build(), + Arc::new( + OrderBuilder::default() + .with_sell_token(token1) + .with_buy_token(token2) + .with_buy_amount(alloy::primitives::U256::ONE) + .with_sell_amount(alloy::primitives::U256::ONE) + .build(), + ), + Arc::new( + OrderBuilder::default() + .with_sell_token(token2) + .with_buy_token(token3) + .with_buy_amount(alloy::primitives::U256::ONE) + .with_sell_amount(alloy::primitives::U256::ONE) + .build(), + ), + Arc::new( + OrderBuilder::default() + .with_sell_token(token1) + .with_buy_token(token3) + .with_buy_amount(alloy::primitives::U256::ONE) + .with_sell_amount(alloy::primitives::U256::ONE) + .build(), + ), + Arc::new( + OrderBuilder::default() + .with_sell_token(token2) + .with_buy_token(token4) + .with_buy_amount(alloy::primitives::U256::ONE) + .with_sell_amount(alloy::primitives::U256::ONE) + .build(), + ), ]; let mut native_price_estimator = MockNativePriceEstimating::new(); @@ -1023,24 +1044,30 @@ mod tests { let token_approx2 = Address::repeat_byte(5); let orders = vec![ - OrderBuilder::default() - .with_sell_token(token1) - .with_buy_token(token2) - .with_buy_amount(alloy::primitives::U256::ONE) - .with_sell_amount(alloy::primitives::U256::ONE) - .build(), - OrderBuilder::default() - .with_sell_token(token1) - .with_buy_token(token2) - .with_buy_amount(alloy::primitives::U256::ONE) - .with_sell_amount(alloy::primitives::U256::ONE) - .build(), - OrderBuilder::default() - .with_sell_token(token1) - .with_buy_token(token3) - .with_buy_amount(alloy::primitives::U256::ONE) - .with_sell_amount(alloy::primitives::U256::ONE) - 
.build(), + Arc::new( + OrderBuilder::default() + .with_sell_token(token1) + .with_buy_token(token2) + .with_buy_amount(alloy::primitives::U256::ONE) + .with_sell_amount(alloy::primitives::U256::ONE) + .build(), + ), + Arc::new( + OrderBuilder::default() + .with_sell_token(token1) + .with_buy_token(token2) + .with_buy_amount(alloy::primitives::U256::ONE) + .with_sell_amount(alloy::primitives::U256::ONE) + .build(), + ), + Arc::new( + OrderBuilder::default() + .with_sell_token(token1) + .with_buy_token(token3) + .with_buy_amount(alloy::primitives::U256::ONE) + .with_sell_amount(alloy::primitives::U256::ONE) + .build(), + ), ]; let mut native_price_estimator = MockNativePriceEstimating::new(); @@ -1109,18 +1136,20 @@ mod tests { ] .into_iter() .enumerate() - .map(|(i, owner)| Order { - metadata: OrderMetadata { - owner, - uid: OrderUid([i as u8; 56]), - ..Default::default() - }, - data: OrderData { - buy_amount: alloy::primitives::U256::ONE, - sell_amount: alloy::primitives::U256::ONE, + .map(|(i, owner)| { + Arc::new(Order { + metadata: OrderMetadata { + owner, + uid: OrderUid([i as u8; 56]), + ..Default::default() + }, + data: OrderData { + buy_amount: alloy::primitives::U256::ONE, + sell_amount: alloy::primitives::U256::ONE, + ..Default::default() + }, ..Default::default() - }, - ..Default::default() + }) }) .collect::>(); @@ -1140,31 +1169,31 @@ mod tests { let presign_uid = OrderUid::from_parts(B256::repeat_byte(1), Address::repeat_byte(11), 1); let orders = vec![ // PresignaturePending order - should be found - Order { + Arc::new(Order { metadata: OrderMetadata { uid: presign_uid, status: model::order::OrderStatus::PresignaturePending, ..Default::default() }, ..Default::default() - }, + }), // EIP-1271 order - not PresignaturePending - Order { + Arc::new(Order { metadata: OrderMetadata { uid: OrderUid::from_parts(B256::repeat_byte(2), Address::repeat_byte(22), 2), ..Default::default() }, signature: Signature::Eip1271(vec![2, 2]), ..Default::default() - }, + }), // Regular order - not PresignaturePending - Order { + Arc::new(Order { metadata: OrderMetadata { uid: OrderUid::from_parts(B256::repeat_byte(3), Address::repeat_byte(33), 3), ..Default::default() }, ..Default::default() - }, + }), ]; let pending_orders = find_presignature_pending_orders(&orders); @@ -1178,18 +1207,24 @@ mod tests { let token2 = Address::with_last_byte(2); let bad_token = Arc::new(ListBasedDetector::deny_list(vec![token0])); let orders = vec![ - OrderBuilder::default() - .with_sell_token(token0) - .with_buy_token(token1) - .build(), - OrderBuilder::default() - .with_sell_token(token1) - .with_buy_token(token2) - .build(), - OrderBuilder::default() - .with_sell_token(token0) - .with_buy_token(token2) - .build(), + Arc::new( + OrderBuilder::default() + .with_sell_token(token0) + .with_buy_token(token1) + .build(), + ), + Arc::new( + OrderBuilder::default() + .with_sell_token(token1) + .with_buy_token(token2) + .build(), + ), + Arc::new( + OrderBuilder::default() + .with_sell_token(token0) + .with_buy_token(token2) + .build(), + ), ]; let unsupported_tokens_orders = find_unsupported_tokens(&orders, bad_token) .now_or_never() @@ -1213,19 +1248,21 @@ mod tests { }; let price_factor = "0.95".parse().unwrap(); - let order = |sell_amount: u8, buy_amount: u8| Order { - data: OrderData { - sell_token, - sell_amount: alloy::primitives::U256::from(sell_amount), - buy_token, - buy_amount: alloy::primitives::U256::from(buy_amount), - ..Default::default() - }, - metadata: OrderMetadata { - class: OrderClass::Limit, + let order 
= |sell_amount: u8, buy_amount: u8| { + Arc::new(Order { + data: OrderData { + sell_token, + sell_amount: alloy::primitives::U256::from(sell_amount), + buy_token, + buy_amount: alloy::primitives::U256::from(buy_amount), + ..Default::default() + }, + metadata: OrderMetadata { + class: OrderClass::Limit, + ..Default::default() + }, ..Default::default() - }, - ..Default::default() + }) }; let valid_orders = vec![ @@ -1251,7 +1288,7 @@ mod tests { ); let mut order = order(10, 21); - order.data.partially_fillable = true; + Arc::make_mut(&mut order).data.partially_fillable = true; let orders = vec![order]; assert_eq!( filter_mispriced_limit_orders(orders, &prices, &price_factor).len(), @@ -1264,7 +1301,7 @@ mod tests { let settlement_contract = Address::repeat_byte(1); let orders = vec![ // enough balance for sell and fee - Order { + Arc::new(Order { data: OrderData { sell_token: Address::with_last_byte(2), sell_amount: alloy::primitives::U256::ONE, @@ -1273,9 +1310,9 @@ mod tests { ..Default::default() }, ..Default::default() - }, + }), // missing fee balance - Order { + Arc::new(Order { data: OrderData { sell_token: Address::with_last_byte(3), sell_amount: alloy::primitives::U256::ONE, @@ -1284,9 +1321,9 @@ mod tests { ..Default::default() }, ..Default::default() - }, + }), // at least 1 partially fillable balance - Order { + Arc::new(Order { data: OrderData { sell_token: Address::with_last_byte(4), sell_amount: alloy::primitives::U256::from(2), @@ -1295,9 +1332,9 @@ mod tests { ..Default::default() }, ..Default::default() - }, + }), // 0 partially fillable balance - Order { + Arc::new(Order { data: OrderData { sell_token: Address::with_last_byte(5), sell_amount: alloy::primitives::U256::from(2), @@ -1306,9 +1343,9 @@ mod tests { ..Default::default() }, ..Default::default() - }, + }), // considered flashloan order because of special receiver - Order { + Arc::new(Order { data: OrderData { sell_token: Address::with_last_byte(6), sell_amount: alloy::primitives::U256::from(200), @@ -1318,7 +1355,7 @@ mod tests { ..Default::default() }, ..Default::default() - }, + }), ]; let balances = [ (Query::from_order(&orders[0]), U256::from(2)), @@ -1346,7 +1383,7 @@ mod tests { let settlement_contract = Address::repeat_byte(1); // EIP-1271 order (should skip balance check) - let eip1271_order = Order { + let eip1271_order = Arc::new(Order { data: OrderData { sell_token: Address::with_last_byte(7), sell_amount: alloy::primitives::U256::from(10), @@ -1360,12 +1397,12 @@ mod tests { ..Default::default() }, ..Default::default() - }; + }); // Order with wrappers in bypass set (should skip balance check) let wrapper_order_uid = OrderUid::from_parts(B256::repeat_byte(7), Address::repeat_byte(77), 7); - let wrapper_order = Order { + let wrapper_order = Arc::new(Order { data: OrderData { sell_token: Address::with_last_byte(8), sell_amount: alloy::primitives::U256::from(10), @@ -1378,10 +1415,10 @@ mod tests { ..Default::default() }, ..Default::default() - }; + }); // Regular ECDSA order without wrappers (should be filtered) - let regular_order = Order { + let regular_order = Arc::new(Order { data: OrderData { sell_token: Address::with_last_byte(9), sell_amount: alloy::primitives::U256::from(10), @@ -1394,7 +1431,7 @@ mod tests { ..Default::default() }, ..Default::default() - }; + }); let orders = vec![ regular_order.clone(), From 9b50da5606620e304a4109d02273ad5e79086d66 Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Fri, 13 Feb 2026 12:11:10 +0100 Subject: [PATCH 059/219] Skip tx simulation on endpoints that 
don't mine reverts (#4143) # Description Based on our logs it seems like simulating a tx before submission can take up to 500ms. Since our submission logic know which mempool mines reverting txs and which don't we can simply skip the simulation step when the current mempool does not mine reverting txs. # Changes skip tx simulation (estimate_gas()) in mempools that don't mine reverting txs (e.g. mevblocker). --- crates/driver/src/domain/mempools.rs | 34 ++++++++++++++++------------ 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/crates/driver/src/domain/mempools.rs b/crates/driver/src/domain/mempools.rs index b5928a4f5e..fb606842c8 100644 --- a/crates/driver/src/domain/mempools.rs +++ b/crates/driver/src/domain/mempools.rs @@ -105,22 +105,26 @@ impl Mempools { // The tx is simulated before submitting the solution to the competition, but a // delay between that and the actual execution can cause the simulation to be // invalid which doesn't make sense to submit to the mempool anymore. - if let Err(err) = self.ethereum.estimate_gas(tx.clone()).await { - if err.is_revert() { - tracing::info!( - ?err, - "settlement tx simulation reverted before submitting to the mempool" - ); - return Err(Error::SimulationRevert { - submitted_at_block: current_block, - reverted_at_block: current_block, - }); - } else { - tracing::warn!( - ?err, - "couldn't simulate tx before submitting to the mempool" - ); + if mempool.reverts_can_get_mined() { + if let Err(err) = self.ethereum.estimate_gas(tx.clone()).await { + if err.is_revert() { + tracing::info!( + ?err, + "settlement tx simulation reverted before submitting to the mempool" + ); + return Err(Error::SimulationRevert { + submitted_at_block: current_block, + reverted_at_block: current_block, + }); + } else { + tracing::warn!( + ?err, + "couldn't simulate tx before submitting to the mempool" + ); + } } + } else { + tracing::trace!("skipping tx simulation because mempool does not mine reverting txs"); } // Fetch the nonce to avoid race conditions between concurrent From 0c04766fa2d606c1ff9b908000b4549f159a1c6e Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Fri, 13 Feb 2026 12:44:04 +0100 Subject: [PATCH 060/219] Remove price filtering from autopilot (#4148) # Description First PR in a series of small optimizations in the auction building logic. # Changes removes filtering of orders based on the existing native prices this feature was never enabled and with the stronger focus on filtering as little as possible in the autopilot it should also not be enabled in the future --- crates/autopilot/src/arguments.rs | 7 -- crates/autopilot/src/run.rs | 3 - crates/autopilot/src/solvable_orders.rs | 109 ------------------------ 3 files changed, 119 deletions(-) diff --git a/crates/autopilot/src/arguments.rs b/crates/autopilot/src/arguments.rs index 3057f514a0..410f8dba11 100644 --- a/crates/autopilot/src/arguments.rs +++ b/crates/autopilot/src/arguments.rs @@ -135,11 +135,6 @@ pub struct Arguments { )] pub max_auction_age: Duration, - /// Used to filter out limit orders with prices that are too far from the - /// market price. 0 means no filtering. - #[clap(long, env, default_value = "0")] - pub limit_order_price_factor: f64, - /// The URL of a list of tokens our settlement contract is willing to /// internalize. 
#[clap(long, env)] @@ -315,7 +310,6 @@ impl std::fmt::Display for Arguments { banned_users, banned_users_max_cache_size, max_auction_age, - limit_order_price_factor, trusted_tokens_url, trusted_tokens, trusted_tokens_update_interval, @@ -375,7 +369,6 @@ impl std::fmt::Display for Arguments { "banned_users_max_cache_size: {banned_users_max_cache_size:?}" )?; writeln!(f, "max_auction_age: {max_auction_age:?}")?; - writeln!(f, "limit_order_price_factor: {limit_order_price_factor:?}")?; display_option(f, "trusted_tokens_url", trusted_tokens_url)?; writeln!(f, "trusted_tokens: {trusted_tokens:?}")?; writeln!( diff --git a/crates/autopilot/src/run.rs b/crates/autopilot/src/run.rs index 0235ad4036..8a57ee6233 100644 --- a/crates/autopilot/src/run.rs +++ b/crates/autopilot/src/run.rs @@ -524,9 +524,6 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { bad_token_detector.clone(), competition_native_price_updater.clone(), *eth.contracts().weth().address(), - args.limit_order_price_factor - .try_into() - .expect("limit order price factor can't be converted to BigDecimal"), domain::ProtocolFees::new( &args.fee_policies_config, args.shared.volume_fee_bucket_overrides.clone(), diff --git a/crates/autopilot/src/solvable_orders.rs b/crates/autopilot/src/solvable_orders.rs index 22db019b77..d8ea74a8dd 100644 --- a/crates/autopilot/src/solvable_orders.rs +++ b/crates/autopilot/src/solvable_orders.rs @@ -6,7 +6,6 @@ use { }, alloy::primitives::{Address, U256}, anyhow::{Context, Result}, - bigdecimal::BigDecimal, database::order_events::OrderEventLabel, futures::{FutureExt, future::join_all}, itertools::Itertools, @@ -15,7 +14,6 @@ use { signature::Signature, time::now_in_epoch_seconds, }, - number::conversions::u256_to_big_decimal, prometheus::{Histogram, HistogramVec, IntCounter, IntCounterVec, IntGauge, IntGaugeVec}, shared::{ account_balances::{BalanceFetching, Query}, @@ -92,7 +90,6 @@ pub struct SolvableOrdersCache { native_price_estimator: Arc, metrics: &'static Metrics, weth: Address, - limit_order_price_factor: BigDecimal, protocol_fees: domain::ProtocolFees, cow_amm_registry: cow_amm::Registry, native_price_timeout: Duration, @@ -118,7 +115,6 @@ impl SolvableOrdersCache { bad_token_detector: Arc, native_price_estimator: Arc, weth: Address, - limit_order_price_factor: BigDecimal, protocol_fees: domain::ProtocolFees, cow_amm_registry: cow_amm::Registry, native_price_timeout: Duration, @@ -136,7 +132,6 @@ impl SolvableOrdersCache { native_price_estimator, metrics, weth, - limit_order_price_factor, protocol_fees, cow_amm_registry, native_price_timeout, @@ -259,10 +254,6 @@ impl SolvableOrdersCache { let removed = counter.checkpoint("missing_price", &orders); filtered_order_events.extend(removed); - let orders = filter_mispriced_limit_orders(orders, &prices, &self.limit_order_price_factor); - let removed = counter.checkpoint("out_of_market", &orders); - filtered_order_events.extend(removed); - let removed = counter.record(&orders); filtered_order_events.extend(removed); @@ -666,45 +657,6 @@ async fn find_unsupported_tokens( .collect() } -/// Filter out limit orders which are far enough outside the estimated native -/// token price. 
-fn filter_mispriced_limit_orders( - mut orders: Vec>, - prices: &BTreeMap, - price_factor: &BigDecimal, -) -> Vec> { - orders.retain(|order| { - if !order.is_limit_order() { - return true; - } - - let sell_price = *prices.get(&order.data.sell_token).unwrap(); - let buy_price = *prices.get(&order.data.buy_token).unwrap(); - - // Convert the sell and buy price to the native token (ETH) and make sure that - // sell is higher than buy with the configurable price factor. - let (sell_native, buy_native) = match ( - order.data.sell_amount.checked_mul(sell_price), - order.data.buy_amount.checked_mul(buy_price), - ) { - (Some(sell), Some(buy)) => (sell, buy), - _ => { - tracing::warn!( - order_uid = %order.metadata.uid, - "limit order overflow computing native amounts", - ); - return false; - } - }; - - let sell_native = u256_to_big_decimal(&sell_native); - let buy_native = u256_to_big_decimal(&buy_native); - - sell_native >= buy_native * price_factor - }); - orders -} - /// Order filtering state for recording filtered orders over the course of /// building an auction. struct OrderFilterCounter { @@ -1235,67 +1187,6 @@ mod tests { ); } - #[test] - fn filters_mispriced_orders() { - let sell_token = Address::repeat_byte(1); - let buy_token = Address::repeat_byte(2); - - // Prices are set such that 1 sell token is equivalent to 2 buy tokens. - // Additionally, they are scaled to large values to allow for overflows. - let prices = btreemap! { - sell_token => alloy::primitives::U256::MAX / alloy::primitives::U256::from(100), - buy_token => alloy::primitives::U256::MAX / alloy::primitives::U256::from(200), - }; - let price_factor = "0.95".parse().unwrap(); - - let order = |sell_amount: u8, buy_amount: u8| { - Arc::new(Order { - data: OrderData { - sell_token, - sell_amount: alloy::primitives::U256::from(sell_amount), - buy_token, - buy_amount: alloy::primitives::U256::from(buy_amount), - ..Default::default() - }, - metadata: OrderMetadata { - class: OrderClass::Limit, - ..Default::default() - }, - ..Default::default() - }) - }; - - let valid_orders = vec![ - // Reasonably priced order, doesn't get filtered. - order(100, 200), - // Slightly out of price order, doesn't get filtered. - order(10, 21), - ]; - - let invalid_orders = vec![ - // Out of price order gets filtered out. - order(10, 100), - // Overflow sell value gets filtered. - order(255, 1), - // Overflow buy value gets filtered. - order(100, 255), - ]; - - let orders = [valid_orders.clone(), invalid_orders].concat(); - assert_eq!( - filter_mispriced_limit_orders(orders, &prices, &price_factor), - valid_orders, - ); - - let mut order = order(10, 21); - Arc::make_mut(&mut order).data.partially_fillable = true; - let orders = vec![order]; - assert_eq!( - filter_mispriced_limit_orders(orders, &prices, &price_factor).len(), - 1 - ); - } - #[test] fn orders_with_balance_() { let settlement_contract = Address::repeat_byte(1); From 80f1b6985e4ca21746bf3e4ef38553079ed382b0 Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Fri, 13 Feb 2026 15:10:39 +0100 Subject: [PATCH 061/219] Speed up removing old orders and quotes (#4151) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description The incremental solvable orders query so far blindly inserted updated orders in to the set of open orders and then ran a filtering step over ALL open orders and quotes separately. 
This is pretty wasteful since the new/updated orders are significantly fewer than the set of ALL orders and 3 of 4 things we check we only need to check on the new/updated orders. Also we don't have to go over ALL quotes if we just remove the quotes together with the orders. # Changes - determine whether to insert or remove orders based on the new information returned by the incremental DB query - whenever we remove an order we also remove the associated quote to never run do a `.retain()` on ALL quotes - still do a `.retain()` on the open orders to find expired orders as the incremental orders query will not flag those - while we still scan the whole list twice the most complicated checks could be moved to the part that only runs on the updated orders so this `.retain()` still is a lot faster than before. ## How to test Measured performance with tempo Retain orders went from 2.5ms to 283µs and we dropped the 6.6ms from retaining quotes completely Before Screenshot 2026-02-13 at 08 13 38 After Screenshot 2026-02-13 at 08 13 24 --- crates/autopilot/src/infra/persistence/mod.rs | 41 +++++++++++-------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/crates/autopilot/src/infra/persistence/mod.rs b/crates/autopilot/src/infra/persistence/mod.rs index b6980c749a..bf5f610a9d 100644 --- a/crates/autopilot/src/infra/persistence/mod.rs +++ b/crates/autopilot/src/infra/persistence/mod.rs @@ -612,20 +612,9 @@ impl Persistence { .to_u64() .context("latest_settlement_block is not u64")?; - // Blindly insert all new orders into the cache. + // Insert new / updated orders or remove invalidated orders + // and the associated quote. for (uid, order) in next_orders { - current_orders.insert(uid, order); - } - - // Filter out all the invalid orders. - current_orders.retain(|_uid, order| { - let expired = order.data.valid_to < min_valid_to - || order - .metadata - .ethflow_data - .as_ref() - .is_some_and(|data| data.user_valid_to < i64::from(min_valid_to)); - let invalidated = order.metadata.invalidated; let onchain_error = order .metadata @@ -645,10 +634,30 @@ impl Persistence { } }; - !expired && !invalidated && !onchain_error && !fulfilled - }); + if invalidated || onchain_error || fulfilled { + current_orders.remove(&uid); + current_quotes.remove(&uid); + } else { + current_orders.insert(uid, order); + } + } + + // Filter out all the expired orders and their quotes. + current_orders.retain(|uid, order| { + let expired = order.data.valid_to < min_valid_to + || order + .metadata + .ethflow_data + .as_ref() + .is_some_and(|data| data.user_valid_to < i64::from(min_valid_to)); - current_quotes.retain(|uid, _| current_orders.contains_key(uid)); + if expired { + current_quotes.remove(uid); + false + } else { + true + } + }); { let _timer = Metrics::get() From d2d7e4fc991f14005cfbd4048b4d990684603033 Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Fri, 13 Feb 2026 16:45:31 +0100 Subject: [PATCH 062/219] Speed up auction metrics (#4155) # Description A surprisingly big amount of time in the auction building process is wasted with metrics. By far the biggest offender is the function `checkpoint()` which figures out which orders got filtered out since the last checkpoint. It does it by cloning the currently alive orders and removing the still alive orders from that map one by one. All the orders that are left over are no longer alive. 
This regularly incurs an overhead of a couple milliseconds each while the alternative function `checkpoint_by_invalid_orders()` (which gets the removed orders passed in) only takes a few microseconds. Additionally constructing the an `OrderFilterCounter` also take ~5ms because it clones the original set of orders and counts them already there. Screenshot 2026-02-13 at 12 29 01 # Changes - drop `OrderFilterCounter` in favor of function on the metrics - from all functions return a list of filtered orders such that we don't have to do a ton of computations to diff the orders before and after a filter function - fixed resulting compile errors in the tests This technically introduces breaking changes but IMO those are well worth it: * we don't populate the gauges once at the start of the process and once at the end - instead we update individual gauges as we go * some orders can **technically** be double counted. Since filtering for banned users, unsupported tokens and invalid signatures happens in parallel and happens separately the same order can be counted for multiple things. The orders filtered that way are relatively few and having the chance that orders fit multiple categories is even lower. On the plus side if there are multiple things wrong with an order we now learn about it. Since the exact numbers of filtered orders are not super interesting anyway one can even consider this a debugging improvement. --- crates/autopilot/src/solvable_orders.rs | 301 ++++++++++-------------- 1 file changed, 118 insertions(+), 183 deletions(-) diff --git a/crates/autopilot/src/solvable_orders.rs b/crates/autopilot/src/solvable_orders.rs index d8ea74a8dd..2d427cd15f 100644 --- a/crates/autopilot/src/solvable_orders.rs +++ b/crates/autopilot/src/solvable_orders.rs @@ -74,6 +74,45 @@ pub struct Metrics { auction_market_order_missing_price: IntGauge, } +impl Metrics { + fn get() -> &'static Self { + Metrics::instance(observe::metrics::get_storage_registry()).unwrap() + } + + fn track_filtered_orders(reason: &'static str, invalid_orders: &[OrderUid]) { + if invalid_orders.is_empty() { + return; + } + + Metrics::get() + .auction_filtered_orders + .with_label_values(&[reason]) + .set(i64::try_from(invalid_orders.len()).unwrap_or(i64::MAX)); + + tracing::debug!( + %reason, + count = invalid_orders.len(), + orders = ?invalid_orders, "filtered orders" + ); + } + + fn track_orders_in_final_auction(orders: &[Arc]) { + let metrics = Metrics::get(); + metrics.auction_creations.inc(); + + let remaining_counts = orders + .iter() + .counts_by(|order| order.metadata.class.as_ref()); + for class in OrderClass::VARIANTS { + let count = remaining_counts.get(class).copied().unwrap_or_default(); + metrics + .auction_solvable_orders + .with_label_values(&[class]) + .set(i64::try_from(count).unwrap_or(i64::MAX)); + } + } +} + /// Keeps track and updates the set of currently solvable orders. /// For this we also need to keep track of user sell token balances for open /// orders so this is retrievable as well. 
@@ -88,7 +127,6 @@ pub struct SolvableOrdersCache { bad_token_detector: Arc, cache: Mutex>, native_price_estimator: Arc, - metrics: &'static Metrics, weth: Address, protocol_fees: domain::ProtocolFees, cow_amm_registry: cow_amm::Registry, @@ -121,7 +159,6 @@ impl SolvableOrdersCache { settlement_contract: Address, disable_order_balance_filter: bool, ) -> Arc { - let metrics = Metrics::instance(observe::metrics::get_storage_registry()).unwrap(); Arc::new(Self { min_order_validity_period, persistence, @@ -130,7 +167,6 @@ impl SolvableOrdersCache { bad_token_detector, cache: Mutex::new(None), native_price_estimator, - metrics, weth, protocol_fees, cow_amm_registry, @@ -170,7 +206,6 @@ impl SolvableOrdersCache { .cloned() .collect::>(); - let mut counter = OrderFilterCounter::new(self.metrics, &orders); let mut invalid_order_uids = HashSet::new(); let mut filtered_order_events = Vec::new(); @@ -192,7 +227,7 @@ impl SolvableOrdersCache { .collect::>(); tokio::join!( self.fetch_balances(queries), - self.filter_invalid_orders(orders, &mut counter, &mut invalid_order_uids,), + self.filter_invalid_orders(orders, &mut invalid_order_uids), self.timed_future("cow_amm_registry", self.cow_amm_registry.amms()), ) }; @@ -200,17 +235,17 @@ impl SolvableOrdersCache { let orders = if self.disable_order_balance_filter { orders } else { - let orders = orders_with_balance( + let (orders, removed) = orders_with_balance( orders, &balances, self.settlement_contract, &balance_filter_exempt_orders, ); - let removed = counter.checkpoint("insufficient_balance", &orders); + Metrics::track_filtered_orders("insufficient_balance", &removed); invalid_order_uids.extend(removed); - let orders = filter_dust_orders(orders, &balances); - let removed = counter.checkpoint("dust_order", &orders); + let (orders, removed) = filter_dust_orders(orders, &balances); + Metrics::track_filtered_orders("dust_order", &removed); filtered_order_events.extend(removed); orders @@ -222,13 +257,12 @@ impl SolvableOrdersCache { .collect::>(); // create auction - let (orders, mut prices) = self + let (orders, removed, mut prices) = self .timed_future( "get_orders_with_native_prices", get_orders_with_native_prices( orders, &self.native_price_estimator, - self.metrics, cow_amm_tokens, self.native_price_timeout, ), @@ -250,12 +284,10 @@ impl SolvableOrdersCache { entry.insert(weth_price); } - - let removed = counter.checkpoint("missing_price", &orders); + Metrics::track_filtered_orders("missing_price", &removed); filtered_order_events.extend(removed); - let removed = counter.record(&orders); - filtered_order_events.extend(removed); + Metrics::track_orders_in_final_auction(&orders); if store_events { // spawning a background task since `order_events` table insert operation takes @@ -320,7 +352,7 @@ impl SolvableOrdersCache { }); tracing::debug!(%block, "updated current auction cache"); - self.metrics + Metrics::get() .auction_update_total_time .observe(start.elapsed().as_secs_f64()); Ok(()) @@ -396,7 +428,6 @@ impl SolvableOrdersCache { async fn filter_invalid_orders( &self, mut orders: Vec>, - counter: &mut OrderFilterCounter, invalid_order_uids: &mut HashSet, ) -> Vec> { let presignature_pending_orders = find_presignature_pending_orders(&orders); @@ -413,9 +444,9 @@ impl SolvableOrdersCache { ); tracing::trace!("filtered invalid orders"); - counter.checkpoint_by_invalid_orders("banned_user", &banned_user_orders); - counter.checkpoint_by_invalid_orders("invalid_signature", &presignature_pending_orders); - 
counter.checkpoint_by_invalid_orders("unsupported_token", &unsupported_token_orders); + Metrics::track_filtered_orders("banned_user", &banned_user_orders); + Metrics::track_filtered_orders("invalid_signature", &presignature_pending_orders); + Metrics::track_filtered_orders("unsupported_token", &unsupported_token_orders); invalid_order_uids.extend(banned_user_orders); invalid_order_uids.extend(presignature_pending_orders); invalid_order_uids.extend(unsupported_token_orders); @@ -425,7 +456,7 @@ impl SolvableOrdersCache { } pub fn track_auction_update(&self, result: &str) { - self.metrics + Metrics::get() .auction_update .with_label_values(&[result]) .inc(); @@ -433,8 +464,7 @@ impl SolvableOrdersCache { /// Runs the future and collects runtime metrics. async fn timed_future(&self, label: &str, fut: impl Future) -> T { - let _timer = self - .metrics + let _timer = Metrics::get() .auction_update_stage_time .with_label_values(&[label]) .start_timer(); @@ -504,10 +534,11 @@ fn orders_with_balance( balances: &Balances, settlement_contract: Address, filter_bypass_orders: &HashSet, -) -> Vec> { +) -> (Vec>, Vec) { // Prefer newer orders over older ones. orders.sort_by_key(|order| std::cmp::Reverse(order.metadata.creation_date)); - orders.retain(|order| { + let mut filtered_orders = vec![]; + let keep = |order: &Order| { // Skip balance check for all EIP-1271 orders (they can rely on pre-interactions // to unlock funds) or orders with wrappers (wrappers produce the required // balance at settlement time). @@ -540,14 +571,27 @@ fn orders_with_balance( Some(balance) => balance, }; balance >= needed_balance + }; + + orders.retain(|order| { + if keep(order) { + true + } else { + filtered_orders.push(order.metadata.uid); + false + } }); - orders + (orders, filtered_orders) } /// Filters out dust orders i.e. partially fillable orders that, when scaled /// have a 0 buy or sell amount. 
-fn filter_dust_orders(mut orders: Vec>, balances: &Balances) -> Vec> { - orders.retain(|order| { +fn filter_dust_orders( + mut orders: Vec>, + balances: &Balances, +) -> (Vec>, Vec) { + let mut removed = vec![]; + let keep = |order: &Order| { if !order.data.partially_fillable { return true; } @@ -559,7 +603,7 @@ fn filter_dust_orders(mut orders: Vec>, balances: &Balances) -> Vec>, balances: &Balances) -> Vec>, native_price_estimator: &NativePriceUpdater, - metrics: &Metrics, additional_tokens: impl IntoIterator, timeout: Duration, -) -> (Vec>, BTreeMap) { +) -> ( + Vec>, + Vec, + BTreeMap, +) { let traded_tokens = orders .iter() .flat_map(|order| [order.data.sell_token, order.data.buy_token]) @@ -592,24 +648,26 @@ async fn get_orders_with_native_prices( let prices = get_native_prices(traded_tokens, native_price_estimator, timeout).await; // Filter orders so that we only return orders that have prices - let mut filtered_market_orders = 0_i64; + let mut removed_market_orders = 0_i64; + let mut removed_orders = vec![]; let mut orders = orders; orders.retain(|order| { - let (t0, t1) = (&order.data.sell_token, &order.data.buy_token); - match (prices.get(t0), prices.get(t1)) { - (Some(_), Some(_)) => true, - _ => { - filtered_market_orders += i64::from(order.metadata.class == OrderClass::Market); - false - } + let both_prices_present = prices.contains_key(&order.data.sell_token) + && prices.contains_key(&order.data.buy_token); + if both_prices_present { + true + } else { + removed_orders.push(order.metadata.uid); + removed_market_orders += i64::from(order.metadata.class == OrderClass::Market); + false } }); - metrics + Metrics::get() .auction_market_order_missing_price - .set(filtered_market_orders); + .set(removed_market_orders); - (orders, prices) + (orders, removed_orders, prices) } async fn find_unsupported_tokens( @@ -657,119 +715,6 @@ async fn find_unsupported_tokens( .collect() } -/// Order filtering state for recording filtered orders over the course of -/// building an auction. -struct OrderFilterCounter { - metrics: &'static Metrics, - - /// Mapping of remaining order UIDs to their classes. - orders: HashMap, - /// Running tally for counts of filtered orders. - counts: HashMap, -} - -type Reason = &'static str; - -impl OrderFilterCounter { - fn new(metrics: &'static Metrics, orders: &[Arc]) -> Self { - // Eagerly store the candidate orders. This ensures that that gauge is - // always up to date even if there are errors in the auction building - // process. - let initial_counts = orders - .iter() - .counts_by(|order| order.metadata.class.as_ref()); - for class in OrderClass::VARIANTS { - let count = initial_counts.get(class).copied().unwrap_or_default(); - metrics - .auction_candidate_orders - .with_label_values(&[class]) - .set(i64::try_from(count).unwrap_or(i64::MAX)); - } - - Self { - metrics, - orders: orders - .iter() - .map(|order| (order.metadata.uid, order.metadata.class)) - .collect(), - counts: HashMap::new(), - } - } - - /// Creates a new checkpoint from the current remaining orders. 
- fn checkpoint(&mut self, reason: Reason, orders: &[Arc]) -> Vec { - let filtered_orders = orders - .iter() - .fold(self.orders.clone(), |mut order_uids, order| { - order_uids.remove(&order.metadata.uid); - order_uids - }); - - *self.counts.entry(reason).or_default() += filtered_orders.len(); - for order_uid in filtered_orders.keys() { - self.orders.remove(order_uid).unwrap(); - } - if !filtered_orders.is_empty() { - tracing::debug!( - %reason, - count = filtered_orders.len(), - orders = ?filtered_orders, "filtered orders" - ); - } - filtered_orders.into_keys().collect() - } - - /// Creates a new checkpoint based on the found invalid orders. - fn checkpoint_by_invalid_orders(&mut self, reason: Reason, invalid_orders: &[OrderUid]) { - if invalid_orders.is_empty() { - return; - } - - let mut counter = 0; - for order_uid in invalid_orders { - if self.orders.remove(order_uid).is_some() { - counter += 1; - } - } - *self.counts.entry(reason).or_default() += counter; - if counter > 0 { - tracing::debug!( - %reason, - count = invalid_orders.len(), - orders = ?invalid_orders, "filtered orders" - ); - } - } - - /// Records the filter counter to metrics. - /// If there are orders that have been filtered out since the last - /// checkpoint these orders will get recorded with the readon "other". - /// Returns these catch-all orders. - fn record(mut self, orders: &[Arc]) -> Vec { - let removed = self.checkpoint("other", orders); - - self.metrics.auction_creations.inc(); - - let remaining_counts = self.orders.iter().counts_by(|(_, class)| class.as_ref()); - for class in OrderClass::VARIANTS { - let count = remaining_counts.get(class).copied().unwrap_or_default(); - self.metrics - .auction_solvable_orders - .with_label_values(&[class]) - .set(i64::try_from(count).unwrap_or(i64::MAX)); - } - - for (reason, count) in self.counts { - self.metrics - .auction_filtered_orders - .with_label_values(&[reason]) - .set(i64::try_from(count).unwrap_or(i64::MAX)); - } - - removed - } -} - #[cfg(test)] mod tests { use { @@ -845,12 +790,10 @@ mod tests { ); let native_price_estimator = NativePriceUpdater::new(caching_estimator, Duration::MAX, Default::default()); - let metrics = Metrics::instance(observe::metrics::get_storage_registry()).unwrap(); - let (filtered_orders, prices) = get_orders_with_native_prices( + let (filtered_orders, _removed, prices) = get_orders_with_native_prices( orders.clone(), &native_price_estimator, - metrics, vec![], Duration::from_millis(100), ) @@ -947,35 +890,32 @@ mod tests { Duration::from_millis(5), Default::default(), ); - let metrics = Metrics::instance(observe::metrics::get_storage_registry()).unwrap(); // We'll have no native prices in this call. But set_tokens_to_update // will cause the background task to fetch them in the next cycle. - let (filtered_orders, prices) = get_orders_with_native_prices( + let (alive_orders, _removed_orders, prices) = get_orders_with_native_prices( orders.clone(), &native_price_estimator, - metrics, vec![token5], Duration::ZERO, ) .await; - assert!(filtered_orders.is_empty()); + assert!(alive_orders.is_empty()); assert!(prices.is_empty()); // Wait for native prices to get fetched by the background task. tokio::time::sleep(tokio::time::Duration::from_millis(30)).await; // Now we have all the native prices we want. 
- let (filtered_orders, prices) = get_orders_with_native_prices( + let (alive_orders, _removed_orders, prices) = get_orders_with_native_prices( orders.clone(), &native_price_estimator, - metrics, vec![token5], Duration::ZERO, ) .await; - assert_eq!(filtered_orders, [orders[2].clone()]); + assert_eq!(alive_orders, [orders[2].clone()]); assert_eq!( prices, btreemap! { @@ -1053,17 +993,15 @@ mod tests { ); let native_price_estimator = NativePriceUpdater::new(caching_estimator, Duration::MAX, Default::default()); - let metrics = Metrics::instance(observe::metrics::get_storage_registry()).unwrap(); - let (filtered_orders, prices) = get_orders_with_native_prices( + let (alive_orders, _removed_orders, prices) = get_orders_with_native_prices( orders.clone(), &native_price_estimator, - metrics, vec![], Duration::from_secs(10), ) .await; - assert_eq!(filtered_orders, orders); + assert_eq!(alive_orders, orders); assert_eq!( prices, btreemap! { @@ -1260,11 +1198,11 @@ mod tests { let expected = &[0, 2, 4]; let no_bypass: HashSet = HashSet::new(); - let filtered = + let (alive_orders, _removed_orders) = orders_with_balance(orders.clone(), &balances, settlement_contract, &no_bypass); - assert_eq!(filtered.len(), expected.len()); + assert_eq!(alive_orders.len(), expected.len()); for index in expected { - let found = filtered.iter().any(|o| o.data == orders[*index].data); + let found = alive_orders.iter().any(|o| o.data == orders[*index].data); assert!(found, "{}", index); } } @@ -1333,28 +1271,25 @@ mod tests { // EIP-1271 order and wrapper order should be retained, regular order filtered let wrapper_set = HashSet::from([wrapper_order_uid]); - let filtered = + let (alive_orders, _removed_orders) = orders_with_balance(orders.clone(), &balances, settlement_contract, &wrapper_set); - assert_eq!(filtered.len(), 2); + assert_eq!(alive_orders.len(), 2); assert!( - filtered + alive_orders .iter() .any(|o| o.metadata.uid == eip1271_order.metadata.uid) ); assert!( - filtered + alive_orders .iter() .any(|o| o.metadata.uid == wrapper_order.metadata.uid) ); // Without wrapper set, only EIP-1271 order should be retained let empty_set: HashSet = HashSet::new(); - let filtered_no_wrappers = + let (alive_orders, _removed_orders) = orders_with_balance(orders, &balances, settlement_contract, &empty_set); - assert_eq!(filtered_no_wrappers.len(), 1); - assert_eq!( - filtered_no_wrappers[0].metadata.uid, - eip1271_order.metadata.uid - ); + assert_eq!(alive_orders.len(), 1); + assert_eq!(alive_orders[0].metadata.uid, eip1271_order.metadata.uid); } } From 95b5017e60de2379eee735af1731c676dc55be9f Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Fri, 13 Feb 2026 18:00:49 +0100 Subject: [PATCH 063/219] Remove unused config options / features (#4156) # Description This started out as an investigation into speeding up deny-listed token filtering in the auction building process but I realized that there is still some unfinished work of migrating away from the tracing based bad token detection. We already moved away from that bad token detection mechanism a while ago but I didn't fully delete all the related code yet in case the new mechanism doesn't work well enough. Since then we've been running the services without the feature for many months so it's time to remove that stuff now. We are deleting so much here because only the tracing detector was using all the logic to find owners of certain tokens onchain so that also went with it. Once that was done I also went ahead and tried to find any other args that are unused. 
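For reference, the net effect on how the bad token detector gets wired up is sketched below. This is a simplified, hedged excerpt of the `run.rs` changes in the diff that follows; surrounding setup, imports and generic parameters are elided, so it is illustrative rather than compilable, and the placeholder comments stand in for arguments that exist in the real code.

```rust
// Before: when a tracing node URL was configured, unknown tokens were
// forwarded to a cached trace-call detector for onchain quality checks.
let trace_call_detector = args.tracing_node_url.as_ref().map(|_url| {
    CachingDetector::new(
        Box::new(TraceCallDetector::new(/* tracing web3, settlement address, owner finder */)),
        args.shared.token_quality_cache_expiry,
        args.shared.token_quality_cache_prefetch_time,
    )
});
let bad_token_detector = Arc::new(
    ListBasedDetector::new(
        allowed_tokens,
        unsupported_tokens,
        trace_call_detector
            .map(UnknownTokenStrategy::Forward)
            .unwrap_or(UnknownTokenStrategy::Allow),
    )
    .instrumented(),
);

// After: tokens that are neither explicitly allowed nor explicitly
// unsupported are always allowed; only the list-based detector remains.
let bad_token_detector = Arc::new(
    ListBasedDetector::new(allowed_tokens, unsupported_tokens, UnknownTokenStrategy::Allow)
        .instrumented(),
);
```

Everything that fed the `Forward` branch (the trace-call detector, its cache, and the token owner finders) is what the diff below deletes.
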
# Changes
- removed the `tracing_node_url` parameter, which made the tracing-based bad token detector unused
- removed the tracing detector, which made the token owner finding code unused
- removed the token owner finding code, which made the liquidity source configs unused
- removed any other arguments that were no longer used

## How to test
The compiler: everything removed here was already unused, so a clean build is the test.
---
 crates/autopilot/src/arguments.rs             |  13 -
 crates/autopilot/src/run.rs                   |  66 +-
 crates/e2e/src/setup/services.rs              |   2 -
 crates/orderbook/src/arguments.rs             |  25 +-
 crates/orderbook/src/run.rs                   |  62 +-
 crates/shared/src/arguments.rs                | 139 +----
 crates/shared/src/bad_token/cache.rs          | 216 -------
 crates/shared/src/bad_token/mod.rs            |   2 -
 .../token_owner_finder/blockscout.rs          | 172 -----
 .../bad_token/token_owner_finder/ethplorer.rs | 179 ------
 .../bad_token/token_owner_finder/liquidity.rs | 113 ----
 .../src/bad_token/token_owner_finder/mod.rs   | 590 ------------------
 .../token_owner_finder/solvers/mod.rs         |  13 -
 .../token_owner_finder/solvers/solver_api.rs  |  30 -
 .../solvers/solver_finder.rs                  | 154 -----
 .../token_owner_finder/token_owner_list.rs    |  52 --
 crates/shared/src/bad_token/trace_call.rs     | 499 +--------------
 17 files changed, 8 insertions(+), 2319 deletions(-)
 delete mode 100644 crates/shared/src/bad_token/cache.rs
 delete mode 100644 crates/shared/src/bad_token/token_owner_finder/blockscout.rs
 delete mode 100644 crates/shared/src/bad_token/token_owner_finder/ethplorer.rs
 delete mode 100644 crates/shared/src/bad_token/token_owner_finder/liquidity.rs
 delete mode 100644 crates/shared/src/bad_token/token_owner_finder/mod.rs
 delete mode 100644 crates/shared/src/bad_token/token_owner_finder/solvers/mod.rs
 delete mode 100644 crates/shared/src/bad_token/token_owner_finder/solvers/solver_api.rs
 delete mode 100644 crates/shared/src/bad_token/token_owner_finder/solvers/solver_finder.rs
 delete mode 100644 crates/shared/src/bad_token/token_owner_finder/token_owner_list.rs

diff --git a/crates/autopilot/src/arguments.rs b/crates/autopilot/src/arguments.rs
index 410f8dba11..689be1705b 100644
--- a/crates/autopilot/src/arguments.rs
+++ b/crates/autopilot/src/arguments.rs
@@ -6,7 +6,6 @@ use {
     clap::ValueEnum,
     shared::{
         arguments::{FeeFactor, display_list, display_option, display_secret_option},
-        bad_token::token_owner_finder,
         http_client,
         price_estimation::{self, NativePriceEstimators},
     },
@@ -31,9 +30,6 @@ pub struct Arguments {
     #[clap(flatten)]
     pub http_client: http_client::Arguments,
 
-    #[clap(flatten)]
-    pub token_owner_finder: token_owner_finder::Arguments,
-
     #[clap(flatten)]
     pub price_estimation: price_estimation::Arguments,
 
@@ -54,11 +50,6 @@ pub struct Arguments {
     #[clap(long, env)]
     pub ethflow_indexing_start: Option,
 
-    /// A tracing Ethereum node URL to connect to, allowing a separate node URL
-    /// to be used exclusively for tracing calls.
- #[clap(long, env)] - pub tracing_node_url: Option, - #[clap(long, env, default_value = "0.0.0.0:9589")] pub metrics_address: SocketAddr, @@ -293,10 +284,8 @@ impl std::fmt::Display for Arguments { shared, order_quoting, http_client, - token_owner_finder, price_estimation, database_pool, - tracing_node_url, ethflow_contracts, ethflow_indexing_start, metrics_address, @@ -341,10 +330,8 @@ impl std::fmt::Display for Arguments { write!(f, "{shared}")?; write!(f, "{order_quoting}")?; write!(f, "{http_client}")?; - write!(f, "{token_owner_finder}")?; write!(f, "{price_estimation}")?; write!(f, "{database_pool}")?; - display_option(f, "tracing_node_url", tracing_node_url)?; writeln!(f, "ethflow_contracts: {ethflow_contracts:?}")?; writeln!(f, "ethflow_indexing_start: {ethflow_indexing_start:?}")?; writeln!(f, "metrics_address: {metrics_address}")?; diff --git a/crates/autopilot/src/run.rs b/crates/autopilot/src/run.rs index 8a57ee6233..1bc6d4f40f 100644 --- a/crates/autopilot/src/run.rs +++ b/crates/autopilot/src/run.rs @@ -27,9 +27,8 @@ use { alloy::{eips::BlockNumberOrTag, primitives::Address, providers::Provider}, chain::Chain, clap::Parser, - contracts::alloy::{BalancerV2Vault, GPv2Settlement, IUniswapV3Factory, WETH9}, + contracts::alloy::{BalancerV2Vault, GPv2Settlement, WETH9}, ethrpc::{Web3, block_stream::block_number_to_block_number_hash}, - futures::StreamExt, model::DomainSeparator, num::ToPrimitive, observe::metrics::LivenessChecking, @@ -37,11 +36,8 @@ use { account_balances::{self, BalanceSimulator}, arguments::tracing_config, bad_token::{ - cache::CachingDetector, instrumented::InstrumentedBadTokenDetectorExt, list_based::{ListBasedDetector, UnknownTokenStrategy}, - token_owner_finder, - trace_call::TraceCallDetector, }, baseline_solver::BaseTokens, code_fetching::CachedCodeFetcher, @@ -51,7 +47,6 @@ use { factory::{self, PriceEstimatorFactory}, native::NativePriceEstimating, }, - sources::{BaselineSource, uniswap_v2::UniV2BaselineSourceParameters}, token_info::{CachedTokenInfoFetcher, TokenInfoFetcher}, token_list::{AutoUpdatingTokenList, TokenListConfiguration}, }, @@ -240,14 +235,6 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { } addr }); - let vault = - vault_address.map(|address| BalancerV2Vault::Instance::new(address, web3.provider.clone())); - - let uniswapv3_factory = IUniswapV3Factory::Instance::deployed(&web3.provider) - .instrument(info_span!("uniswapv3_deployed")) - .await - .inspect_err(|err| tracing::warn!(%err, "error while fetching IUniswapV3Factory instance")) - .ok(); let chain = Chain::try_from(chain_id).expect("incorrect chain ID"); @@ -275,27 +262,6 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { .expect("failed to create gas price estimator"), ); - let baseline_sources = args - .shared - .baseline_sources - .clone() - .unwrap_or_else(|| shared::sources::defaults_for_network(&chain)); - tracing::info!(?baseline_sources, "using baseline sources"); - let univ2_sources = baseline_sources - .iter() - .filter_map(|source: &BaselineSource| { - UniV2BaselineSourceParameters::from_baseline_source(*source, &chain_id.to_string()) - }) - .chain(args.shared.custom_univ2_baseline_sources.iter().copied()); - let pair_providers: Vec<_> = futures::stream::iter(univ2_sources) - .then(|source: UniV2BaselineSourceParameters| { - let web3 = &web3; - async move { source.into_source(web3).await.unwrap().pair_provider } - }) - .collect() - .instrument(info_span!("pair_providers")) - .await; - let base_tokens = 
Arc::new(BaseTokens::new( *eth.contracts().weth().address(), &args.shared.base_tokens, @@ -305,39 +271,11 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { allowed_tokens.push(model::order::BUY_ETH_ADDRESS); let unsupported_tokens = args.unsupported_tokens.clone(); - let finder = token_owner_finder::init( - &args.token_owner_finder, - web3.clone(), - &chain, - &http_factory, - &pair_providers, - vault.as_ref(), - uniswapv3_factory.as_ref(), - &base_tokens, - *eth.contracts().settlement().address(), - ) - .instrument(info_span!("token_owner_finder_init")) - .await - .expect("failed to initialize token owner finders"); - - let trace_call_detector = args.tracing_node_url.as_ref().map(|tracing_node_url| { - CachingDetector::new( - Box::new(TraceCallDetector::new( - shared::ethrpc::web3(&args.shared.ethrpc, tracing_node_url, "trace"), - *eth.contracts().settlement().address(), - finder, - )), - args.shared.token_quality_cache_expiry, - args.shared.token_quality_cache_prefetch_time, - ) - }); let bad_token_detector = Arc::new( ListBasedDetector::new( allowed_tokens, unsupported_tokens, - trace_call_detector - .map(|detector| UnknownTokenStrategy::Forward(detector)) - .unwrap_or(UnknownTokenStrategy::Allow), + UnknownTokenStrategy::Allow, ) .instrumented(), ); diff --git a/crates/e2e/src/setup/services.rs b/crates/e2e/src/setup/services.rs index 7e2b4afd04..74226f468f 100644 --- a/crates/e2e/src/setup/services.rs +++ b/crates/e2e/src/setup/services.rs @@ -157,9 +157,7 @@ impl<'a> Services<'a> { fn api_autopilot_solver_arguments(&self) -> impl Iterator + use<> { [ - "--baseline-sources=None".to_string(), "--network-block-interval=1s".to_string(), - "--solver-competition-auth=super_secret_key".to_string(), format!( "--settlement-contract-address={:?}", self.contracts.gp_settlement.address() diff --git a/crates/orderbook/src/arguments.rs b/crates/orderbook/src/arguments.rs index a5dcad6de6..11273c5406 100644 --- a/crates/orderbook/src/arguments.rs +++ b/crates/orderbook/src/arguments.rs @@ -3,8 +3,7 @@ use { chrono::{DateTime, Utc}, reqwest::Url, shared::{ - arguments::{FeeFactor, display_option, display_secret_option}, - bad_token::token_owner_finder, + arguments::{FeeFactor, display_secret_option}, http_client, price_estimation::{self, NativePriceEstimators}, }, @@ -22,20 +21,12 @@ pub struct Arguments { #[clap(flatten)] pub http_client: http_client::Arguments, - #[clap(flatten)] - pub token_owner_finder: token_owner_finder::Arguments, - #[clap(flatten)] pub price_estimation: price_estimation::Arguments, #[clap(flatten)] pub database_pool: shared::arguments::DatabasePoolConfig, - /// A tracing Ethereum node URL to connect to, allowing a separate node URL - /// to be used exclusively for tracing calls. - #[clap(long, env)] - pub tracing_node_url: Option, - #[clap(long, env, default_value = "0.0.0.0:8080")] pub bind_address: SocketAddr, @@ -115,11 +106,6 @@ pub struct Arguments { #[clap(long, env, action = clap::ArgAction::Set, default_value = "false")] pub eip1271_skip_creation_validation: bool, - /// If solvable orders haven't been successfully updated in this many blocks - /// attempting to get them errors and our liveness check fails. - #[clap(long, env, default_value = "24")] - pub solvable_orders_max_update_age_blocks: u64, - /// Max number of limit orders per user. 
#[clap(long, env, default_value = "10")] pub max_limit_orders_per_user: u64, @@ -182,10 +168,8 @@ impl std::fmt::Display for Arguments { shared, order_quoting, http_client, - token_owner_finder, price_estimation, database_pool, - tracing_node_url, bind_address, min_order_validity_period, max_order_validity_period, @@ -195,7 +179,6 @@ impl std::fmt::Display for Arguments { banned_users_max_cache_size, allowed_tokens, eip1271_skip_creation_validation, - solvable_orders_max_update_age_blocks, native_price_estimators, fast_price_estimation_results_required, max_limit_orders_per_user, @@ -213,10 +196,8 @@ impl std::fmt::Display for Arguments { write!(f, "{shared}")?; write!(f, "{order_quoting}")?; write!(f, "{http_client}")?; - write!(f, "{token_owner_finder}")?; write!(f, "{price_estimation}")?; write!(f, "{database_pool}")?; - display_option(f, "tracing_node_url", tracing_node_url)?; writeln!(f, "bind_address: {bind_address}")?; let _intentionally_ignored = db_url; writeln!(f, "db_url: SECRET")?; @@ -244,10 +225,6 @@ impl std::fmt::Display for Arguments { f, "eip1271_skip_creation_validation: {eip1271_skip_creation_validation}" )?; - writeln!( - f, - "solvable_orders_max_update_age_blocks: {solvable_orders_max_update_age_blocks}", - )?; writeln!(f, "native_price_estimators: {native_price_estimators}")?; writeln!( f, diff --git a/crates/orderbook/src/run.rs b/crates/orderbook/src/run.rs index e6f9fa0f42..5e353e3c33 100644 --- a/crates/orderbook/src/run.rs +++ b/crates/orderbook/src/run.rs @@ -18,11 +18,9 @@ use { ChainalysisOracle, GPv2Settlement, HooksTrampoline, - IUniswapV3Factory, WETH9, support::Balances, }, - futures::StreamExt, model::{DomainSeparator, order::BUY_ETH_ADDRESS}, num::ToPrimitive, observe::metrics::{DEFAULT_METRICS_PORT, serve_metrics}, @@ -31,11 +29,8 @@ use { account_balances::{self, BalanceSimulator}, arguments::tracing_config, bad_token::{ - cache::CachingDetector, instrumented::InstrumentedBadTokenDetectorExt, list_based::{ListBasedDetector, UnknownTokenStrategy}, - token_owner_finder, - trace_call::TraceCallDetector, }, baseline_solver::BaseTokens, code_fetching::CachedCodeFetcher, @@ -50,7 +45,6 @@ use { native::NativePriceEstimating, }, signature_validator, - sources::{self, BaselineSource, uniswap_v2::UniV2BaselineSourceParameters}, token_info::{CachedTokenInfoFetcher, TokenInfoFetcher}, }, std::{future::Future, net::SocketAddr, sync::Arc, time::Duration}, @@ -154,8 +148,6 @@ pub async fn run(args: Arguments) { } } }); - let vault = - vault_address.map(|address| BalancerV2Vault::Instance::new(address, web3.provider.clone())); let hooks_contract = match args.shared.hooks_contract_address { Some(address) => HooksTrampoline::Instance::new(address, web3.provider.clone()), @@ -204,26 +196,6 @@ pub async fn run(args: Arguments) { .expect("failed to create gas price estimator"), )); - let baseline_sources = args - .shared - .baseline_sources - .clone() - .unwrap_or_else(|| sources::defaults_for_network(&chain)); - tracing::info!(?baseline_sources, "using baseline sources"); - let univ2_sources = baseline_sources - .iter() - .filter_map(|source: &BaselineSource| { - UniV2BaselineSourceParameters::from_baseline_source(*source, &chain_id.to_string()) - }) - .chain(args.shared.custom_univ2_baseline_sources.iter().copied()); - let pair_providers: Vec<_> = futures::stream::iter(univ2_sources) - .then(|source: UniV2BaselineSourceParameters| { - let web3 = &web3; - async move { source.into_source(web3).await.unwrap().pair_provider } - }) - .collect() - .await; - let base_tokens 
= Arc::new(BaseTokens::new( *native_token.address(), &args.shared.base_tokens, @@ -233,43 +205,11 @@ pub async fn run(args: Arguments) { allowed_tokens.push(BUY_ETH_ADDRESS); let unsupported_tokens = args.unsupported_tokens.clone(); - let uniswapv3_factory = IUniswapV3Factory::Instance::deployed(&web3.provider) - .await - .inspect_err(|err| tracing::warn!(%err, "error while fetching IUniswapV3Factory instance")) - .ok(); - - let finder = token_owner_finder::init( - &args.token_owner_finder, - web3.clone(), - &chain, - &http_factory, - &pair_providers, - vault.as_ref(), - uniswapv3_factory.as_ref(), - &base_tokens, - *settlement_contract.address(), - ) - .await - .expect("failed to initialize token owner finders"); - - let trace_call_detector = args.tracing_node_url.as_ref().map(|tracing_node_url| { - CachingDetector::new( - Box::new(TraceCallDetector::new( - shared::ethrpc::web3(&args.shared.ethrpc, tracing_node_url, "trace"), - *settlement_contract.address(), - finder, - )), - args.shared.token_quality_cache_expiry, - args.shared.token_quality_cache_prefetch_time, - ) - }); let bad_token_detector = Arc::new( ListBasedDetector::new( allowed_tokens, unsupported_tokens, - trace_call_detector - .map(|detector| UnknownTokenStrategy::Forward(detector)) - .unwrap_or(UnknownTokenStrategy::Allow), + UnknownTokenStrategy::Allow, ) .instrumented(), ); diff --git a/crates/shared/src/arguments.rs b/crates/shared/src/arguments.rs index 9f9238f006..3474375cbb 100644 --- a/crates/shared/src/arguments.rs +++ b/crates/shared/src/arguments.rs @@ -2,18 +2,14 @@ //! the binaries. use { - crate::{ - gas_price_estimation::GasEstimatorType, - sources::{BaselineSource, uniswap_v2::UniV2BaselineSourceParameters}, - tenderly_api, - }, + crate::{gas_price_estimation::GasEstimatorType, tenderly_api}, alloy::primitives::Address, anyhow::{Context, Result, ensure}, observe::TracingConfig, std::{ collections::HashSet, fmt::{self, Display, Formatter}, - num::{NonZeroU32, NonZeroU64}, + num::NonZeroU32, str::FromStr, time::Duration, }, @@ -196,59 +192,6 @@ pub struct Arguments { #[clap(long, env, use_value_delimiter = true)] pub base_tokens: Vec
, - /// Which Liquidity sources to be used by Price Estimator. - #[clap(long, env, value_enum, ignore_case = true, use_value_delimiter = true)] - pub baseline_sources: Option>, - - /// List of non hardcoded univ2-like contracts. - /// - /// For example to add a univ2-like liquidity source the argument could be - /// set to - /// - /// 0x0000000000000000000000000000000000000001|0x0000000000000000000000000000000000000000000000000000000000000002 - /// - /// which sets the router address to 0x01 and the init code digest to 0x02. - #[clap(long, env, value_enum, ignore_case = true, use_value_delimiter = true)] - pub custom_univ2_baseline_sources: Vec, - - /// The number of blocks kept in the pool cache. - #[clap(long, env, default_value = "10")] - pub pool_cache_blocks: NonZeroU64, - - /// The number of pairs that are automatically updated in the pool cache. - #[clap(long, env, default_value = "4")] - pub pool_cache_maximum_recent_block_age: u64, - - /// How often to retry requests in the pool cache. - #[clap(long, env, default_value = "5")] - pub pool_cache_maximum_retries: u32, - - /// How long to sleep in seconds between retries in the pool cache. - #[clap(long, env, default_value = "1s", value_parser = humantime::parse_duration)] - pub pool_cache_delay_between_retries: Duration, - - /// If solvers should use internal buffers to improve solution quality. - #[clap(long, env, action = clap::ArgAction::Set, default_value = "false")] - pub use_internal_buffers: bool, - - /// Value of the authorization header for the solver competition post api. - #[clap(long, env)] - pub solver_competition_auth: Option, - - /// If liquidity pool fetcher has caching mechanism, this argument defines - /// how old pool data is allowed to be before updating - #[clap( - long, - env, - default_value = "30s", - value_parser = humantime::parse_duration, - )] - pub liquidity_fetcher_max_age_update: Duration, - - /// The number of pools to initially populate the UniswapV3 cache - #[clap(long, env, default_value = "100")] - pub max_pools_to_initialize_cache: usize, - /// The time between new blocks on the network. #[clap(long, env, value_parser = humantime::parse_duration)] pub network_block_interval: Option, @@ -279,31 +222,6 @@ pub struct Arguments { #[clap(long, env)] pub balancer_v2_vault_address: Option
, - /// The amount of time a classification of a token into good or - /// bad is valid for. - #[clap( - long, - env, - default_value = "10m", - value_parser = humantime::parse_duration, - )] - pub token_quality_cache_expiry: Duration, - - /// How long before expiry the token quality cache should try to update the - /// token quality in the background. This is useful to make sure that token - /// quality for every cached token is usable at all times. This value - /// has to be smaller than `token_quality_cache_expiry` - /// This configuration also affects the period of the token quality - /// maintenance job. Maintenance period = - /// `token_quality_cache_prefetch_time` / 2 - #[clap( - long, - env, - default_value = "2m", - value_parser = humantime::parse_duration, - )] - pub token_quality_cache_prefetch_time: Duration, - /// Custom volume fees for token buckets. /// Format: "factor:token1;token2;..." (e.g., /// "0:0xA0b86...;0x6B175...;0xdAC17...") Orders where BOTH tokens are @@ -397,13 +315,6 @@ impl Display for Arguments { simulation_node_url, gas_estimators, base_tokens, - baseline_sources, - pool_cache_blocks, - pool_cache_maximum_recent_block_age, - pool_cache_maximum_retries, - pool_cache_delay_between_retries, - use_internal_buffers, - solver_competition_auth, network_block_interval, settlement_contract_address, balances_contract_address, @@ -411,11 +322,6 @@ impl Display for Arguments { native_token_address, hooks_contract_address, balancer_v2_vault_address, - custom_univ2_baseline_sources, - liquidity_fetcher_max_age_update, - max_pools_to_initialize_cache, - token_quality_cache_expiry, - token_quality_cache_prefetch_time, tracing, volume_fee_bucket_overrides, enable_sell_equals_buy_volume_fee, @@ -430,26 +336,6 @@ impl Display for Arguments { display_option(f, "simulation_node_url", simulation_node_url)?; writeln!(f, "gas_estimators: {gas_estimators:?}")?; writeln!(f, "base_tokens: {base_tokens:?}")?; - writeln!(f, "baseline_sources: {baseline_sources:?}")?; - writeln!(f, "pool_cache_blocks: {pool_cache_blocks}")?; - writeln!( - f, - "pool_cache_maximum_recent_block_age: {pool_cache_maximum_recent_block_age}" - )?; - writeln!( - f, - "pool_cache_maximum_retries: {pool_cache_maximum_retries}" - )?; - writeln!( - f, - "pool_cache_delay_between_retries: {pool_cache_delay_between_retries:?}" - )?; - writeln!(f, "use_internal_buffers: {use_internal_buffers}")?; - display_secret_option( - f, - "solver_competition_auth", - solver_competition_auth.as_ref(), - )?; display_option( f, "network_block_interval", @@ -485,27 +371,6 @@ impl Display for Arguments { "balancer_v2_vault_address", &balancer_v2_vault_address.map(|a| format!("{a:?}")), )?; - display_list( - f, - "custom_univ2_baseline_sources", - custom_univ2_baseline_sources, - )?; - writeln!( - f, - "liquidity_fetcher_max_age_update: {liquidity_fetcher_max_age_update:?}" - )?; - writeln!( - f, - "max_pools_to_initialize_cache: {max_pools_to_initialize_cache}" - )?; - writeln!( - f, - "token_quality_cache_expiry: {token_quality_cache_expiry:?}" - )?; - writeln!( - f, - "token_quality_cache_prefetch_time: {token_quality_cache_prefetch_time:?}" - )?; write!(f, "{tracing:?}")?; writeln!( f, diff --git a/crates/shared/src/bad_token/cache.rs b/crates/shared/src/bad_token/cache.rs deleted file mode 100644 index 86743a33a3..0000000000 --- a/crates/shared/src/bad_token/cache.rs +++ /dev/null @@ -1,216 +0,0 @@ -use { - super::{BadTokenDetecting, TokenQuality}, - alloy::primitives::Address, - anyhow::Result, - dashmap::DashMap, - 
futures::future::join_all, - std::{ - ops::Div, - sync::Arc, - time::{Duration, Instant}, - }, - tracing::instrument, -}; - -pub struct CachingDetector { - inner: Box, - cache: DashMap, - cache_expiry: Duration, - prefetch_time: Duration, -} - -#[async_trait::async_trait] -impl BadTokenDetecting for CachingDetector { - #[instrument(skip_all)] - async fn detect(&self, token: Address) -> Result { - if let Some(quality) = self.get_from_cache(&token, Instant::now()) { - return Ok(quality); - } - - let result = self.inner.detect(token).await?; - self.cache.insert(token, (Instant::now(), result.clone())); - Ok(result) - } -} - -impl CachingDetector { - pub fn new( - inner: Box, - cache_expiry: Duration, - prefetch_time: Duration, - ) -> Arc { - assert!( - cache_expiry > prefetch_time, - "token quality cache prefetch time needs to be less than token quality cache expiry" - ); - let detector = Arc::new(Self { - inner, - cache: Default::default(), - cache_expiry, - prefetch_time, - }); - detector.clone().spawn_maintenance_task(); - detector - } - - fn get_from_cache(&self, token: &Address, now: Instant) -> Option { - let (instant, quality) = self.cache.get(token)?.value().clone(); - let still_valid = now.saturating_duration_since(instant) < self.cache_expiry; - still_valid.then_some(quality) - } - - fn insert_many_into_cache(&self, tokens: impl Iterator) { - let now = Instant::now(); - tokens.into_iter().for_each(|(token, quality)| { - self.cache.insert(token, (now, quality)); - }); - } - - fn spawn_maintenance_task(self: Arc) { - // We need to prefetch the token quality the `prefetch_time` before the cache - // expires - let prefetch_time_to_expire = self.cache_expiry - self.prefetch_time; - // The maintenance frequency has to be at least double of the prefetch time - // frequency in order to guarantee that the prefetch time is executed - // before the token quality expires. This is because of the - // Nyquist–Shannon sampling theorem. - let maintenance_timeout = self.prefetch_time.div(2); - let detector = Arc::clone(&self); - - tokio::task::spawn(async move { - loop { - let start = Instant::now(); - - let futures = detector.cache.iter().filter_map(|entry| { - let (token, (instant, _)) = entry.pair(); - let (token, instant) = (*token, *instant); - if start.saturating_duration_since(instant) < prefetch_time_to_expire { - return None; - } - let detector = detector.clone(); - Some(async move { - match detector.inner.detect(token).await { - Ok(result) => Some((token, result)), - Err(err) => { - tracing::warn!( - ?token, - ?err, - "unable to determine token quality in the background task" - ); - None - } - } - }) - }); - - let results = join_all(futures).await; - detector.insert_many_into_cache(results.into_iter().flatten()); - - let remaining_sleep = maintenance_timeout.saturating_sub(start.elapsed()); - tokio::time::sleep(remaining_sleep).await; - } - }); - } -} - -#[cfg(test)] -mod tests { - use {super::*, crate::bad_token::MockBadTokenDetecting, futures::FutureExt}; - - #[tokio::test] - async fn goes_to_cache() { - // Would panic if called twice. 
- let mut inner = MockBadTokenDetecting::new(); - inner - .expect_detect() - .times(1) - .returning(|_| Ok(TokenQuality::Good)); - - let detector = CachingDetector::new( - Box::new(inner), - Duration::from_secs(1), - Duration::from_millis(200), - ); - - for _ in 0..2 { - let result = detector - .detect(Address::with_last_byte(0)) - .now_or_never() - .unwrap(); - assert!(result.unwrap().is_good()); - } - } - - #[tokio::test] - async fn cache_expires() { - let inner = MockBadTokenDetecting::new(); - let token = Address::with_last_byte(0); - let detector = CachingDetector::new( - Box::new(inner), - Duration::from_secs(2), - Duration::from_millis(200), - ); - let now = Instant::now(); - detector.cache.insert(token, (now, TokenQuality::Good)); - assert!( - detector - .get_from_cache(&token, now + Duration::from_secs(1)) - .is_some() - ); - assert!( - detector - .get_from_cache(&token, now + Duration::from_secs(3)) - .is_none() - ); - } - - #[tokio::test] - async fn cache_prefetch_works() { - let mut inner = MockBadTokenDetecting::new(); - // we expect it to be called twice: first time + prefetch time - let mut seq = mockall::Sequence::new(); - // First call returns Ok(TokenQuality::Good) - inner - .expect_detect() - .times(1) - .in_sequence(&mut seq) - .returning(|_| Ok(TokenQuality::Good)); - // Second call returns Ok(TokenQuality::Bad) - inner - .expect_detect() - .times(1) - .in_sequence(&mut seq) - .returning(|_| { - Ok(TokenQuality::Bad { - reason: "bad token".to_string(), - }) - }); - - let detector = CachingDetector::new( - Box::new(inner), - Duration::from_millis(200), - Duration::from_millis(50), - ); - - let result = detector - .detect(Address::with_last_byte(0)) - .now_or_never() - .unwrap(); - assert!(result.unwrap().is_good()); - // Check that the result is the same because we haven't reached the prefetch - // time yet - tokio::time::sleep(Duration::from_millis(100)).await; - let result = detector - .detect(Address::with_last_byte(0)) - .now_or_never() - .unwrap(); - assert!(result.unwrap().is_good()); - // We wait so the prefetch fetches the data - tokio::time::sleep(Duration::from_millis(70)).await; - let result = detector - .detect(Address::with_last_byte(0)) - .now_or_never() - .unwrap(); - assert!(!result.unwrap().is_good()); - } -} diff --git a/crates/shared/src/bad_token/mod.rs b/crates/shared/src/bad_token/mod.rs index 4cb030db2a..f4d146e74c 100644 --- a/crates/shared/src/bad_token/mod.rs +++ b/crates/shared/src/bad_token/mod.rs @@ -1,7 +1,5 @@ -pub mod cache; pub mod instrumented; pub mod list_based; -pub mod token_owner_finder; pub mod trace_call; use {alloy::primitives::Address, anyhow::Result}; diff --git a/crates/shared/src/bad_token/token_owner_finder/blockscout.rs b/crates/shared/src/bad_token/token_owner_finder/blockscout.rs deleted file mode 100644 index dacd5fd0d1..0000000000 --- a/crates/shared/src/bad_token/token_owner_finder/blockscout.rs +++ /dev/null @@ -1,172 +0,0 @@ -use { - super::TokenOwnerProposing, - alloy::primitives::Address, - anyhow::Result, - chain::Chain, - prometheus::IntCounterVec, - prometheus_metric_storage::MetricStorage, - rate_limit::{RateLimiter, Strategy, back_off}, - reqwest::{Client, Url}, - serde::Deserialize, -}; - -pub struct BlockscoutTokenOwnerFinder { - client: Client, - base: Url, - api_key: Option, - rate_limiter: Option, -} - -impl BlockscoutTokenOwnerFinder { - pub fn with_network(client: Client, chain: &Chain) -> Result { - let base_url = match chain { - Chain::Mainnet => "https://eth.blockscout.com/api", - Chain::Goerli => 
"https://eth-goerli.blockscout.com/api", - Chain::Gnosis => "https://blockscout.com/xdai/mainnet/api", - Chain::Sepolia => "https://eth-sepolia.blockscout.com/api", - Chain::ArbitrumOne => "https://arbitrum.blockscout.com/api", - Chain::Base => "https://base.blockscout.com/api", - _ => anyhow::bail!("Chain not supported"), - }; - - Ok(Self { - client, - base: Url::parse(base_url)?, - api_key: None, - rate_limiter: None, - }) - } - - pub fn with_base_url(&mut self, base_url: Url) -> &mut Self { - self.base = base_url; - self - } - - pub fn with_api_key(&mut self, api_key: String) -> &mut Self { - self.api_key = Some(api_key); - self - } - - pub fn with_rate_limiter(&mut self, strategy: Strategy) -> &mut Self { - self.rate_limiter = Some(RateLimiter::from_strategy( - strategy, - "blockscout".to_owned(), - )); - self - } - - async fn query_owners(&self, token: Address) -> Result> { - let mut url = self.base.clone(); - url.query_pairs_mut() - .append_pair("module", "token") - .append_pair("action", "getTokenHolders") - .append_pair("contractaddress", &format!("{token:#x}")); - - // Don't log the API key! - tracing::debug!(%url, "Querying Blockscout API"); - - if let Some(api_key) = &self.api_key { - url.query_pairs_mut().append_pair("apikey", api_key); - } - - let request = self.client.get(url).send(); - let response = match &self.rate_limiter { - Some(limiter) => limiter.execute(request, back_off::on_http_429).await??, - _ => request.await?, - }; - let status = response.status(); - let status_result = response.error_for_status_ref().map(|_| ()); - let body = response.text().await?; - - tracing::debug!(%status, %body, "Response from Blockscout API"); - - status_result?; - let parsed = serde_json::from_str::(&body)?; - - // We technically only need one candidate, returning the top 2 in case there is - // a race condition and tokens have just been transferred out - Ok(parsed - .result - .into_iter() - .map(|owner| owner.address) - .take(2) - .collect()) - } -} - -#[derive(Deserialize)] -struct Response { - result: Vec, -} - -#[derive(Deserialize)] -struct TokenOwner { - address: Address, -} - -#[derive(MetricStorage, Clone, Debug)] -#[metric(subsystem = "blockscout_token_owner_finding")] -struct Metrics { - /// Tracks number of "ok" or "err" responses from blockscout. 
- #[metric(labels("result"))] - results: IntCounterVec, -} - -#[async_trait::async_trait] -impl TokenOwnerProposing for BlockscoutTokenOwnerFinder { - async fn find_candidate_owners(&self, token: Address) -> Result> { - let metric = &Metrics::instance(observe::metrics::get_storage_registry()) - .unwrap() - .results; - - match self.query_owners(token).await { - Ok(ok) => { - metric.with_label_values(&["ok"]).inc(); - Ok(ok) - } - Err(err) => { - tracing::warn!(?err, "error finding token owners with Blockscout"); - metric.with_label_values(&["err"]).inc(); - Err(err) - } - } - } -} - -#[cfg(test)] -mod tests { - use {super::*, alloy::primitives::address}; - - #[tokio::test] - #[ignore] - async fn test_blockscout_token_finding_mainnet() { - let finder = - BlockscoutTokenOwnerFinder::with_network(Client::default(), &Chain::Mainnet).unwrap(); - let owners = finder - .find_candidate_owners(address!("1337BedC9D22ecbe766dF105c9623922A27963EC")) - .await; - assert!(!owners.unwrap().is_empty()); - } - - #[tokio::test] - #[ignore] - async fn test_blockscout_token_finding_xdai() { - let finder = - BlockscoutTokenOwnerFinder::with_network(Client::default(), &Chain::Gnosis).unwrap(); - let owners = finder - .find_candidate_owners(address!("1337BedC9D22ecbe766dF105c9623922A27963EC")) - .await; - assert!(!owners.unwrap().is_empty()); - } - - #[tokio::test] - #[ignore] - async fn test_blockscout_token_finding_no_owners() { - let finder = - BlockscoutTokenOwnerFinder::with_network(Client::default(), &Chain::Gnosis).unwrap(); - let owners = finder - .find_candidate_owners(address!("000000000000000000000000000000000000def1")) - .await; - assert!(owners.unwrap().is_empty()); - } -} diff --git a/crates/shared/src/bad_token/token_owner_finder/ethplorer.rs b/crates/shared/src/bad_token/token_owner_finder/ethplorer.rs deleted file mode 100644 index 7aa37517e4..0000000000 --- a/crates/shared/src/bad_token/token_owner_finder/ethplorer.rs +++ /dev/null @@ -1,179 +0,0 @@ -use { - super::TokenOwnerProposing, - alloy::primitives::Address, - anyhow::{Result, ensure}, - chain::Chain, - prometheus::IntCounterVec, - prometheus_metric_storage::MetricStorage, - rate_limit::{RateLimiter, Strategy, back_off}, - reqwest::{Client, StatusCode, Url}, - serde::Deserialize, -}; - -const BASE: &str = "https://api.ethplorer.io"; -const FREE_API_KEY: &str = "freekey"; - -pub struct EthplorerTokenOwnerFinder { - client: Client, - base: Url, - api_key: String, - - /// The low tiers for Ethplorer have very aggressive rate limiting, so be - /// sure to setup a rate limiter for Ethplorer requests. 
- rate_limiter: Option, - - metrics: &'static Metrics, -} - -impl EthplorerTokenOwnerFinder { - pub fn try_with_network( - client: Client, - api_key: Option, - chain: &Chain, - ) -> Result { - ensure!( - *chain == Chain::Mainnet, - "Ethplorer API unsupported network" - ); - Ok(Self { - client, - base: Url::try_from(BASE).unwrap(), - api_key: api_key.unwrap_or_else(|| FREE_API_KEY.to_owned()), - rate_limiter: None, - metrics: Metrics::instance(observe::metrics::get_storage_registry())?, - }) - } - - pub fn with_base_url(&mut self, base_url: Url) -> &mut Self { - self.base = base_url; - self - } - - pub fn with_rate_limiter(&mut self, strategy: Strategy) -> &mut Self { - self.rate_limiter = Some(RateLimiter::from_strategy(strategy, "ethplorer".to_owned())); - self - } - - async fn query_owners(&self, token: Address) -> Result> { - let mut url = crate::url::join(&self.base, &format!("getTopTokenHolders/{token:?}")); - // We technically only need one candidate, returning the top 2 in case there - // is a race condition and tokens have just been transferred out. - url.query_pairs_mut().append_pair("limit", "2"); - - tracing::debug!(%url, "querying Ethplorer"); - // Don't log the API key! - url.query_pairs_mut().append_pair("apiKey", &self.api_key); - - let request = self.client.get(url).send(); - let response = match &self.rate_limiter { - Some(limiter) => limiter.execute(request, back_off::on_http_429).await??, - _ => request.await?, - }; - - let status = response.status(); - let status_result = response.error_for_status_ref().map(|_| ()); - let body = response.text().await?; - - tracing::debug!(%status, %body, "response from Ethplorer API"); - - // We need some special handling for "not a token contract" errors. In - // this case, we just want to return an empty token holder list to conform - // to the expectations of the `TokenHolderProposing` trait. - if status == StatusCode::BAD_REQUEST { - let err = serde_json::from_str::(&body)?; - if err.not_token_contract() { - return Ok(Default::default()); - } - } - status_result?; - - let parsed = serde_json::from_str::(&body)?; - - Ok(parsed - .holders - .into_iter() - .map(|holder| holder.address) - .collect()) - } -} - -#[derive(Deserialize)] -struct Response { - holders: Vec, -} - -#[derive(Deserialize)] -struct Holder { - address: Address, -} - -#[derive(Deserialize)] -struct Error { - error: ErrorData, -} - -#[derive(Deserialize)] -struct ErrorData { - code: i64, -} - -impl Error { - fn not_token_contract(&self) -> bool { - // https://github.com/EverexIO/Ethplorer/wiki/Ethplorer-API#error-codes - self.error.code == 150 - } -} - -#[derive(MetricStorage, Clone, Debug)] -#[metric(subsystem = "ethplorer_token_owner_finding")] -struct Metrics { - /// Tracks number of "ok" or "err" responses from ethplorer. 
- #[metric(labels("result"))] - results: IntCounterVec, -} - -#[async_trait::async_trait] -impl TokenOwnerProposing for EthplorerTokenOwnerFinder { - async fn find_candidate_owners(&self, token: Address) -> Result> { - let metric = &self.metrics.results; - let result = self.query_owners(token).await; - match &result { - Ok(_) => metric.with_label_values(&["ok"]).inc(), - Err(err) => { - tracing::warn!(?err, "error finding token owners with Ethplorer"); - metric.with_label_values(&["err"]).inc(); - } - } - - result - } -} - -#[cfg(test)] -mod tests { - use {super::*, alloy::primitives::address}; - - #[tokio::test] - #[ignore] - async fn token_finding_mainnet() { - let finder = - EthplorerTokenOwnerFinder::try_with_network(Client::default(), None, &Chain::Mainnet) - .unwrap(); - let owners = finder - .find_candidate_owners(address!("1337BedC9D22ecbe766dF105c9623922A27963EC")) - .await; - assert!(!owners.unwrap().is_empty()); - } - - #[tokio::test] - #[ignore] - async fn returns_no_owners_on_invalid_token() { - let finder = - EthplorerTokenOwnerFinder::try_with_network(Client::default(), None, &Chain::Gnosis) - .unwrap(); - let owners = finder - .find_candidate_owners(address!("000000000000000000000000000000000000def1")) - .await; - assert!(owners.unwrap().is_empty()); - } -} diff --git a/crates/shared/src/bad_token/token_owner_finder/liquidity.rs b/crates/shared/src/bad_token/token_owner_finder/liquidity.rs deleted file mode 100644 index a055df1de4..0000000000 --- a/crates/shared/src/bad_token/token_owner_finder/liquidity.rs +++ /dev/null @@ -1,113 +0,0 @@ -//! Module containing liquidity-based token owner finding implementations. - -use { - super::TokenOwnerProposing, - crate::sources::{uniswap_v2::pair_provider::PairProvider, uniswap_v3_pair_provider}, - alloy::{eips::BlockNumberOrTag, primitives::Address}, - anyhow::Result, - contracts::alloy::{BalancerV2Vault, IUniswapV3Factory}, - model::TokenPair, -}; - -pub struct UniswapLikePairProviderFinder { - pub inner: PairProvider, - pub base_tokens: Vec
, -} - -#[async_trait::async_trait] -impl TokenOwnerProposing for UniswapLikePairProviderFinder { - async fn find_candidate_owners(&self, token: Address) -> Result> { - Ok(self - .base_tokens - .iter() - .filter_map(|base_token| TokenPair::new(*base_token, token)) - .map(|pair| self.inner.pair_address(&pair)) - .collect()) - } -} - -/// The balancer vault contract contains all the balances of all pools. -pub struct BalancerVaultFinder(pub BalancerV2Vault::Instance); - -#[async_trait::async_trait] -impl TokenOwnerProposing for BalancerVaultFinder { - async fn find_candidate_owners(&self, _: Address) -> Result> { - Ok(vec![*self.0.address()]) - } -} - -pub struct UniswapV3Finder { - pub factory: IUniswapV3Factory::Instance, - pub base_tokens: Vec
, - fee_values: Vec, -} - -#[derive(Debug, Clone, Copy, clap::ValueEnum)] -pub enum FeeValues { - /// Use hardcoded list - Static, - /// Fetch on creation based on events queried from node. - /// Some nodes struggle with the request and take a long time to respond - /// leading to timeouts. - Dynamic, -} - -impl UniswapV3Finder { - pub async fn new( - factory: IUniswapV3Factory::Instance, - base_tokens: Vec
, - fee_values: FeeValues, - ) -> Result { - let fee_values = match fee_values { - FeeValues::Static => vec![500, 3000, 10000, 100], - // We fetch these once at start up because we don't expect them to change often. - // Alternatively could use a time based cache. - FeeValues::Dynamic => Self::fee_values(&factory).await?, - }; - tracing::debug!(?fee_values); - Ok(Self { - factory, - base_tokens, - fee_values, - }) - } - - // Possible fee values as given by - // https://github.com/Uniswap/v3-core/blob/9161f9ae4aaa109f7efdff84f1df8d4bc8bfd042/contracts/UniswapV3Factory.sol#L26 - async fn fee_values(factory: &IUniswapV3Factory::Instance) -> Result> { - // We expect there to be few of these kind of events (currently there are 4) so - // fetching all of them is fine. Alternatively we could index these - // events in the database. - let events = factory - .FeeAmountEnabled_filter() - .from_block(BlockNumberOrTag::Earliest) - .to_block(BlockNumberOrTag::Latest) - .query() - .await?; - let fee_values = events - .into_iter() - .map(|(enabled, _)| { - enabled - .fee - .try_into() - .expect("uint24 always fits inside u32") - }) - .collect(); - Ok(fee_values) - } -} - -#[async_trait::async_trait] -impl TokenOwnerProposing for UniswapV3Finder { - async fn find_candidate_owners(&self, token: Address) -> Result> { - Ok(self - .base_tokens - .iter() - .filter_map(|base_token| TokenPair::new(*base_token, token)) - .flat_map(|pair| self.fee_values.iter().map(move |fee| (pair, *fee))) - .map(|(pair, fee)| { - uniswap_v3_pair_provider::pair_address(self.factory.address(), &pair, fee) - }) - .collect()) - } -} diff --git a/crates/shared/src/bad_token/token_owner_finder/mod.rs b/crates/shared/src/bad_token/token_owner_finder/mod.rs deleted file mode 100644 index 12319b1778..0000000000 --- a/crates/shared/src/bad_token/token_owner_finder/mod.rs +++ /dev/null @@ -1,590 +0,0 @@ -pub mod blockscout; -pub mod ethplorer; -pub mod liquidity; -pub mod solvers; -pub mod token_owner_list; - -use { - self::{ - blockscout::BlockscoutTokenOwnerFinder, - liquidity::{ - BalancerVaultFinder, - FeeValues, - UniswapLikePairProviderFinder, - UniswapV3Finder, - }, - }, - crate::{ - arguments::{display_list, display_option, display_secret_option}, - bad_token::token_owner_finder::{ - ethplorer::EthplorerTokenOwnerFinder, - solvers::{ - solver_api::SolverConfiguration, - solver_finder::AutoUpdatingSolverTokenOwnerFinder, - }, - token_owner_list::TokenOwnerList, - }, - baseline_solver::BaseTokens, - ethrpc::{MAX_BATCH_SIZE, Web3}, - http_client::HttpClientFactory, - sources::uniswap_v2::pair_provider::PairProvider, - }, - alloy::primitives::{Address, U256}, - anyhow::{Context, Result}, - chain::Chain, - contracts::alloy::{BalancerV2Vault, ERC20, IUniswapV3Factory}, - ethrpc::alloy::{ProviderLabelingExt, errors::ContractErrorExt}, - futures::{Stream, StreamExt as _}, - rate_limit::Strategy, - reqwest::Url, - std::{ - collections::HashMap, - fmt::{self, Display, Formatter}, - sync::Arc, - time::Duration, - }, -}; - -/// This trait abstracts various sources for proposing token owner candidates -/// which are likely, but not guaranteed, to have some token balance. -#[async_trait::async_trait] -pub trait TokenOwnerProposing: Send + Sync { - /// Find candidate addresses that might own the token. - async fn find_candidate_owners(&self, token: Address) -> Result>; -} - -/// To detect bad tokens we need to find some address on the network that owns -/// the token so that we can use it in our simulations. 
-#[async_trait::async_trait] -pub trait TokenOwnerFinding: Send + Sync { - /// Find an addresses with at least `min_balance` of tokens and return it, - /// along with its actual balance. - async fn find_owner( - &self, - token: Address, - min_balance: U256, - ) -> Result>; -} - -/// Arguments related to the token owner finder. -#[derive(clap::Parser)] -#[group(skip)] -pub struct Arguments { - /// The token owner finding strategies to use. - #[clap(long, env, use_value_delimiter = true, value_enum)] - pub token_owner_finders: Option>, - - /// The fee value strategy to use for locating Uniswap V3 pools as token - /// holders for bad token detection. - #[clap(long, env, default_value = "static", value_enum)] - pub token_owner_finder_uniswap_v3_fee_values: FeeValues, - - /// The blockscout configuration. - #[clap(flatten)] - pub blockscout: Option, - - /// The ethplorer configuration. - #[clap(flatten)] - pub ethplorer: Option, - - /// Token owner finding rate limiting strategy. See - /// --price-estimation-rate-limiter documentation for format details. - #[clap(long, env)] - pub token_owner_finder_rate_limiter: Option, - - /// List of token addresses to be whitelisted as a potential token owners - /// For each token a list of owners is defined. - #[clap( - long, - env, - value_parser = parse_owners, - default_value = "", - )] - pub whitelisted_owners: HashMap>, - - /// The solvers urls to query the token owner pairs. - #[clap(long, env, use_value_delimiter = true)] - pub solver_token_owners_urls: Vec, - - /// Interval in seconds between consecutive queries to update the solver - /// token owner pairs. Values should be in pair with - /// `solver_token_owners_urls` - #[clap(long, env, use_value_delimiter = true, value_parser = humantime::parse_duration)] - pub solver_token_owners_cache_update_intervals: Vec, -} - -#[derive(clap::Parser)] -#[clap(group( - clap::ArgGroup::new("blockscout") - .requires_all(&[ - "blockscout_api_url", - "blockscout_api_key", - ]) - .multiple(true) - .required(false), -))] -pub struct Blockscout { - /// Override the default blockscout API url for this network - #[clap(long, env, group = "blockscout", required = false)] - pub blockscout_api_url: Url, - - /// The blockscout API key. - #[clap(long, env, group = "blockscout", required = false)] - pub blockscout_api_key: String, -} - -#[derive(clap::Parser)] -#[clap(group( - clap::ArgGroup::new("ethplorer") - .requires_all(&[ - "ethplorer_api_url", - "ethplorer_api_key", - ]) - .multiple(true) - .required(false), -))] -pub struct Ethplorer { - /// Override the default ethplorer API url - #[clap(long, env, group = "ethplorer", required = false)] - pub ethplorer_api_url: Url, - - /// The Ethplorer token holder API key. - #[clap(long, env, group = "ethplorer", required = false)] - pub ethplorer_api_key: String, -} - -fn parse_owners(s: &str) -> Result>> { - if s.is_empty() { - return Ok(Default::default()); - } - s.split(';') - .map(|pair_str| { - let (key, values) = pair_str - .split_once(':') - .context("missing token and owners")?; - let key = key.trim().parse()?; - let values = values - .trim() - .split(',') - .map(|value| value.trim().parse().context("failed to parse token owner")) - .collect::>()?; - Ok((key, values)) - }) - .collect() -} - -/// Support token owner finding strategies. -#[derive(Clone, Copy, Debug, Eq, PartialEq, clap::ValueEnum)] -pub enum TokenOwnerFindingStrategy { - /// Using baseline liquidity pools as token owners. 
- /// - /// The actual liquidity pools used depends on the configured baseline - /// liquidity. - Liquidity, - - /// Use the Blockscout token holder API to find token holders. - Blockscout, - - /// Use the Ethplorer token holder API. - Ethplorer, - - /// Use lists provided by the external solver teams - Solvers, -} - -impl TokenOwnerFindingStrategy { - /// Returns the default set of token owner finding strategies. - pub fn defaults_for_network(chain: &Chain) -> &'static [Self] { - match chain { - Chain::Mainnet => &[Self::Liquidity, Self::Blockscout, Self::Ethplorer], - Chain::Gnosis => &[Self::Liquidity, Self::Blockscout], - Chain::Sepolia - | Chain::Goerli - | Chain::ArbitrumOne - | Chain::Base - | Chain::Bnb - | Chain::Optimism - | Chain::Avalanche - | Chain::Polygon - | Chain::Linea - | Chain::Plasma - | Chain::Ink - | Chain::Lens => &[Self::Liquidity], - Chain::Hardhat => panic!("unsupported chain for token owner finding"), - } - } -} - -impl Display for Arguments { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - let Self { - token_owner_finders, - token_owner_finder_uniswap_v3_fee_values, - blockscout, - ethplorer, - token_owner_finder_rate_limiter, - whitelisted_owners, - solver_token_owners_urls, - solver_token_owners_cache_update_intervals, - } = self; - - writeln!(f, "token_owner_finders: {token_owner_finders:?}")?; - writeln!( - f, - "token_owner_finder_uniswap_v3_fee_values: \ - {token_owner_finder_uniswap_v3_fee_values:?}" - )?; - display_option( - f, - "blockscout_api_url", - &blockscout - .as_ref() - .map(|blockscout| blockscout.blockscout_api_url.clone()), - )?; - display_secret_option( - f, - "blockscout_api_key", - blockscout - .as_ref() - .map(|blockscout| blockscout.blockscout_api_key.clone()) - .as_ref(), - )?; - display_option( - f, - "ethplorer_api_url", - ðplorer - .as_ref() - .map(|blockscout| blockscout.ethplorer_api_url.clone()) - .as_ref(), - )?; - display_secret_option( - f, - "ethplorer_api_key", - ethplorer - .as_ref() - .map(|blockscout| blockscout.ethplorer_api_key.clone()) - .as_ref(), - )?; - display_option( - f, - "token_owner_finder_rate_limiter", - token_owner_finder_rate_limiter, - )?; - writeln!(f, "whitelisted_owners, {whitelisted_owners:?}")?; - display_list(f, "solver_token_owners_urls", solver_token_owners_urls)?; - writeln!( - f, - "solver_token_owners_cache_update_intervals, \ - {solver_token_owners_cache_update_intervals:?}" - )?; - Ok(()) - } -} - -/// Initializes a set of token owner finders. 
-#[expect(clippy::too_many_arguments)] -pub async fn init( - args: &Arguments, - web3: Web3, - chain: &Chain, - http_factory: &HttpClientFactory, - pair_providers: &[PairProvider], - vault: Option<&BalancerV2Vault::Instance>, - uniswapv3_factory: Option<&IUniswapV3Factory::Instance>, - base_tokens: &BaseTokens, - settlement_contract: Address, -) -> Result> { - let web3 = web3.labeled("tokenOwners"); - let finders = args - .token_owner_finders - .as_deref() - .unwrap_or_else(|| TokenOwnerFindingStrategy::defaults_for_network(chain)); - tracing::debug!(?finders, "initializing token owner finders"); - - let mut proposers = Vec::>::new(); - - if finders.contains(&TokenOwnerFindingStrategy::Liquidity) { - proposers.extend( - pair_providers - .iter() - .map(|provider| -> Arc { - Arc::new(UniswapLikePairProviderFinder { - inner: *provider, - base_tokens: base_tokens.tokens().iter().copied().collect(), - }) - }), - ); - if let Some(contract) = vault { - proposers.push(Arc::new(BalancerVaultFinder(contract.clone()))); - } - if let Some(contract) = uniswapv3_factory { - proposers.push(Arc::new( - UniswapV3Finder::new( - contract.clone(), - base_tokens.tokens().iter().copied().collect(), - args.token_owner_finder_uniswap_v3_fee_values, - ) - .await?, - )); - } - } - - if finders.contains(&TokenOwnerFindingStrategy::Blockscout) { - let mut blockscout = - BlockscoutTokenOwnerFinder::with_network(http_factory.create(), chain)?; - if let Some(blockscout_config) = &args.blockscout { - blockscout.with_base_url(blockscout_config.blockscout_api_url.clone()); - blockscout.with_api_key(blockscout_config.blockscout_api_key.clone()); - } - if let Some(strategy) = args.token_owner_finder_rate_limiter.clone() { - blockscout.with_rate_limiter(strategy); - } - proposers.push(Arc::new(blockscout)); - } - - if finders.contains(&TokenOwnerFindingStrategy::Ethplorer) { - let mut ethplorer = EthplorerTokenOwnerFinder::try_with_network( - http_factory.create(), - args.ethplorer - .as_ref() - .map(|ethplorer| ethplorer.ethplorer_api_key.clone()), - chain, - )?; - if let Some(ethplorer_config) = &args.ethplorer { - ethplorer.with_base_url(ethplorer_config.ethplorer_api_url.clone()); - } - if let Some(strategy) = args.token_owner_finder_rate_limiter.clone() { - ethplorer.with_rate_limiter(strategy); - } - proposers.push(Arc::new(ethplorer)); - } - - if finders.contains(&TokenOwnerFindingStrategy::Solvers) { - for (url, update_interval) in args - .solver_token_owners_urls - .clone() - .into_iter() - .zip(args.solver_token_owners_cache_update_intervals.clone()) - { - let identifier = url.to_string(); - let solver = Box::new(SolverConfiguration { - url, - client: http_factory.create(), - }); - let solver = - AutoUpdatingSolverTokenOwnerFinder::new(solver, update_interval, identifier); - proposers.push(Arc::new(solver)); - } - } - - proposers.push(Arc::new(TokenOwnerList::new( - args.whitelisted_owners.clone(), - ))); - - Ok(Arc::new(TokenOwnerFinder { - web3, - proposers, - settlement_contract, - })) -} - -/// A `TokenOwnerFinding` implementation that queries a node with proposed owner -/// candidates from an internal list of `TokenOwnerProposing` implementations. -pub struct TokenOwnerFinder { - pub web3: Web3, - pub proposers: Vec>, - pub settlement_contract: Address, -} - -impl TokenOwnerFinder { - /// Stream of addresses that might own the token. - fn candidate_owners(&self, token: Address) -> impl Stream + '_ { - // Combine the results of all finders into a single stream. 
- let streams = self.proposers.iter().map(|finder| { - futures::stream::once(finder.find_candidate_owners(token)) - .filter_map(|result| async { - match result { - Ok(inner) => Some(futures::stream::iter(inner)), - Err(err) => { - tracing::warn!(?err, "token owner proposing failed"); - None - } - } - }) - .flatten() - .boxed() - }); - futures::stream::select_all(streams) - } -} - -#[async_trait::async_trait] -impl TokenOwnerFinding for TokenOwnerFinder { - async fn find_owner( - &self, - token: Address, - min_balance: U256, - ) -> Result> { - let instance = ERC20::Instance::new(token, self.web3.provider.clone()); - - // We use a stream with ready_chunks so that we can start with the addresses of - // fast TokenOwnerFinding implementations first without having to wait - // for slow ones. - let stream = self.candidate_owners(token).ready_chunks(MAX_BATCH_SIZE); - futures::pin_mut!(stream); - - while let Some(chunk) = stream.next().await { - let futures = chunk - .into_iter() - // The token balance assertions of the bad token test assume the token - // owner is not the settlement contract. - .filter(|owner| *owner != self.settlement_contract) - .map(|owner| { - let balance = instance.balanceOf(owner); - async move { - match balance.call().await { - Ok(balance) => Ok((owner, balance)), - Err(err) if err.is_contract_error() => { - Ok((owner, alloy::primitives::U256::ZERO)) - } - Err(err) => Err(err), - } - } - }); - let balances = futures::future::try_join_all(futures).await?; - - if let Some((addr, balance)) = balances - .into_iter() - .find(|(_, balance)| *balance >= min_balance) - { - return Ok(Some((addr, balance))); - } - } - - Ok(None) - } -} - -#[cfg(test)] -mod test { - use {super::*, alloy::primitives::address, clap::Parser}; - - const TOKEN1: Address = address!("C02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"); - const TOKEN2: Address = address!("7Fc66500c84A76Ad7e9c93437bFc5Ac33E2DDaE9"); - const OWNER1: Address = address!("06920c9fc643de77b99cb7670a944ad31eaaa260"); - const OWNER2: Address = address!("06601571aa9d3e8f5f7cdd5b993192618964bab5"); - - #[test] - fn parse_owners_empty() { - assert_eq!(parse_owners("").unwrap(), Default::default()); - } - - #[test] - fn parse_owners_one_owner() { - let mut expected = HashMap::new(); - expected.insert(TOKEN1, vec![OWNER1]); - let parsed = parse_owners( - " - 0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2: - 0x06920c9fc643de77b99cb7670a944ad31eaaa260 - ", - ) - .unwrap(); - assert_eq!(parsed, expected); - } - - #[test] - fn parse_owners_two_owners() { - let mut expected = HashMap::new(); - expected.insert(TOKEN1, vec![OWNER1, OWNER2]); - let parsed = parse_owners( - " - 0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2: - 0x06920c9fc643de77b99cb7670a944ad31eaaa260, - 0x06601571aa9d3e8f5f7cdd5b993192618964bab5 - ", - ) - .unwrap(); - assert_eq!(parsed, expected); - } - - #[test] - fn parse_owners_two_tokens_with_one_owners() { - let mut expected = HashMap::new(); - expected.insert(TOKEN1, vec![OWNER1]); - expected.insert(TOKEN2, vec![OWNER2]); - let parsed = parse_owners( - " - 0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2: - 0x06920c9fc643de77b99cb7670a944ad31eaaa260; - 0x7Fc66500c84A76Ad7e9c93437bFc5Ac33E2DDaE9: - 0x06601571aa9d3e8f5f7cdd5b993192618964bab5 - ", - ) - .unwrap(); - assert_eq!(parsed, expected); - } - - #[test] - fn parse_owners_err() { - assert!(parse_owners("0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2:").is_err()); - assert!(parse_owners("0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2").is_err()); - 
assert!(parse_owners(":0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2").is_err()); - } - - #[test] - fn blockscout_correctly_configured() { - let args = vec![ - "test", // Program name - "--blockscout-api-key", - "someapikey", - "--blockscout-api-url", - "https://swap.cow.fi", - ]; - - let blockscout = Blockscout::try_parse_from(args); - - assert!(blockscout.is_ok()); - } - - #[test] - fn blockscout_wrongly_configured() { - let args = vec![ - "test", // Program name - "--blockscout-api-key", - "someapikey", - ]; - - let result = Blockscout::try_parse_from(args); - - assert!(result.is_err()); - } - - #[test] - fn ethplorer_correctly_configured() { - let args = vec![ - "test", // Program name - "--ethplorer-api-key", - "someapikey", - "--ethplorer-api-url", - "https://swap.cow.fi", - ]; - - let ethplorer = Ethplorer::try_parse_from(args); - - assert!(ethplorer.is_ok()); - } - - #[test] - fn ethplorer_wrongly_configured() { - let args = vec![ - "test", // Program name - "--ethplorer-api-key", - "someapikey", - ]; - - let result = Ethplorer::try_parse_from(args); - - assert!(result.is_err()); - } -} diff --git a/crates/shared/src/bad_token/token_owner_finder/solvers/mod.rs b/crates/shared/src/bad_token/token_owner_finder/solvers/mod.rs deleted file mode 100644 index 1b153c56a7..0000000000 --- a/crates/shared/src/bad_token/token_owner_finder/solvers/mod.rs +++ /dev/null @@ -1,13 +0,0 @@ -pub mod solver_api; -pub mod solver_finder; - -use {alloy::primitives::Address, anyhow::Result, std::collections::HashMap}; - -type Token = Address; -type Owner = Address; - -#[async_trait::async_trait] -pub trait TokenOwnerSolverApi: Send + Sync { - /// Get token owner pairs from specific solver - async fn get_token_owner_pairs(&self) -> Result>>; -} diff --git a/crates/shared/src/bad_token/token_owner_finder/solvers/solver_api.rs b/crates/shared/src/bad_token/token_owner_finder/solvers/solver_api.rs deleted file mode 100644 index a14fa46537..0000000000 --- a/crates/shared/src/bad_token/token_owner_finder/solvers/solver_api.rs +++ /dev/null @@ -1,30 +0,0 @@ -use { - super::TokenOwnerSolverApi, - alloy::primitives::Address, - anyhow::{Context, Result}, - reqwest::{Client, Url}, - std::collections::HashMap, -}; - -type Token = Address; -type Owner = Address; - -#[derive(Clone, Debug)] -pub struct SolverConfiguration { - pub url: Url, - pub client: Client, -} - -#[async_trait::async_trait] -impl TokenOwnerSolverApi for SolverConfiguration { - async fn get_token_owner_pairs(&self) -> Result>> { - let response = self - .client - .get(self.url.clone()) - .send() - .await? 
- .text() - .await?; - serde_json::from_str(&response).context(format!("bad query response: {response:?}")) - } -} diff --git a/crates/shared/src/bad_token/token_owner_finder/solvers/solver_finder.rs b/crates/shared/src/bad_token/token_owner_finder/solvers/solver_finder.rs deleted file mode 100644 index a88ed4822f..0000000000 --- a/crates/shared/src/bad_token/token_owner_finder/solvers/solver_finder.rs +++ /dev/null @@ -1,154 +0,0 @@ -use { - super::TokenOwnerSolverApi, - crate::bad_token::token_owner_finder::TokenOwnerProposing, - alloy::primitives::Address, - anyhow::Result, - prometheus::{ - IntCounterVec, - core::{AtomicU64, GenericCounter}, - }, - std::{ - collections::HashMap, - fmt::{self, Debug, Formatter}, - sync::{Arc, RwLock}, - time::Duration, - }, - tracing::Instrument, -}; - -type Token = Address; -type Owner = Address; - -#[derive(Debug)] -pub struct AutoUpdatingSolverTokenOwnerFinder { - inner: Arc, -} - -#[derive(prometheus_metric_storage::MetricStorage, Clone, Debug)] -struct Metrics { - /// Tracks how often a token owner update succeeded or failed. - #[metric(labels("identifier", "result"))] - token_owner_list_updates: IntCounterVec, -} - -struct Inner { - solver: Box, - cache: RwLock>>, - metrics: &'static Metrics, - identifier: String, -} - -impl Inner { - pub fn get_update_counter(&self, success: bool) -> GenericCounter { - let result = if success { "success" } else { "failure" }; - self.metrics - .token_owner_list_updates - .with_label_values(&[&self.identifier, result]) - } -} - -impl AutoUpdatingSolverTokenOwnerFinder { - pub fn new( - solver: Box, - update_interval: Duration, - identifier: String, - ) -> Self { - let inner = Arc::new(Inner { - solver, - cache: RwLock::new(Default::default()), - metrics: Metrics::instance(observe::metrics::get_storage_registry()).unwrap(), - identifier, - }); - - // reset metrics for consistent graphs in grafana - inner.get_update_counter(true).reset(); - inner.get_update_counter(false).reset(); - - // spawn a background task to regularly update cache - { - let inner = inner.clone(); - let updater = async move { - loop { - let result = inner.update().await; - inner.get_update_counter(result.is_ok()).inc(); - if let Err(err) = result { - tracing::warn!(?err, "failed to update token list"); - } - tokio::time::sleep(update_interval).await; - } - }; - tokio::task::spawn( - updater.instrument(tracing::info_span!("auto_updating_token_owner_finder")), - ); - } - - Self { inner } - } - - pub async fn update(&self) -> Result<()> { - self.inner.update().await - } -} - -impl Inner { - async fn update(&self) -> Result<()> { - let token_owner_pairs = self.solver.get_token_owner_pairs().await?; - - let mut cache = self.cache.write().unwrap(); - *cache = token_owner_pairs; - - Ok(()) - } -} - -impl Debug for Inner { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - f.debug_struct("Inner").field("cache", &self.cache).finish() - } -} - -#[async_trait::async_trait] -impl TokenOwnerProposing for AutoUpdatingSolverTokenOwnerFinder { - async fn find_candidate_owners(&self, token: Token) -> Result> { - Ok(self - .inner - .cache - .read() - .unwrap() - .get(&token) - .cloned() - .unwrap_or_default()) - } -} - -#[cfg(test)] -mod tests { - use { - super::*, - crate::bad_token::token_owner_finder::solvers::solver_api::SolverConfiguration, - alloy::primitives::address, - reqwest::{Client, Url}, - std::str::FromStr, - }; - - #[tokio::test] - #[ignore] - async fn seasolver_e2e_test() { - let url = std::env::var("SEASOLVER_TOKEN_HOLDERS").unwrap(); - let 
configuration = Box::new(SolverConfiguration { - url: Url::from_str(&url).unwrap(), - client: Client::new(), - }); - let finder = AutoUpdatingSolverTokenOwnerFinder::new( - configuration, - Duration::from_secs(1000), - "test".to_owned(), - ); - tokio::time::sleep(Duration::from_secs(10)).await; - let owners = finder - .find_candidate_owners(address!("132d8D2C76Db3812403431fAcB00F3453Fc42125")) - .await - .unwrap(); - dbg!(owners); - } -} diff --git a/crates/shared/src/bad_token/token_owner_finder/token_owner_list.rs b/crates/shared/src/bad_token/token_owner_finder/token_owner_list.rs deleted file mode 100644 index a1490661fe..0000000000 --- a/crates/shared/src/bad_token/token_owner_finder/token_owner_list.rs +++ /dev/null @@ -1,52 +0,0 @@ -use { - super::TokenOwnerProposing, - alloy::primitives::Address, - anyhow::Result, - std::collections::HashMap, -}; - -type Token = Address; -type Owner = Address; - -pub struct TokenOwnerList { - owners: HashMap>, -} - -impl TokenOwnerList { - pub fn new(owners: HashMap>) -> Self { - Self { owners } - } -} - -#[async_trait::async_trait] -impl TokenOwnerProposing for TokenOwnerList { - async fn find_candidate_owners(&self, token: Address) -> Result> { - Ok(self.owners.get(&token).cloned().unwrap_or_default()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[tokio::test] - async fn token_owner_list_constructor_empty() { - let finder = TokenOwnerList::new(Default::default()); - let candidate_owners = finder - .find_candidate_owners(Address::with_last_byte(10)) - .await; - assert!(candidate_owners.unwrap().is_empty()); - } - - #[tokio::test] - async fn token_owner_list_constructor() { - let token = Address::with_last_byte(1); - let owners = vec![Address::with_last_byte(2), Address::with_last_byte(3)]; - let finder = TokenOwnerList::new(HashMap::from([(token, owners.clone())])); - let candidate_owners = finder - .find_candidate_owners(Address::with_last_byte(1)) - .await - .unwrap(); - assert_eq!(owners, candidate_owners); - } -} diff --git a/crates/shared/src/bad_token/trace_call.rs b/crates/shared/src/bad_token/trace_call.rs index bc3cf34c41..8b529edcf4 100644 --- a/crates/shared/src/bad_token/trace_call.rs +++ b/crates/shared/src/bad_token/trace_call.rs @@ -1,5 +1,5 @@ use { - super::{BadTokenDetecting, TokenQuality, token_owner_finder::TokenOwnerFinding}, + super::TokenQuality, crate::{ethrpc::Web3, trace_many}, alloy::{ primitives::{Address, U256, keccak256}, @@ -17,79 +17,10 @@ use { anyhow::{Context, Result, bail, ensure}, contracts::alloy::ERC20, model::interaction::InteractionData, - std::{cmp, sync::Arc}, - tracing::instrument, }; const METHOD_NOT_FOUND_CODE: i64 = -32601; -/// Detects whether a token is "bad" (works in unexpected ways that are -/// problematic for solving) by simulating several transfers of a token. To find -/// an initial address to transfer from we use the amm pair providers. 
-/// Tokens are bad if: -/// - we cannot find an amm pool of the token to one of the base tokens -/// - transfer into the settlement contract or back out fails -/// - a transfer loses total balance -pub struct TraceCallDetector { - inner: TraceCallDetectorRaw, - finder: Arc, -} - -#[async_trait::async_trait] -impl BadTokenDetecting for TraceCallDetector { - #[instrument(skip_all)] - async fn detect(&self, token: Address) -> Result { - let quality = self.detect_impl(token).await?; - tracing::debug!(?token, ?quality, "determined token quality"); - Ok(quality) - } -} - -impl TraceCallDetector { - pub fn new(web3: Web3, settlement: Address, finder: Arc) -> Self { - Self { - inner: TraceCallDetectorRaw::new(web3, settlement), - finder, - } - } - - async fn detect_impl(&self, token: Address) -> Result { - // Arbitrary amount that is large enough that small relative fees should be - // visible. - const MIN_AMOUNT: u64 = 100_000; - let (take_from, amount) = match self - .finder - .find_owner(token, U256::from(MIN_AMOUNT)) - .await - .context("find_owner")? - { - Some((address, balance)) => { - // Don't use the full balance, but instead a portion of it. This - // makes the trace call less racy and prone to the transfer - // failing because of a balance change from one block to the - // next. This can happen because of either: - // - Block propagation - the trace_callMany is handled by a node that is 1 block - // in the past - // - New block observed - the trace_callMany is executed on a block that came in - // since we read the balance - let amount = cmp::max(balance / U256::from(2), U256::from(MIN_AMOUNT)); - - tracing::debug!(?token, ?address, ?amount, "found owner"); - (address, amount) - } - None => { - return Ok(TokenQuality::bad(format!( - "Could not find on chain source of the token with at least {MIN_AMOUNT} \ - balance.", - ))); - } - }; - self.inner - .test_transfer(take_from, token, amount, &[]) - .await - } -} - /// Detects whether a token is "bad" (works in unexpected ways that are /// problematic for solving) by simulating several transfers of a token. 
#[derive(Debug, Clone)] @@ -402,26 +333,8 @@ fn u256_from_be_bytes_strict(b: &[u8]) -> Option { mod tests { use { super::*, - crate::{ - bad_token::token_owner_finder::{ - TokenOwnerFinder, - blockscout::BlockscoutTokenOwnerFinder, - liquidity::{ - BalancerVaultFinder, - FeeValues, - UniswapLikePairProviderFinder, - UniswapV3Finder, - }, - solvers::{ - solver_api::SolverConfiguration, - solver_finder::AutoUpdatingSolverTokenOwnerFinder, - }, - }, - sources::{BaselineSource, uniswap_v2}, - }, alloy::{ - primitives::{Bytes, address}, - providers::Provider, + primitives::Bytes, rpc::types::trace::parity::{ Action, CallAction, @@ -430,10 +343,6 @@ mod tests { TransactionTrace, }, }, - chain::Chain, - contracts::alloy::{BalancerV2Vault, GPv2Settlement, IUniswapV3Factory}, - ethrpc::Web3, - std::{env, time::Duration}, }; #[test] @@ -542,408 +451,4 @@ mod tests { let expected = TokenQuality::Good; assert_eq!(result, expected); } - - // cargo test -p shared mainnet_tokens -- --nocapture --ignored - #[tokio::test] - #[ignore] - async fn mainnet_tokens() { - // observe::tracing::initialize("orderbook::bad_token=debug, - // shared::transport=debug", tracing::level_filters::LevelFilter::OFF); - let web3 = Web3::new_from_env(); - let version = web3.provider.get_chain_id().await.unwrap().to_string(); - - let base_tokens = &[ - testlib::tokens::WETH, - testlib::tokens::DAI, - testlib::tokens::USDC, - testlib::tokens::USDT, - testlib::tokens::COMP, - testlib::tokens::MKR, - testlib::tokens::WBTC, - ]; - - // tokens from our deny list - let bad_tokens = &[ - address!("0027449Bf0887ca3E431D263FFDeFb244D95b555"), // All balances are maxuint256 - address!("0189d31f6629c359007f72b8d5ec8fa1c126f95c"), - address!("01995786f1435743c42b7f2276c496a610b58612"), - address!("072c46f392e729c1f0d92a307c2c6dba06b5d078"), - address!("074545177a36ab81aac783211f25e14f1ed03c2b"), - address!("07be1ead7aebee544618bdc688fa3cff09857c32"), - address!("0858a26055d6584e5b47bbecf7f7e8cbc390995b"), - address!("0aacfbec6a24756c20d41914f2caba817c0d8521"), - address!("0ba45a8b5d5575935b8158a88c631e9f9c95a2e5"), - address!("0e69d0a2bbb30abcb7e5cfea0e4fde19c00a8d47"), - address!("1016f3c0a1939fa27538339da7e2a300031b6f37"), - address!("106552c11272420aad5d7e94f8acab9095a6c952"), - address!("106d3c66d22d2dd0446df23d7f5960752994d600"), - address!("1337DEF18C680aF1f9f45cBcab6309562975b1dD"), - address!("1341a2257fa7b770420ef70616f888056f90926c"), - address!("1426cc6d52d1b14e2b3b1cb04d57ea42b39c4c7c"), - address!("14dd7ebe6cb084cb73ef377e115554d47dc9d61e"), - address!("15874d65e649880c2614e7a480cb7c9a55787ff6"), - address!("1681bcb589b3cfcf0c0616b0ce9b19b240643dc1"), - address!("18bdfc80b97cb97f6b466cce967849ce9cd9d58c"), - address!("1b9baf2a3edea91ee431f02d449a1044d5726669"), - address!("2129ff6000b95a973236020bcd2b2006b0d8e019"), - address!("239dc02a28a0774738463e06245544a72745d5c5"), - address!("251457b7c5d85251ca1ab384361c821330be2520"), - address!("25a1de1c3ee658fe034b8914a1d8d34110423af8"), - address!("26a79bd709a7ef5e5f747b8d8f83326ea044d8cc"), - address!("289d5488ab09f43471914e572ec9e3651c735af2"), - address!("298d492e8c1d909d3f63bc4a36c66c64acb3d695"), - address!("2b1fe2cea92436e8c34b7c215af66aaa2932a8b2"), - address!("31acf54fae6166dc2f90c4d6f20d379965e96bc1"), - address!("32c868f6318d6334b2250f323d914bc2239e4eee"), - address!("33f128394af03db639107473e52d84ff1290499e"), - address!("37611b28aca5673744161dc337128cfdd2657f69"), - address!("389999216860ab8e0175387a0c90e5c52522c945"), - 
address!("39b8523fa094b0dc045e2c3e5dff34b3f2ca6220"), - address!("3a6fe4c752eb8d571a660a776be4003d619c30a3"), - address!("3a9fff453d50d4ac52a6890647b823379ba36b9e"), - address!("3ea50b7ef6a7eaf7e966e2cb72b519c16557497c"), - address!("3fca773d13f831753ec3ae9f39ad4a6814ebb695"), - address!("41933422dc4a1cb8c822e06f12f7b52fa5e7e094"), - address!("45734927fa2f616fbe19e65f42a0ef3d37d1c80a"), - address!("45804880de22913dafe09f4980848ece6ecbaf78"), - address!("48be867b240d2ffaff69e0746130f2c027d8d3d2"), - address!("4a6be56a211a4c4e0dd4474d524138933c17f3e3"), - address!("4b86e0295e7d32433ffa6411b82b4f4e56a581e1"), - address!("4ba6ddd7b89ed838fed25d208d4f644106e34279"), - address!("4bae380b5d762d543d426331b8437926443ae9ec"), - address!("4bcddfcfa8cb923952bcf16644b36e5da5ca3184"), - address!("4c9d5672ae33522240532206ab45508116daf263"), - address!("4F9254C83EB525f9FCf346490bbb3ed28a81C667"), - address!("4fab740779c73aa3945a5cf6025bf1b0e7f6349c"), - address!("51d3e4c0b2c83e62f5d517d250b3e856897d2052"), - address!("53ba22cb4e5e9c1be0d73913764f572192a71aca"), - address!("56de8bc61346321d4f2211e3ac3c0a7f00db9b76"), - address!("576097fa17e1f702bb9167f0f08f2ea0898a3ea5"), - address!("577e7f9fa80ab33e87a01b701114257c8d9455a8"), - address!("586c680e9a6d21b81ebecf46d78844dab7b3bcf9"), - address!("5d0fa08aeb173ade44b0cf7f31d506d8e04f0ac8"), - address!("62359ed7505efc61ff1d56fef82158ccaffa23d7"), - address!("63d0eea1d7c0d1e89d7e665708d7e8997c0a9ed6"), - address!("66d31def9c47b62184d7f57175eed5b5d9b7f038"), - address!("671ab077497575dcafb68327d2d2329207323e74"), - address!("685aea4f02e39e5a5bb7f7117e88db1151f38364"), - address!("68e0a48d3bff6633a31d1d100b70f93c3859218b"), - address!("69692d3345010a207b759a7d1af6fc7f38b35c5e"), - address!("6a00b86e30167f73e38be086081b80213e8266aa"), - address!("6b8e77d3db1faa17f7b24c24242b6a1eb5008a16"), - address!("6e10aacb89a28d6fa0fe68790777fec7e7f01890"), - address!("6fcb6408499a7c0f242e32d77eb51ffa1dd28a7e"), - address!("714599f7604144a3fe1737c440a70fc0fd6503ea"), - address!("75fef397d74a2d11b64e6915cd847c1e7f8e5520"), - address!("76851a93977bea9264c32255b6457882035c7501"), - address!("79ba92dda26fce15e1e9af47d5cfdfd2a093e000"), - address!("7f0f118d083d5175ab9d2d34c4c8fa4f43c3f47b"), - address!("7ff4169a6b5122b664c51c95727d87750ec07c84"), - address!("801ea8c463a776e85344c565e355137b5c3324cd"), - address!("88ef27e69108b2633f8e1c184cc37940a075cc02"), - address!("8c7424c3000942e5a93de4a01ce2ec86c06333cb"), - address!("8eb24319393716668d768dcec29356ae9cffe285"), - address!("910524678c0b1b23ffb9285a81f99c29c11cbaed"), - address!("910985ffa7101bf5801dd2e91555c465efd9aab3"), - address!("925f2c11b99c1a4c46606898ee91ed3d450cfeda"), - address!("944eee930933be5e23b690c8589021ec8619a301"), - address!("94987bc8aa5f36cb2461c190134929a29c3df726"), - address!("97ad070879be5c31a03a1fe7e35dfb7d51d0eef1"), - address!("97b65710d03e12775189f0d113202cc1443b0aa2"), - address!("98ecf3d8e21adaafe16c00cc3ff681e72690278b"), - address!("99043bb680ab9262c7b2ac524e00b215efb7db9b"), - address!("99ddddd8dfe33905338a073047cfad72e6833c06"), - address!("9a514389172863f12854ad40090aa4b928028542"), - address!("9af15d7b8776fa296019979e70a5be53c714a7ec"), - address!("9ea3b5b4ec044b70375236a281986106457b20ef"), - address!("9f41da75ab2b8c6f0dcef7173c4bf66bd4f6b36a"), - address!("a03f1250aa448226ed4066d8d1722ddd8b51df59"), - address!("a2b4c0af19cc16a6cfacce81f192b024d625817d"), - address!("a3e059c0b01f07f211c85bf7b4f1d907afb011df"), - address!("a5959e9412d27041194c3c3bcbe855face2864f7"), - 
address!("a9a8377287ea9c6b8b4249dd502e75d34148fc5b"), - address!("adaa92cba08434c22d036c4115a6b3d7e2b5569b"), - address!("aee53701e18d5ff6af4964c3a381e7d09b9b9075"), - address!("b893a8049f250b57efa8c62d51527a22404d7c9a"), - address!("B96f0e9bb32760091eb2D6B0A5Ca0D2C7b5644B1"), - address!("ba7435a4b4c747e0101780073eeda872a69bdcd4"), - address!("bae5f2d8a1299e5c4963eaff3312399253f27ccb"), - address!("bd36b14c63f483b286c7b49b6eaffb2fe10aabc4"), - address!("bdea5bb640dbfc4593809deec5cdb8f99b704cd2"), - address!("bf04e48c5d8880306591ef888cde201d3984eb3e"), - address!("bf25ea982b4f850dafb4a95367b890eee5a9e8f2"), - address!("bf494f02ee3fde1f20bee6242bce2d1ed0c15e47"), - address!("c03841b5135600312707d39eb2af0d2ad5d51a91"), - address!("c10bbb8fd399d580b740ed31ff5ac94aa78ba9ed"), - address!("c12d1c73ee7dc3615ba4e37e4abfdbddfa38907e"), - address!("c40af1e4fecfa05ce6bab79dcd8b373d2e436c4e"), - address!("c4d586ef7be9ebe80bd5ee4fbd228fe2db5f2c4e"), - address!("c50ef449171a51fbeafd7c562b064b6471c36caa"), - address!("c626d951eff8e421448074bd2ad7805c6d585793"), - address!("c73c167e7a4ba109e4052f70d5466d0c312a344d"), - address!("c7c24fe893c21e8a4ef46eaf31badcab9f362841"), - address!("cd7492db29e2ab436e819b249452ee1bbdf52214"), - address!("cf0c122c6b73ff809c693db761e7baebe62b6a2e"), - address!("cf2f589bea4645c3ef47f1f33bebf100bee66e05"), - address!("cf8c23cf17bb5815d5705a15486fa83805415625"), - address!("d0834d08c83dbe216811aaea0eeffb2349e57634"), - address!("d0d3ebcad6a20ce69bc3bc0e1ec964075425e533"), - address!("d1afbccc9a2c2187ea544363b986ea0ab6ef08b5"), - address!("d375a513692336cf9eebce5e38869b447948016f"), - address!("d3f6571be1d91ac68b40daaa24075ca7e2f0f72e"), - address!("d50825f50384bc40d5a10118996ef503b3670afd"), - address!("d5281bb2d1ee94866b03a0fccdd4e900c8cb5091"), - address!("da1e53e088023fe4d1dc5a418581748f52cbd1b8"), - address!("dd339f370bbb18b8f389bd0443329d82ecf4b593"), - // Should be denied because can't approve more than balance - address!("decade1c6bf2cd9fb89afad73e4a519c867adcf5"), - address!("dfdd3459d4f87234751696840092ee20c970fb07"), - address!("e0bdaafd0aab238c55d68ad54e616305d4a21772"), - address!("e2d66561b39eadbd488868af8493fb55d4b9d084"), - address!("e302bf71b1f6f3024e7642f9c824ac86b58436a0"), - address!("ea319e87cf06203dae107dd8e5672175e3ee976c"), - address!("ed5e5ab076ae60bdb9c49ac255553e65426a2167"), - address!("eeee2a622330e6d2036691e983dee87330588603"), - address!("ef5b32486ed432b804a51d129f4d2fbdf18057ec"), - address!("f1365ab39e192808b5301bcf6da973830e9e817f"), - address!("f198B4a2631B7D0B9FAc36f8B546Ed3DCe472A47"), - address!("fad45e47083e4607302aa43c65fb3106f1cd7607"), - address!("fcaa8eef70f373e00ac29208023d106c846259ee"), - address!("ff69e48af1174da7f15d0c771861c33d3f19ed8a"), - ]; - - // Of the deny listed tokens the following are detected as good: - // - token 0xc12d1c73ee7dc3615ba4e37e4abfdbddfa38907e Has some kind of - // "freezing" mechanism where some balance is unusuable. We don't seem to - // trigger it. - // - 0x910524678c0b1b23ffb9285a81f99c29c11cbaed Has some kind of time lock that - // we don't encounter. - // - 0xed5e5ab076ae60bdb9c49ac255553e65426a2167 Not sure why deny listed. - // - 0x1337def18c680af1f9f45cbcab6309562975b1dd Not sure why deny listed, maybe - // the callback that I didn't follow in the SC code. - // - 0x4f9254c83eb525f9fcf346490bbb3ed28a81c667 Not sure why deny listed. 
- - let settlement = GPv2Settlement::Instance::deployed(&web3.provider) - .await - .unwrap(); - let finder = Arc::new(TokenOwnerFinder { - web3: web3.clone(), - settlement_contract: *settlement.address(), - proposers: vec![ - Arc::new(UniswapLikePairProviderFinder { - inner: uniswap_v2::UniV2BaselineSourceParameters::from_baseline_source( - BaselineSource::UniswapV2, - &version, - ) - .unwrap() - .into_source(&web3) - .await - .unwrap() - .pair_provider, - base_tokens: base_tokens.to_vec(), - }), - Arc::new(UniswapLikePairProviderFinder { - inner: uniswap_v2::UniV2BaselineSourceParameters::from_baseline_source( - BaselineSource::SushiSwap, - &version, - ) - .unwrap() - .into_source(&web3) - .await - .unwrap() - .pair_provider, - base_tokens: base_tokens.to_vec(), - }), - Arc::new(BalancerVaultFinder( - BalancerV2Vault::Instance::deployed(&web3.provider) - .await - .unwrap(), - )), - Arc::new( - UniswapV3Finder::new( - IUniswapV3Factory::Instance::deployed(&web3.provider) - .await - .unwrap(), - base_tokens.to_vec(), - FeeValues::Static, - ) - .await - .unwrap(), - ), - Arc::new( - BlockscoutTokenOwnerFinder::with_network( - reqwest::Client::new(), - &Chain::Mainnet, - ) - .unwrap(), - ), - ], - }); - let token_cache = TraceCallDetector::new(web3, *settlement.address(), finder); - - println!("testing good tokens"); - for &token in base_tokens { - let result = token_cache.detect(token).await; - println!("token {token:?} is {result:?}"); - } - - println!("testing bad tokens"); - for &token in bad_tokens { - let result = token_cache.detect(token).await; - println!("token {token:?} is {result:?}"); - } - } - - #[tokio::test] - #[ignore] - async fn mainnet_univ3() { - observe::tracing::initialize(&observe::Config::default().with_env_filter("shared=debug")); - let web3 = Web3::new_from_env(); - let base_tokens = vec![testlib::tokens::WETH]; - let settlement = GPv2Settlement::Instance::deployed(&web3.provider) - .await - .unwrap(); - let factory = IUniswapV3Factory::Instance::deployed(&web3.provider) - .await - .unwrap(); - let univ3 = Arc::new( - UniswapV3Finder::new(factory, base_tokens, FeeValues::Dynamic) - .await - .unwrap(), - ); - let finder = Arc::new(TokenOwnerFinder { - web3: web3.clone(), - settlement_contract: *settlement.address(), - proposers: vec![univ3], - }); - let token_cache = TraceCallDetector::new(web3, *settlement.address(), finder); - - let result = token_cache.detect(testlib::tokens::USDC).await; - dbg!(&result); - assert!(result.unwrap().is_good()); - - let only_v3_token = address!("f1b99e3e573a1a9c5e6b2ce818b617f0e664e86b"); - let result = token_cache.detect(only_v3_token).await; - dbg!(&result); - assert!(result.unwrap().is_good()); - } - - #[tokio::test] - #[ignore] - async fn yearn_vault_tokens() { - let tokens = [ - address!("1025b1641d1F23C289412Dd5E5701e9810103a93"), - address!("132d8D2C76Db3812403431fAcB00F3453Fc42125"), - address!("1635b506a88fBF428465Ad65d00e8d6B6E5846C3"), - address!("16825039dfe2a5b01F3E1E6a2BBF9a576c6F95c4"), - address!("1b905331F7dE2748F4D6a0678e1521E20347643F"), - address!("23D3D0f1c697247d5e0a9efB37d8b0ED0C464f7f"), - address!("25212Df29073FfFA7A67399AcEfC2dd75a831A1A"), - address!("27B5739e22ad9033bcBf192059122d163b60349D"), - address!("2D5D4869381C4Fce34789BC1D38aCCe747E295AE"), - address!("2DfB14E32e2F8156ec15a2c21c3A6c053af52Be8"), - address!("2a38B9B0201Ca39B17B460eD2f11e4929559071E"), - address!("2e5c7e9B1Da0D9Cb2832eBb06241d18552A85400"), - address!("30FCf7c6cDfC46eC237783D94Fc78553E79d4E9C"), - 
address!("341bb10D8f5947f3066502DC8125d9b8949FD3D6"), - address!("378cb52b00F9D0921cb46dFc099CFf73b42419dC"), - address!("39CAF13a104FF567f71fd2A4c68C026FDB6E740B"), - address!("3B27F92C0e212C671EA351827EDF93DB27cc0c65"), - address!("3B96d491f067912D18563d56858Ba7d6EC67a6fa"), - address!("3c5DF3077BcF800640B5DAE8c91106575a4826E6"), - address!("4560b99C904aAD03027B5178CCa81584744AC01f"), - address!("490bD0886F221A5F79713D3E84404355A9293C50"), - address!("4B5BfD52124784745c1071dcB244C6688d2533d3"), - address!("528D50dC9a333f01544177a924893FA1F5b9F748"), - address!("59518884EeBFb03e90a18ADBAAAB770d4666471e"), - address!("595a68a8c9D5C230001848B69b1947ee2A607164"), - address!("5AB64C599FcC59f0f2726A300b03166A395578Da"), - address!("5a770DbD3Ee6bAF2802D29a901Ef11501C44797A"), - address!("5c0A86A32c129538D62C106Eb8115a8b02358d57"), - address!("5e69e8b51B71C8596817fD442849BD44219bb095"), - address!("5fA5B62c8AF877CB37031e0a3B2f34A78e3C56A6"), - address!("625b7DF2fa8aBe21B0A976736CDa4775523aeD1E"), - address!("671a912C10bba0CFA74Cfc2d6Fba9BA1ed9530B2"), - address!("67e019bfbd5a67207755D04467D6A70c0B75bF60"), - address!("6A5468752f8DB94134B6508dAbAC54D3b45efCE6"), - address!("6B5ce31AF687a671a804d8070Ddda99Cab926dfE"), - address!("6Ede7F19df5df6EF23bD5B9CeDb651580Bdf56Ca"), - address!("6d765CbE5bC922694afE112C140b8878b9FB0390"), - address!("7047F90229a057C13BF847C0744D646CFb6c9E1A"), - address!("718AbE90777F5B778B52D553a5aBaa148DD0dc5D"), - address!("790a60024bC3aea28385b60480f15a0771f26D09"), - address!("801Ab06154Bf539dea4385a39f5fa8534fB53073"), - address!("8414Db07a7F743dEbaFb402070AB01a4E0d2E45e"), - address!("84E13785B5a27879921D6F685f041421C7F482dA"), - address!("873fB544277FD7b977B196a826459a69E27eA4ea"), - address!("8b9C0c24307344B6D7941ab654b2Aeee25347473"), - address!("8cc94ccd0f3841a468184aCA3Cc478D2148E1757"), - address!("8ee57c05741aA9DB947A744E713C15d4d19D8822"), - address!("8fA3A9ecd9EFb07A8CE90A6eb014CF3c0E3B32Ef"), - address!("9A39f31DD5EDF5919A5C0c2433cE053fAD2E0336"), - address!("9d409a0A012CFbA9B15F6D4B36Ac57A46966Ab9a"), - address!("A696a63cc78DfFa1a63E9E50587C197387FF6C7E"), - address!("A74d4B67b3368E83797a35382AFB776bAAE4F5C8"), - address!("A9412Ffd7E0866755ae0dda3318470A61F62abe8"), - address!("B4AdA607B9d6b2c9Ee07A275e9616B84AC560139"), - address!("BCBB5b54Fa51e7b7Dc920340043B203447842A6b"), - address!("Bfedbcbe27171C418CDabC2477042554b1904857"), - address!("C4dAf3b5e2A9e93861c3FBDd25f1e943B8D87417"), - address!("D6Ea40597Be05c201845c0bFd2e96A60bACde267"), - address!("E537B5cc158EB71037D4125BDD7538421981E6AA"), - address!("E5eDcE53e39Cbc6d819E2C340BCF295e0084ff7c"), - address!("F29AE508698bDeF169B89834F76704C3B205aedf"), - address!("F59D66c1d593Fb10e2f8c2a6fD2C958792434B9c"), - address!("F6B9DFE6bc42ed2eaB44D6B829017f7B78B29f88"), - address!("FBEB78a723b8087fD2ea7Ef1afEc93d35E8Bed42"), - address!("FD0877d9095789cAF24c98F7CCe092fa8E120775"), - address!("a258C4606Ca8206D8aA700cE2143D7db854D168c"), - address!("a354F35829Ae975e850e23e9615b11Da1B3dC4DE"), - address!("b09F2a67a731466182518fae980feAe96479d80b"), - address!("b4D1Be44BfF40ad6e506edf43156577a3f8672eC"), - address!("c5F3D11580c41cD07104e9AF154Fc6428bb93c73"), - address!("c97232527B62eFb0D8ed38CF3EA103A6CcA4037e"), - address!("c97511a1dDB162C8742D39FF320CfDCd13fBcf7e"), - address!("d88dBBA3f9c4391Ee46f5FF548f289054db6E51C"), - address!("d8C620991b8E626C099eAaB29B1E3eEa279763bb"), - address!("d9788f3931Ede4D5018184E198699dC6d66C1915"), - address!("dA816459F1AB5631232FE5e97a05BBBb94970c95"), - 
address!("db25cA703181E7484a155DD612b06f57E12Be5F0"), - address!("e9Dc63083c464d6EDcCFf23444fF3CFc6886f6FB"), - address!("f2db9a7c0ACd427A680D640F02d90f6186E71725"), - address!("f8768814b88281DE4F532a3beEfA5b85B69b9324"), - ]; - - let solver_token_finder = Arc::new(AutoUpdatingSolverTokenOwnerFinder::new( - Box::new(SolverConfiguration { - url: env::var("SOLVER_TOKEN_OWNERS_URLS") - .unwrap() - .parse() - .unwrap(), - client: reqwest::Client::new(), - }), - Duration::MAX, - "test".to_owned(), - )); - - // Force the cache to update at least once. - solver_token_finder.update().await.unwrap(); - - let web3 = Web3::new_from_env(); - - let settlement = GPv2Settlement::Instance::deployed(&web3.provider) - .await - .unwrap(); - let finder = Arc::new(TokenOwnerFinder { - web3: web3.clone(), - proposers: vec![solver_token_finder], - settlement_contract: *settlement.address(), - }); - let token_cache = TraceCallDetector::new(web3, *settlement.address(), finder); - - for token in tokens { - let result = token_cache.detect(token).await; - println!("token {token:?} is {result:?}"); - } - } } From 32b0408f0c2840a23dcc5d9f44dfd522e2afe0ce Mon Sep 17 00:00:00 2001 From: ilya Date: Fri, 13 Feb 2026 17:52:33 +0000 Subject: [PATCH 064/219] Bound native price cache with moka (#4154) # Description The native price cache is backed by an unbounded `Mutex>`. Entries are inserted on every price estimate but never removed. Once at steady state with many unique tokens, the cache grows without bound. The size limit is currently hardcoded at 20k, since on mainnet we have around that number of unique tokens: ```sql SELECT COUNT(*) AS unique_token_count FROM (SELECT sell_token AS token FROM orders UNION SELECT buy_token AS token FROM orders) t; Returns -> 20109 ``` Per cache entry breakdown: - CachedResult value: ~32 bytes - Address key: 20 bytes - Moka internal overhead: ~100-120 bytes - Total per entry: ~170 bytes At 20,000 entries: ~3.4 MB upper bound # Changes - Replace the `Mutex>` cache with `moka::sync::Cache`. - Adapt get_cached_price(): - No more MutexGuard parameter. Use data.get(&token) which returns Option (moka clones the value). Check staleness + is_ready() on the returned value. No `requested_at` to update, since moka tracks access internally. - Remove the `requested_at` field from the `CachedResult` since it is not needed anymore and turned out to be dead code. - Adapt the `insert()` function to use `entry_by_ref().and_compute_with()`, which gives gives atomic read-modify-write for `accumulative_errors_count`. ## How to test Existing tests. 
--- Cargo.lock | 1 + crates/shared/Cargo.toml | 1 + .../price_estimation/native_price_cache.rs | 78 +++++++++---------- 3 files changed, 41 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 422f11eeb6..c40ca9c023 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6154,6 +6154,7 @@ dependencies = [ "maplit", "mockall", "model", + "moka", "num", "number", "observe", diff --git a/crates/shared/Cargo.toml b/crates/shared/Cargo.toml index 752be21c00..b156517aec 100644 --- a/crates/shared/Cargo.toml +++ b/crates/shared/Cargo.toml @@ -53,6 +53,7 @@ tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter", "fmt", "time"] } url = { workspace = true } +moka = { workspace = true, features = ["sync"] } mockall = { workspace = true, optional = true } [dev-dependencies] diff --git a/crates/shared/src/price_estimation/native_price_cache.rs b/crates/shared/src/price_estimation/native_price_cache.rs index 201d3dc3a5..87b98e1642 100644 --- a/crates/shared/src/price_estimation/native_price_cache.rs +++ b/crates/shared/src/price_estimation/native_price_cache.rs @@ -13,7 +13,7 @@ use { rand::Rng, std::{ collections::{HashMap, HashSet}, - sync::{Arc, Mutex, MutexGuard}, + sync::Arc, time::{Duration, Instant}, }, tokio::time, @@ -94,7 +94,6 @@ type CacheEntry = Result; struct CachedResult { result: CacheEntry, updated_at: Instant, - requested_at: Instant, accumulative_errors_count: u32, } @@ -112,14 +111,12 @@ impl CachedResult { Self { result, updated_at: now, - requested_at: now, accumulative_errors_count: u32::from(is_accumulating_error), } } fn update(&mut self, result: CacheEntry) { let now = Instant::now(); - self.requested_at = now; self.updated_at = now; self.accumulative_errors_count = match result { Err(PriceEstimationError::EstimatorInternal(_)) => self.accumulative_errors_count + 1, @@ -159,8 +156,10 @@ fn should_cache(result: &Result) -> bool { #[derive(Clone)] pub struct Cache(Arc); +const MAX_CACHE_SIZE: u64 = 20_000; + struct CacheInner { - data: Mutex>, + data: moka::sync::Cache, max_age: Duration, } @@ -169,26 +168,25 @@ impl Cache { let mut rng = rand::thread_rng(); let now = std::time::Instant::now(); - let data = initial_prices - .into_iter() - .filter_map(|(token, price)| { + let data = moka::sync::Cache::builder() + .max_capacity(MAX_CACHE_SIZE) + .build(); + + for (token, price) in initial_prices { + if let Some(price) = from_normalized_price(price) { let updated_at = Self::random_updated_at(max_age, now, &mut rng); - Some(( + data.insert( token, CachedResult { - result: Ok(from_normalized_price(price)?), + result: Ok(price), updated_at, - requested_at: now, accumulative_errors_count: 0, }, - )) - }) - .collect::>(); - - Self(Arc::new(CacheInner { - data: Mutex::new(data), - max_age, - })) + ); + } + } + + Self(Arc::new(CacheInner { data, max_age })) } fn max_age(&self) -> Duration { @@ -205,19 +203,19 @@ impl Cache { } fn len(&self) -> usize { - self.0.data.lock().unwrap().len() + // Should never fire since we are bounded with MAX_CACHE_SIZE + usize::try_from(self.0.data.entry_count()).expect("cache size should fit in a usize") } fn get_cached_price( token: Address, now: Instant, - cache: &mut MutexGuard>, + cache: &moka::sync::Cache, max_age: &Duration, ) -> Option { - let entry = cache.get_mut(&token)?; - entry.requested_at = now; + let entry = cache.get(&token)?; let is_recent = now.saturating_duration_since(entry.updated_at) < *max_age; - (is_recent && entry.is_ready()).then_some(entry.clone()) + (is_recent && 
entry.is_ready()).then_some(entry) } /// Only returns prices that are currently cached. @@ -226,10 +224,9 @@ impl Cache { tokens: &[Address], ) -> HashMap> { let now = Instant::now(); - let mut cache = self.0.data.lock().unwrap(); let mut results = HashMap::default(); for token in tokens { - let cached = Self::get_cached_price(*token, now, &mut cache, &self.0.max_age); + let cached = Self::get_cached_price(*token, now, &self.0.data, &self.0.max_age); let label = if cached.is_some() { "hits" } else { "misses" }; CacheMetrics::get() .native_price_cache_access @@ -243,11 +240,17 @@ impl Cache { } fn insert(&self, token: Address, result: CacheEntry) { - let mut cache = self.0.data.lock().unwrap(); - cache - .entry(token) - .and_modify(|value| value.update(result.clone())) - .or_insert_with(|| CachedResult::new(result)); + self.0 + .data + .entry_by_ref(&token) + .and_upsert_with(|maybe_entry| match maybe_entry { + Some(entry) => { + let mut cached = entry.into_value(); + cached.update(result); + cached + } + None => CachedResult::new(result), + }); } } @@ -311,11 +314,10 @@ impl CachingNativePriceEstimator { let estimates = tokens.into_iter().map(move |token| async move { // check if the price is cached by now let now = Instant::now(); + if let Some(cached) = + Cache::get_cached_price(token, now, &self.0.cache.0.data, &max_age) { - let mut cache = self.0.cache.0.data.lock().unwrap(); - if let Some(cached) = Cache::get_cached_price(token, now, &mut cache, &max_age) { - return (token, cached.result); - } + return (token, cached.result); } let approximation = self @@ -390,8 +392,7 @@ impl NativePriceEstimating for CachingNativePriceEstimator { async move { let cached = { let now = Instant::now(); - let mut cache = self.0.cache.0.data.lock().unwrap(); - Cache::get_cached_price(token, now, &mut cache, &self.0.cache.0.max_age) + Cache::get_cached_price(token, now, &self.0.cache.0.data, &self.0.cache.0.max_age) }; let label = if cached.is_some() { "hits" } else { "misses" }; @@ -565,8 +566,7 @@ mod tests { { // Check that `updated_at` timestamps are initialized with // reasonable values. - let data = estimator.cache().0.data.lock().unwrap(); - for value in data.values() { + for (_, value) in &estimator.cache().0.data { let elapsed = value.updated_at.elapsed(); assert!(elapsed >= min_age && elapsed <= max_age); } From 5b7d56e03c690782f794cb802f324ea2688027cb Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Fri, 13 Feb 2026 23:41:04 +0100 Subject: [PATCH 065/219] Simplify deny listed tokens (#4157) # Description Originally the orderbook and autopilot tried to detect tokens with unsupported behavior with sophisticated simulations. Since then we: * onboarded more solvers that could handle tokens that were previously causing issues * introduced quote simulation that can detect such tokens more reliably * moved towards minimal intervention for problematic tokens in the protocol itself Today the only thing we actually make use of is a list of tokens that are actually deny listed. Those should probably be revisited as well but for now I decided to only simplify the code we have today while keeping feature parity. # Changes - turned `ListBasedDetector` into a simple wrapper around a HashSet - given that there is no more complicated or fallible logic in the detection mechanism I decided to drop the instrumentation as well This simplification should also drastically reduce the ~30ms we currently spend on filtering out orders that trade deny-listed tokens. 
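As a rough illustration of the new shape (simplified stand-in types, not the exact code from the diff below): the deny list becomes a shared, read-only `HashSet` behind an `Arc`, and filtering is a plain synchronous membership check instead of an async detector call per token:

```rust
use std::{collections::HashSet, sync::Arc};

/// Simplified stand-in for a deny list of token identifiers
/// (plain strings here instead of on-chain addresses).
#[derive(Default, Clone)]
struct DenyList(Arc<HashSet<String>>);

impl DenyList {
    fn new(tokens: Vec<String>) -> Self {
        Self(Arc::new(tokens.into_iter().collect()))
    }

    /// O(1) membership check; cloning the wrapper only bumps the Arc.
    fn contains(&self, token: &str) -> bool {
        self.0.contains(token)
    }
}

fn main() {
    let deny_list = DenyList::new(vec!["0xbad".to_string()]);

    // An order is filtered out if either traded token is deny listed.
    let order = ("0xbad", "0xgood");
    let filtered = deny_list.contains(order.0) || deny_list.contains(order.1);
    assert!(filtered);
}
```

Since the set is built once at startup and never mutated, no locking is needed and the check can run inline in the order-filtering hot path.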
## How to test existing e2e tests --- crates/autopilot/src/arguments.rs | 8 - crates/autopilot/src/run.rs | 29 +--- crates/autopilot/src/solvable_orders.rs | 73 +++------ crates/orderbook/src/arguments.rs | 8 - crates/orderbook/src/run.rs | 31 +--- crates/shared/src/arguments.rs | 7 - crates/shared/src/bad_token/instrumented.rs | 73 --------- crates/shared/src/bad_token/list_based.rs | 148 ++---------------- crates/shared/src/bad_token/mod.rs | 10 -- crates/shared/src/order_validation.rs | 118 +++----------- crates/shared/src/price_estimation/factory.rs | 10 +- .../shared/src/price_estimation/sanitized.rs | 44 ++---- 12 files changed, 86 insertions(+), 473 deletions(-) delete mode 100644 crates/shared/src/bad_token/instrumented.rs diff --git a/crates/autopilot/src/arguments.rs b/crates/autopilot/src/arguments.rs index 689be1705b..1b8c592e7e 100644 --- a/crates/autopilot/src/arguments.rs +++ b/crates/autopilot/src/arguments.rs @@ -70,12 +70,6 @@ pub struct Arguments { #[clap(long, env, action = clap::ArgAction::Set, default_value = "false")] pub skip_event_sync: bool, - /// List of token addresses that should be allowed regardless of whether the - /// bad token detector thinks they are bad. Base tokens are - /// automatically allowed. - #[clap(long, env, use_value_delimiter = true)] - pub allowed_tokens: Vec
<Address>, - /// List of token addresses to be ignored throughout service #[clap(long, env, use_value_delimiter = true)] pub unsupported_tokens: Vec<Address>
, @@ -291,7 +285,6 @@ impl std::fmt::Display for Arguments { metrics_address, api_address, skip_event_sync, - allowed_tokens, unsupported_tokens, native_price_estimators, api_native_price_estimators, @@ -338,7 +331,6 @@ impl std::fmt::Display for Arguments { writeln!(f, "api_address: {api_address}")?; display_secret_option(f, "db_write_url", Some(&db_write_url))?; writeln!(f, "skip_event_sync: {skip_event_sync}")?; - writeln!(f, "allowed_tokens: {allowed_tokens:?}")?; writeln!(f, "unsupported_tokens: {unsupported_tokens:?}")?; writeln!(f, "native_price_estimators: {native_price_estimators}")?; display_option( diff --git a/crates/autopilot/src/run.rs b/crates/autopilot/src/run.rs index 1bc6d4f40f..9c973aab7d 100644 --- a/crates/autopilot/src/run.rs +++ b/crates/autopilot/src/run.rs @@ -35,11 +35,7 @@ use { shared::{ account_balances::{self, BalanceSimulator}, arguments::tracing_config, - bad_token::{ - instrumented::InstrumentedBadTokenDetectorExt, - list_based::{ListBasedDetector, UnknownTokenStrategy}, - }, - baseline_solver::BaseTokens, + bad_token::list_based::DenyListedTokens, code_fetching::CachedCodeFetcher, http_client::HttpClientFactory, order_quoting::{self, OrderQuoter}, @@ -262,23 +258,7 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { .expect("failed to create gas price estimator"), ); - let base_tokens = Arc::new(BaseTokens::new( - *eth.contracts().weth().address(), - &args.shared.base_tokens, - )); - let mut allowed_tokens = args.allowed_tokens.clone(); - allowed_tokens.extend(base_tokens.tokens().iter()); - allowed_tokens.push(model::order::BUY_ETH_ADDRESS); - let unsupported_tokens = args.unsupported_tokens.clone(); - - let bad_token_detector = Arc::new( - ListBasedDetector::new( - allowed_tokens, - unsupported_tokens, - UnknownTokenStrategy::Allow, - ) - .instrumented(), - ); + let deny_listed_tokens = DenyListedTokens::new(args.unsupported_tokens.clone()); let token_info_fetcher = Arc::new(CachedTokenInfoFetcher::new(Arc::new(TokenInfoFetcher { web3: web3.clone(), @@ -303,12 +283,11 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { .call() .await .expect("failed to query solver authenticator address"), - base_tokens: base_tokens.clone(), block_stream: eth.current_block().clone(), }, factory::Components { http_factory: http_factory.clone(), - bad_token_detector: bad_token_detector.clone(), + deny_listed_tokens: deny_listed_tokens.clone(), tokens: token_info_fetcher.clone(), code_fetcher: code_fetcher.clone(), }, @@ -459,7 +438,7 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { args.banned_users_max_cache_size.get().to_u64().unwrap(), ), balance_fetcher.clone(), - bad_token_detector.clone(), + deny_listed_tokens.clone(), competition_native_price_updater.clone(), *eth.contracts().weth().address(), domain::ProtocolFees::new( diff --git a/crates/autopilot/src/solvable_orders.rs b/crates/autopilot/src/solvable_orders.rs index 2d427cd15f..92cc5605c2 100644 --- a/crates/autopilot/src/solvable_orders.rs +++ b/crates/autopilot/src/solvable_orders.rs @@ -7,7 +7,7 @@ use { alloy::primitives::{Address, U256}, anyhow::{Context, Result}, database::order_events::OrderEventLabel, - futures::{FutureExt, future::join_all}, + futures::FutureExt, itertools::Itertools, model::{ order::{Order, OrderClass, OrderUid}, @@ -17,7 +17,7 @@ use { prometheus::{Histogram, HistogramVec, IntCounter, IntCounterVec, IntGauge, IntGaugeVec}, shared::{ account_balances::{BalanceFetching, Query}, - 
bad_token::BadTokenDetecting, + bad_token::list_based::DenyListedTokens, price_estimation::{ native::{NativePriceEstimating, to_normalized_price}, native_price_cache::NativePriceUpdater, @@ -124,7 +124,7 @@ pub struct SolvableOrdersCache { persistence: infra::Persistence, banned_users: banned::Users, balance_fetcher: Arc, - bad_token_detector: Arc, + deny_listed_tokens: DenyListedTokens, cache: Mutex>, native_price_estimator: Arc, weth: Address, @@ -150,7 +150,7 @@ impl SolvableOrdersCache { persistence: infra::Persistence, banned_users: banned::Users, balance_fetcher: Arc, - bad_token_detector: Arc, + deny_listed_tokens: DenyListedTokens, native_price_estimator: Arc, weth: Address, protocol_fees: domain::ProtocolFees, @@ -164,7 +164,7 @@ impl SolvableOrdersCache { persistence, banned_users, balance_fetcher, - bad_token_detector, + deny_listed_tokens, cache: Mutex::new(None), native_price_estimator, weth, @@ -432,16 +432,13 @@ impl SolvableOrdersCache { ) -> Vec> { let presignature_pending_orders = find_presignature_pending_orders(&orders); - let (banned_user_orders, unsupported_token_orders) = tokio::join!( - self.timed_future( + let unsupported_token_orders = find_unsupported_tokens(&orders, &self.deny_listed_tokens); + let banned_user_orders = self + .timed_future( "banned_user_filtering", - find_banned_user_orders(&orders, &self.banned_users) - ), - self.timed_future( - "unsupported_token_filtering", - find_unsupported_tokens(&orders, self.bad_token_detector.clone()) - ), - ); + find_banned_user_orders(&orders, &self.banned_users), + ) + .await; tracing::trace!("filtered invalid orders"); Metrics::track_filtered_orders("banned_user", &banned_user_orders); @@ -670,46 +667,16 @@ async fn get_orders_with_native_prices( (orders, removed_orders, prices) } -async fn find_unsupported_tokens( +fn find_unsupported_tokens( orders: &[Arc], - bad_token: Arc, + deny_listed_tokens: &DenyListedTokens, ) -> Vec { - let bad_tokens = join_all( - orders - .iter() - .flat_map(|o| o.data.token_pair().into_iter().flatten()) - .unique() - .map(|token| { - let bad_token = bad_token.clone(); - async move { - match bad_token.detect(token).await { - Ok(quality) => (!quality.is_good()).then_some(token), - Err(err) => { - tracing::warn!( - ?token, - ?err, - "unable to determine token quality, assume good" - ); - Some(token) - } - } - } - }), - ) - .await - .into_iter() - .flatten() - .collect::>(); - orders .iter() .filter_map(|order| { - order - .data - .token_pair() - .into_iter() - .flatten() - .any(|token| bad_tokens.contains(&token)) + [&order.data.buy_token, &order.data.sell_token] + .iter() + .any(|token| deny_listed_tokens.contains(token)) .then_some(order.metadata.uid) }) .collect() @@ -724,7 +691,7 @@ mod tests { maplit::{btreemap, hashset}, model::order::{OrderBuilder, OrderData, OrderMetadata, OrderUid}, shared::{ - bad_token::list_based::ListBasedDetector, + bad_token::list_based::DenyListedTokens, price_estimation::{ HEALTHY_PRICE_ESTIMATION_TIME, PriceEstimationError, @@ -1095,7 +1062,7 @@ mod tests { let token0 = Address::with_last_byte(0); let token1 = Address::with_last_byte(1); let token2 = Address::with_last_byte(2); - let bad_token = Arc::new(ListBasedDetector::deny_list(vec![token0])); + let deny_listed_tokens = DenyListedTokens::new(vec![token0]); let orders = vec![ Arc::new( OrderBuilder::default() @@ -1116,9 +1083,7 @@ mod tests { .build(), ), ]; - let unsupported_tokens_orders = find_unsupported_tokens(&orders, bad_token) - .now_or_never() - .unwrap(); + let unsupported_tokens_orders = 
find_unsupported_tokens(&orders, &deny_listed_tokens); assert_eq!( unsupported_tokens_orders, [orders[0].metadata.uid, orders[2].metadata.uid] diff --git a/crates/orderbook/src/arguments.rs b/crates/orderbook/src/arguments.rs index 11273c5406..9f40f7a120 100644 --- a/crates/orderbook/src/arguments.rs +++ b/crates/orderbook/src/arguments.rs @@ -96,12 +96,6 @@ pub struct Arguments { #[clap(long, env, default_value = "2")] pub fast_price_estimation_results_required: NonZeroUsize, - /// List of token addresses that should be allowed regardless of whether the - /// bad token detector thinks they are bad. Base tokens are - /// automatically allowed. - #[clap(long, env, use_value_delimiter = true)] - pub allowed_tokens: Vec
, - /// Skip EIP-1271 order signature validation on creation. #[clap(long, env, action = clap::ArgAction::Set, default_value = "false")] pub eip1271_skip_creation_validation: bool, @@ -177,7 +171,6 @@ impl std::fmt::Display for Arguments { unsupported_tokens, banned_users, banned_users_max_cache_size, - allowed_tokens, eip1271_skip_creation_validation, native_price_estimators, fast_price_estimation_results_required, @@ -220,7 +213,6 @@ impl std::fmt::Display for Arguments { f, "banned_users_max_cache_size: {banned_users_max_cache_size:?}" )?; - writeln!(f, "allowed_tokens: {allowed_tokens:?}")?; writeln!( f, "eip1271_skip_creation_validation: {eip1271_skip_creation_validation}" diff --git a/crates/orderbook/src/run.rs b/crates/orderbook/src/run.rs index 5e353e3c33..3be02485fb 100644 --- a/crates/orderbook/src/run.rs +++ b/crates/orderbook/src/run.rs @@ -21,18 +21,14 @@ use { WETH9, support::Balances, }, - model::{DomainSeparator, order::BUY_ETH_ADDRESS}, + model::DomainSeparator, num::ToPrimitive, observe::metrics::{DEFAULT_METRICS_PORT, serve_metrics}, order_validation, shared::{ account_balances::{self, BalanceSimulator}, arguments::tracing_config, - bad_token::{ - instrumented::InstrumentedBadTokenDetectorExt, - list_based::{ListBasedDetector, UnknownTokenStrategy}, - }, - baseline_solver::BaseTokens, + bad_token::list_based::DenyListedTokens, code_fetching::CachedCodeFetcher, gas_price::InstrumentedGasEstimator, http_client::HttpClientFactory, @@ -196,23 +192,7 @@ pub async fn run(args: Arguments) { .expect("failed to create gas price estimator"), )); - let base_tokens = Arc::new(BaseTokens::new( - *native_token.address(), - &args.shared.base_tokens, - )); - let mut allowed_tokens = args.allowed_tokens.clone(); - allowed_tokens.extend(base_tokens.tokens().iter()); - allowed_tokens.push(BUY_ETH_ADDRESS); - let unsupported_tokens = args.unsupported_tokens.clone(); - - let bad_token_detector = Arc::new( - ListBasedDetector::new( - allowed_tokens, - unsupported_tokens, - UnknownTokenStrategy::Allow, - ) - .instrumented(), - ); + let deny_listed_tokens = DenyListedTokens::new(args.unsupported_tokens.clone()); let current_block_stream = args .shared @@ -241,12 +221,11 @@ pub async fn run(args: Arguments) { .call() .await .expect("failed to query solver authenticator address"), - base_tokens: base_tokens.clone(), block_stream: current_block_stream.clone(), }, factory::Components { http_factory: http_factory.clone(), - bad_token_detector: bad_token_detector.clone(), + deny_listed_tokens: deny_listed_tokens.clone(), tokens: token_info_fetcher.clone(), code_fetcher: code_fetcher.clone(), }, @@ -348,7 +327,7 @@ pub async fn run(args: Arguments) { )), validity_configuration, args.eip1271_skip_creation_validation, - bad_token_detector.clone(), + deny_listed_tokens.clone(), hooks_contract, optimal_quoter.clone(), balance_fetcher, diff --git a/crates/shared/src/arguments.rs b/crates/shared/src/arguments.rs index 3474375cbb..a01f2a7dc3 100644 --- a/crates/shared/src/arguments.rs +++ b/crates/shared/src/arguments.rs @@ -187,11 +187,6 @@ pub struct Arguments { )] pub gas_estimators: Vec, - /// Base tokens used for finding multi-hop paths between multiple AMMs - /// Should be the most liquid tokens of the given network. - #[clap(long, env, use_value_delimiter = true)] - pub base_tokens: Vec
, - /// The time between new blocks on the network. #[clap(long, env, value_parser = humantime::parse_duration)] pub network_block_interval: Option, @@ -314,7 +309,6 @@ impl Display for Arguments { chain_id, simulation_node_url, gas_estimators, - base_tokens, network_block_interval, settlement_contract_address, balances_contract_address, @@ -335,7 +329,6 @@ impl Display for Arguments { display_option(f, "chain_id", chain_id)?; display_option(f, "simulation_node_url", simulation_node_url)?; writeln!(f, "gas_estimators: {gas_estimators:?}")?; - writeln!(f, "base_tokens: {base_tokens:?}")?; display_option( f, "network_block_interval", diff --git a/crates/shared/src/bad_token/instrumented.rs b/crates/shared/src/bad_token/instrumented.rs deleted file mode 100644 index d6a876b406..0000000000 --- a/crates/shared/src/bad_token/instrumented.rs +++ /dev/null @@ -1,73 +0,0 @@ -use { - super::{BadTokenDetecting, TokenQuality}, - alloy::primitives::Address, - anyhow::Result, - prometheus::IntCounterVec, - prometheus_metric_storage::MetricStorage, - tracing::Instrument, -}; - -pub trait InstrumentedBadTokenDetectorExt { - fn instrumented(self) -> InstrumentedBadTokenDetector; -} - -impl InstrumentedBadTokenDetectorExt for T { - fn instrumented(self) -> InstrumentedBadTokenDetector { - InstrumentedBadTokenDetector { - inner: Box::new(self), - } - } -} - -#[derive(MetricStorage, Clone, Debug)] -#[metric(subsystem = "token_quality")] -struct Metrics { - /// Tracks how many token detections result in good or bad token quality or - /// an error. - #[metric(labels("quality"))] - results: IntCounterVec, -} - -pub struct InstrumentedBadTokenDetector { - inner: Box, -} - -#[async_trait::async_trait] -impl BadTokenDetecting for InstrumentedBadTokenDetector { - async fn detect(&self, token: Address) -> Result { - let result = self - .inner - .detect(token) - .instrument(tracing::info_span!( - "token_quality", - token = format!("{token:#x}") - )) - .await; - - let label = match &result { - Ok(TokenQuality::Good) => "good", - // prometheus isn't very good for string based data so we simply log the bad - // tokens/errors and get the information from Kibana when we need it. - Err(err) => { - tracing::warn!( - "bad token detection for {:?} returned error:\n{:?}", - token, - err - ); - "error" - } - Ok(quality @ TokenQuality::Bad { .. }) => { - tracing::debug!("bad token detection for {:?} returned {:?}", token, quality); - "bad" - } - }; - - Metrics::instance(observe::metrics::get_storage_registry()) - .expect("unexpected error getting metrics instance") - .results - .with_label_values(&[label]) - .inc(); - - result - } -} diff --git a/crates/shared/src/bad_token/list_based.rs b/crates/shared/src/bad_token/list_based.rs index 9731716b0f..27bd55a3c5 100644 --- a/crates/shared/src/bad_token/list_based.rs +++ b/crates/shared/src/bad_token/list_based.rs @@ -1,146 +1,26 @@ use { - super::{BadTokenDetecting, TokenQuality}, alloy::primitives::Address, - anyhow::Result, - std::sync::Arc, - tracing::instrument, + std::{collections::HashSet, sync::Arc}, }; -/// If a token is neither in the allow nor the deny list treat it this way. -pub enum UnknownTokenStrategy { - Allow, - Deny, - Forward(Arc), -} +/// Explicitly deny listed tokens. +#[derive(Default, Clone)] +pub struct DenyListedTokens(Arc); -/// Classify tokens with explicit allow and deny lists. -pub struct ListBasedDetector { - allow_list: Vec
<Address>, - deny_list: Vec<Address>
, - strategy: UnknownTokenStrategy, +#[derive(Default)] +struct Inner { + deny_list: HashSet
, } -impl ListBasedDetector { - /// Panics if same token is both allowed and denied. - pub fn new( - allow_list: Vec
<Address>, - deny_list: Vec<Address>
, - strategy: UnknownTokenStrategy, - ) -> Self { - assert!( - allow_list.iter().all(|token| !deny_list.contains(token)), - "token is allowed and denied" - ); - Self { - allow_list, - deny_list, - strategy, - } - } - - pub fn deny_list(list: Vec
) -> Self { - Self { - allow_list: Vec::new(), - deny_list: list, - strategy: UnknownTokenStrategy::Allow, - } +impl DenyListedTokens { + pub fn new(deny_list: Vec
) -> Self { + let deny_list = deny_list.into_iter().collect(); + Self(Arc::new(Inner { deny_list })) } } -#[async_trait::async_trait] -impl BadTokenDetecting for ListBasedDetector { - #[instrument(skip_all)] - async fn detect(&self, token: Address) -> Result { - if self.allow_list.contains(&token) { - return Ok(TokenQuality::Good); - } - - if self.deny_list.contains(&token) { - return Ok(TokenQuality::Bad { - reason: "token is explicitly deny listed".to_string(), - }); - } - - match &self.strategy { - UnknownTokenStrategy::Allow => Ok(TokenQuality::Good), - UnknownTokenStrategy::Deny => Ok(TokenQuality::Bad { - reason: "token is not allow listed".to_string(), - }), - UnknownTokenStrategy::Forward(inner) => inner.detect(token).await, - } - } -} - -#[cfg(test)] -mod tests { - use {super::*, crate::bad_token::MockBadTokenDetecting, futures::FutureExt}; - - #[test] - fn uses_lists() { - // Would panic if used. - let inner = MockBadTokenDetecting::new(); - let detector = ListBasedDetector { - allow_list: vec![Address::with_last_byte(0)], - deny_list: vec![Address::with_last_byte(1)], - strategy: UnknownTokenStrategy::Forward(Arc::new(inner)), - }; - - let result = detector - .detect(Address::with_last_byte(0)) - .now_or_never() - .unwrap(); - assert!(result.unwrap().is_good()); - - let result = detector - .detect(Address::with_last_byte(1)) - .now_or_never() - .unwrap(); - assert!(!result.unwrap().is_good()); - } - - #[test] - fn not_in_list_default() { - let detector = ListBasedDetector { - allow_list: Vec::new(), - deny_list: Vec::new(), - strategy: UnknownTokenStrategy::Allow, - }; - let result = detector - .detect(Address::with_last_byte(0)) - .now_or_never() - .unwrap(); - assert!(result.unwrap().is_good()); - - let detector = ListBasedDetector { - allow_list: Vec::new(), - deny_list: Vec::new(), - strategy: UnknownTokenStrategy::Deny, - }; - let result = detector - .detect(Address::with_last_byte(0)) - .now_or_never() - .unwrap(); - assert!(!result.unwrap().is_good()); - } - - #[test] - fn not_in_list_forwards() { - let mut inner = MockBadTokenDetecting::new(); - inner - .expect_detect() - .times(1) - .returning(|_| Ok(TokenQuality::Good)); - - let detector = ListBasedDetector { - allow_list: Vec::new(), - deny_list: Vec::new(), - strategy: UnknownTokenStrategy::Forward(Arc::new(inner)), - }; - - let result = detector - .detect(Address::with_last_byte(0)) - .now_or_never() - .unwrap(); - assert!(result.unwrap().is_good()); +impl DenyListedTokens { + pub fn contains(&self, token: &Address) -> bool { + self.0.deny_list.contains(token) } } diff --git a/crates/shared/src/bad_token/mod.rs b/crates/shared/src/bad_token/mod.rs index f4d146e74c..6fed7b5306 100644 --- a/crates/shared/src/bad_token/mod.rs +++ b/crates/shared/src/bad_token/mod.rs @@ -1,9 +1,6 @@ -pub mod instrumented; pub mod list_based; pub mod trace_call; -use {alloy::primitives::Address, anyhow::Result}; - /// How well behaved a token is. #[derive(Debug, Clone, Eq, PartialEq)] pub enum TokenQuality { @@ -22,10 +19,3 @@ impl TokenQuality { } } } - -/// Detect how well behaved a token is. 
-#[cfg_attr(any(test, feature = "test-util"), mockall::automock)] -#[async_trait::async_trait] -pub trait BadTokenDetecting: Send + Sync { - async fn detect(&self, token: Address) -> Result; -} diff --git a/crates/shared/src/order_validation.rs b/crates/shared/src/order_validation.rs index f7909ae54c..5839cfbec3 100644 --- a/crates/shared/src/order_validation.rs +++ b/crates/shared/src/order_validation.rs @@ -1,7 +1,7 @@ use { crate::{ account_balances::{self, BalanceFetching, TransferSimulationError}, - bad_token::{BadTokenDetecting, TokenQuality}, + bad_token::list_based::DenyListedTokens, code_fetching::CodeFetching, order_quoting::{ CalculateQuoteError, @@ -250,7 +250,7 @@ pub struct OrderValidator { banned_users: Arc, validity_configuration: OrderValidPeriodConfiguration, eip1271_skip_creation_validation: bool, - bad_token_detector: Arc, + deny_listed_tokens: DenyListedTokens, hooks: HooksTrampoline::Instance, /// For Full-Validation: performed time of order placement quoter: Arc, @@ -321,7 +321,7 @@ impl OrderValidator { banned_users: Arc, validity_configuration: OrderValidPeriodConfiguration, eip1271_skip_creation_validation: bool, - bad_token_detector: Arc, + deny_listed_tokens: DenyListedTokens, hooks: HooksTrampoline::Instance, quoter: Arc, balance_fetcher: Arc, @@ -338,7 +338,7 @@ impl OrderValidator { banned_users, validity_configuration, eip1271_skip_creation_validation, - bad_token_detector, + deny_listed_tokens, hooks, quoter, balance_fetcher, @@ -524,14 +524,12 @@ impl OrderValidating for OrderValidator { return Err(PartialValidationError::InvalidNativeSellToken); } - for &token in &[order.sell_token, order.buy_token] { - if let TokenQuality::Bad { reason } = self - .bad_token_detector - .detect(token) - .await - .map_err(PartialValidationError::Other)? 
- { - return Err(PartialValidationError::UnsupportedToken { token, reason }); + for token in &[order.sell_token, order.buy_token] { + if self.deny_listed_tokens.contains(token) { + return Err(PartialValidationError::UnsupportedToken { + token: *token, + reason: "token is deny listed".to_string(), + }); } } @@ -1037,7 +1035,6 @@ mod tests { super::*, crate::{ account_balances::MockBalanceFetching, - bad_token::{MockBadTokenDetecting, TokenQuality}, code_fetching::MockCodeFetching, order_quoting::{FindQuoteError, MockOrderQuoting}, signature_validator::MockSignatureValidating, @@ -1076,7 +1073,7 @@ mod tests { Arc::new(order_validation::banned::Users::from_set(banned_users)), validity_configuration, false, - Arc::new(MockBadTokenDetecting::new()), + DenyListedTokens::default(), HooksTrampoline::Instance::new( Address::from([0xcf; 20]), ProviderBuilder::new() @@ -1214,16 +1211,6 @@ mod tests { max_limit: Duration::from_secs(200), }; - let mut bad_token_detector = MockBadTokenDetecting::new(); - bad_token_detector - .expect_detect() - .with(eq(Address::with_last_byte(1))) - .returning(|_| Ok(TokenQuality::Good)); - bad_token_detector - .expect_detect() - .with(eq(Address::with_last_byte(2))) - .returning(|_| Ok(TokenQuality::Good)); - let mut limit_order_counter = MockLimitOrderCounting::new(); limit_order_counter.expect_count().returning(|_| Ok(0u64)); let validator = OrderValidator::new( @@ -1231,7 +1218,7 @@ mod tests { Arc::new(order_validation::banned::Users::none()), validity_configuration, false, - Arc::new(bad_token_detector), + Default::default(), HooksTrampoline::Instance::new( Address::from([0xcf; 20]), ProviderBuilder::new() @@ -1305,16 +1292,6 @@ mod tests { max_limit: Duration::from_secs(200), }; - let mut bad_token_detector = MockBadTokenDetecting::new(); - bad_token_detector - .expect_detect() - .with(eq(Address::with_last_byte(1))) - .returning(|_| Ok(TokenQuality::Good)); - bad_token_detector - .expect_detect() - .with(eq(Address::with_last_byte(2))) - .returning(|_| Ok(TokenQuality::Good)); - let mut limit_order_counter = MockLimitOrderCounting::new(); limit_order_counter.expect_count().returning(|_| Ok(0u64)); let validator = OrderValidator::new( @@ -1322,7 +1299,7 @@ mod tests { Arc::new(order_validation::banned::Users::none()), validity_configuration, false, - Arc::new(bad_token_detector), + Default::default(), HooksTrampoline::Instance::new( Address::from([0xcf; 20]), ProviderBuilder::new() @@ -1377,14 +1354,10 @@ mod tests { #[tokio::test] async fn post_validate_ok() { let mut order_quoter = MockOrderQuoting::new(); - let mut bad_token_detector = MockBadTokenDetecting::new(); let mut balance_fetcher = MockBalanceFetching::new(); order_quoter .expect_find_quote() .returning(|_, _| Ok(Default::default())); - bad_token_detector - .expect_detect() - .returning(|_| Ok(TokenQuality::Good)); balance_fetcher .expect_can_transfer() .returning(|_, _| Ok(())); @@ -1416,7 +1389,7 @@ mod tests { max_limit: Duration::from_secs(200), }, false, - Arc::new(bad_token_detector), + Default::default(), hooks.clone(), Arc::new(order_quoter), Arc::new(balance_fetcher), @@ -1587,7 +1560,6 @@ mod tests { #[tokio::test] async fn post_validate_too_many_limit_orders() { let mut order_quoter = MockOrderQuoting::new(); - let mut bad_token_detector = MockBadTokenDetecting::new(); let mut balance_fetcher = MockBalanceFetching::new(); order_quoter.expect_find_quote().returning(|_, _| { Ok(Quote { @@ -1598,9 +1570,6 @@ mod tests { fee_amount: Default::default(), }) }); - bad_token_detector - 
.expect_detect() - .returning(|_| Ok(TokenQuality::Good)); balance_fetcher .expect_can_transfer() .returning(|_, _| Ok(())); @@ -1628,7 +1597,7 @@ mod tests { max_limit: Duration::from_secs(200), }, false, - Arc::new(bad_token_detector), + Default::default(), HooksTrampoline::Instance::new( Address::from([0xcf; 20]), ProviderBuilder::new() @@ -1675,14 +1644,10 @@ mod tests { #[tokio::test] async fn post_limit_does_not_apply_to_in_market_orders() { let mut order_quoter = MockOrderQuoting::new(); - let mut bad_token_detector = MockBadTokenDetecting::new(); let mut balance_fetcher = MockBalanceFetching::new(); order_quoter .expect_find_quote() .returning(|_, _| Ok(Default::default())); - bad_token_detector - .expect_detect() - .returning(|_| Ok(TokenQuality::Good)); balance_fetcher .expect_can_transfer() .returning(|_, _| Ok(())); @@ -1705,7 +1670,7 @@ mod tests { Arc::new(order_validation::banned::Users::none()), OrderValidPeriodConfiguration::any(), false, - Arc::new(bad_token_detector), + Default::default(), HooksTrampoline::Instance::new( Address::from([0xcf; 20]), ProviderBuilder::new() @@ -1751,14 +1716,10 @@ mod tests { #[tokio::test] async fn post_validate_err_zero_amount() { let mut order_quoter = MockOrderQuoting::new(); - let mut bad_token_detector = MockBadTokenDetecting::new(); let mut balance_fetcher = MockBalanceFetching::new(); order_quoter .expect_find_quote() .returning(|_, _| Ok(Default::default())); - bad_token_detector - .expect_detect() - .returning(|_| Ok(TokenQuality::Good)); balance_fetcher .expect_can_transfer() .returning(|_, _| Ok(())); @@ -1770,7 +1731,7 @@ mod tests { Arc::new(order_validation::banned::Users::none()), OrderValidPeriodConfiguration::any(), false, - Arc::new(bad_token_detector), + Default::default(), HooksTrampoline::Instance::new( Address::from([0xcf; 20]), ProviderBuilder::new() @@ -1809,14 +1770,10 @@ mod tests { #[tokio::test] async fn post_validate_err_wrong_owner() { let mut order_quoter = MockOrderQuoting::new(); - let mut bad_token_detector = MockBadTokenDetecting::new(); let mut balance_fetcher = MockBalanceFetching::new(); order_quoter .expect_find_quote() .returning(|_, _| Ok(Default::default())); - bad_token_detector - .expect_detect() - .returning(|_| Ok(TokenQuality::Good)); balance_fetcher .expect_can_transfer() .returning(|_, _| Ok(())); @@ -1828,7 +1785,7 @@ mod tests { Arc::new(order_validation::banned::Users::none()), OrderValidPeriodConfiguration::any(), false, - Arc::new(bad_token_detector), + Default::default(), HooksTrampoline::Instance::new( Address::from([0xcf; 20]), ProviderBuilder::new() @@ -1869,16 +1826,11 @@ mod tests { #[tokio::test] async fn post_validate_err_unsupported_token() { let mut order_quoter = MockOrderQuoting::new(); - let mut bad_token_detector = MockBadTokenDetecting::new(); + let deny_listed_tokens = DenyListedTokens::new(vec![Address::with_last_byte(1)]); let mut balance_fetcher = MockBalanceFetching::new(); order_quoter .expect_find_quote() .returning(|_, _| Ok(Default::default())); - bad_token_detector.expect_detect().returning(|_| { - Ok(TokenQuality::Bad { - reason: Default::default(), - }) - }); balance_fetcher .expect_can_transfer() .returning(|_, _| Ok(())); @@ -1891,7 +1843,7 @@ mod tests { Arc::new(order_validation::banned::Users::none()), OrderValidPeriodConfiguration::any(), false, - Arc::new(bad_token_detector), + deny_listed_tokens, HooksTrampoline::Instance::new( Address::from([0xcf; 20]), ProviderBuilder::new() @@ -1937,14 +1889,10 @@ mod tests { #[tokio::test] async fn 
post_validate_err_insufficient_balance() { let mut order_quoter = MockOrderQuoting::new(); - let mut bad_token_detector = MockBadTokenDetecting::new(); let mut balance_fetcher = MockBalanceFetching::new(); order_quoter .expect_find_quote() .returning(|_, _| Ok(Default::default())); - bad_token_detector - .expect_detect() - .returning(|_| Ok(TokenQuality::Good)); balance_fetcher .expect_can_transfer() .returning(|_, _| Err(TransferSimulationError::InsufficientBalance)); @@ -1956,7 +1904,7 @@ mod tests { Arc::new(order_validation::banned::Users::none()), OrderValidPeriodConfiguration::any(), false, - Arc::new(bad_token_detector), + Default::default(), HooksTrampoline::Instance::new( Address::from([0xcf; 20]), ProviderBuilder::new() @@ -1997,15 +1945,11 @@ mod tests { #[tokio::test] async fn post_validate_err_invalid_eip1271_signature() { let mut order_quoter = MockOrderQuoting::new(); - let mut bad_token_detector = MockBadTokenDetecting::new(); let mut balance_fetcher = MockBalanceFetching::new(); let mut signature_validator = MockSignatureValidating::new(); order_quoter .expect_find_quote() .returning(|_, _| Ok(Default::default())); - bad_token_detector - .expect_detect() - .returning(|_| Ok(TokenQuality::Good)); balance_fetcher .expect_can_transfer() .returning(|_, _| Ok(())); @@ -2020,7 +1964,7 @@ mod tests { Arc::new(order_validation::banned::Users::none()), OrderValidPeriodConfiguration::any(), false, - Arc::new(bad_token_detector), + Default::default(), HooksTrampoline::Instance::new( Address::from([0xcf; 20]), ProviderBuilder::new() @@ -2071,14 +2015,10 @@ mod tests { is_expected_error: impl Fn(ValidationError) -> bool, ) { let mut order_quoter = MockOrderQuoting::new(); - let mut bad_token_detector = MockBadTokenDetecting::new(); let mut balance_fetcher = MockBalanceFetching::new(); order_quoter .expect_find_quote() .returning(|_, _| Ok(Default::default())); - bad_token_detector - .expect_detect() - .returning(|_| Ok(TokenQuality::Good)); balance_fetcher .expect_can_transfer() .returning(move |_, _| Err(create_error())); @@ -2091,7 +2031,7 @@ mod tests { Arc::new(order_validation::banned::Users::none()), OrderValidPeriodConfiguration::any(), false, - Arc::new(bad_token_detector), + Default::default(), HooksTrampoline::Instance::new( Address::from([0xcf; 20]), ProviderBuilder::new() @@ -2167,14 +2107,10 @@ mod tests { #[test] fn allows_insufficient_balance_for_orders_with_sufficient_flashloan_hint() { let mut order_quoter = MockOrderQuoting::new(); - let mut bad_token_detector = MockBadTokenDetecting::new(); let mut balance_fetcher = MockBalanceFetching::new(); order_quoter .expect_find_quote() .returning(|_, _| Ok(Default::default())); - bad_token_detector - .expect_detect() - .returning(|_| Ok(TokenQuality::Good)); balance_fetcher .expect_can_transfer() .returning(|_, _| Err(TransferSimulationError::InsufficientBalance)); @@ -2186,7 +2122,7 @@ mod tests { Arc::new(order_validation::banned::Users::none()), OrderValidPeriodConfiguration::any(), false, - Arc::new(bad_token_detector), + Default::default(), HooksTrampoline::Instance::new( Address::from([0xcf; 20]), ProviderBuilder::new() @@ -2577,11 +2513,7 @@ mod tests { .with(eq(quote_id), eq(quote_search_parameters.clone())) .returning(move |_, _| Ok(quote_data.clone())); - let mut bad_token_detector = MockBadTokenDetecting::new(); let mut balance_fetcher = MockBalanceFetching::new(); - bad_token_detector - .expect_detect() - .returning(|_| Ok(TokenQuality::Good)); balance_fetcher .expect_can_transfer() .returning(|_, _| Ok(())); @@ 
-2602,7 +2534,7 @@ mod tests { max_limit: Duration::from_secs(200), }, false, - Arc::new(bad_token_detector), + Default::default(), HooksTrampoline::Instance::new( Address::from([0xcf; 20]), ProviderBuilder::new() diff --git a/crates/shared/src/price_estimation/factory.rs b/crates/shared/src/price_estimation/factory.rs index 74ac93f0f1..43fe6e1e01 100644 --- a/crates/shared/src/price_estimation/factory.rs +++ b/crates/shared/src/price_estimation/factory.rs @@ -13,8 +13,7 @@ use { }, crate::{ arguments, - bad_token::BadTokenDetecting, - baseline_solver::BaseTokens, + bad_token::list_based::DenyListedTokens, code_fetching::CachedCodeFetcher, ethrpc::Web3, gas_price_estimation::GasPriceEstimating, @@ -61,14 +60,13 @@ pub struct Network { pub native_token: Address, pub settlement: Address, pub authenticator: Address, - pub base_tokens: Arc, pub block_stream: CurrentBlockWatcher, } /// The shared components needed for creating price estimators. pub struct Components { pub http_factory: HttpClientFactory, - pub bad_token_detector: Arc, + pub deny_listed_tokens: DenyListedTokens, pub tokens: Arc, pub code_fetcher: Arc, } @@ -306,7 +304,7 @@ impl<'a> PriceEstimatorFactory<'a> { SanitizedPriceEstimator::new( estimator, self.network.native_token, - self.components.bad_token_detector.clone(), + self.components.deny_listed_tokens.clone(), false, // not estimating native price ) } @@ -320,7 +318,7 @@ impl<'a> PriceEstimatorFactory<'a> { SanitizedPriceEstimator::new( estimator, self.network.native_token, - self.components.bad_token_detector.clone(), + self.components.deny_listed_tokens.clone(), true, // estimating native price ) } diff --git a/crates/shared/src/price_estimation/sanitized.rs b/crates/shared/src/price_estimation/sanitized.rs index 96b036cf30..48ecfd70aa 100644 --- a/crates/shared/src/price_estimation/sanitized.rs +++ b/crates/shared/src/price_estimation/sanitized.rs @@ -1,6 +1,6 @@ use { crate::{ - bad_token::{BadTokenDetecting, TokenQuality}, + bad_token::list_based::DenyListedTokens, price_estimation::{ Estimate, PriceEstimating, @@ -21,7 +21,7 @@ use { /// ETH as buy token appropriately. pub struct SanitizedPriceEstimator { inner: Arc, - bad_token_detector: Arc, + deny_listed_tokens: DenyListedTokens, native_token: Address, /// Enables the short-circuiting logic in case the sell and buy tokens are /// the same @@ -32,26 +32,25 @@ impl SanitizedPriceEstimator { pub fn new( inner: Arc, native_token: Address, - bad_token_detector: Arc, + deny_listed_tokens: DenyListedTokens, is_estimating_native_price: bool, ) -> Self { Self { inner, native_token, - bad_token_detector, + deny_listed_tokens, is_estimating_native_price, } } /// Checks if the traded tokens are supported by the protocol. 
- async fn handle_bad_tokens(&self, query: &Query) -> Result<(), PriceEstimationError> { + fn handle_deny_listed_tokens(&self, query: &Query) -> Result<(), PriceEstimationError> { for token in [query.sell_token, query.buy_token] { - match self.bad_token_detector.detect(token).await { - Err(err) => return Err(PriceEstimationError::ProtocolInternal(err)), - Ok(TokenQuality::Bad { reason }) => { - return Err(PriceEstimationError::UnsupportedToken { token, reason }); - } - _ => (), + if self.deny_listed_tokens.contains(&token) { + return Err(PriceEstimationError::UnsupportedToken { + token, + reason: "token is deny listed".to_string(), + }); } } Ok(()) @@ -65,7 +64,7 @@ impl PriceEstimating for SanitizedPriceEstimator { query: Arc, ) -> futures::future::BoxFuture<'_, super::PriceEstimateResult> { async move { - self.handle_bad_tokens(&query).await?; + self.handle_deny_listed_tokens(&query)?; // When estimating native price the sell token is substituted by // native one. In that case, the output amount of the price // estimation can be trivially computed as the same amount as input @@ -153,10 +152,7 @@ impl PriceEstimating for SanitizedPriceEstimator { mod tests { use { super::*, - crate::{ - bad_token::{MockBadTokenDetecting, TokenQuality}, - price_estimation::{HEALTHY_PRICE_ESTIMATION_TIME, MockPriceEstimating}, - }, + crate::price_estimation::{HEALTHY_PRICE_ESTIMATION_TIME, MockPriceEstimating}, alloy::primitives::{Address, U256 as AlloyU256}, model::order::OrderKind, number::nonzero::NonZeroU256, @@ -166,16 +162,7 @@ mod tests { #[tokio::test] async fn handles_trivial_estimates_on_its_own() { - let mut bad_token_detector = MockBadTokenDetecting::new(); - bad_token_detector.expect_detect().returning(|token| { - if token == BAD_TOKEN { - Ok(TokenQuality::Bad { - reason: "Token not supported".into(), - }) - } else { - Ok(TokenQuality::Good) - } - }); + let deny_listed_tokens = DenyListedTokens::new(vec![BAD_TOKEN]); let native_token = Address::with_last_byte(42); @@ -457,10 +444,9 @@ mod tests { } .boxed() }); - let bad_token_detector = Arc::new(bad_token_detector); let sanitized_estimator = SanitizedPriceEstimator { inner: Arc::new(wrapped_estimator), - bad_token_detector: bad_token_detector.clone(), + deny_listed_tokens: deny_listed_tokens.clone(), native_token, is_estimating_native_price: true, }; @@ -563,7 +549,7 @@ mod tests { let sanitized_estimator_non_native = SanitizedPriceEstimator { inner: Arc::new(wrapped_estimator), - bad_token_detector, + deny_listed_tokens, native_token, is_estimating_native_price: false, }; From 8e7c79ff8290cca4f8620816bd720627eefd8a6b Mon Sep 17 00:00:00 2001 From: Marcin Szymczak Date: Mon, 16 Feb 2026 17:24:22 +0100 Subject: [PATCH 066/219] Extract common utils, serialization to shared and serde-ext (#4142) # Description The simulation refactoring PR (#4140) is large due to some common types being moved out to `shared`. This PR covers these aspects and moves common `driver` utilities to `shared`. 
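To make the (de)serialization move described below concrete, here is a minimal sketch of the resulting usage pattern. The DTO and its fields are illustrative only, not code from this repository; the `serde_ext::U256` and `serde_ext::Hex` adapters are the helpers extracted in the changes below (previously `crate::util::serialize` inside `driver`):

```rust
use {
    alloy::primitives::U256,
    serde::Deserialize,
    serde_with::serde_as,
};

// Illustrative DTO showing how `driver`/`solvers-dto` structs annotate fields
// once the serialization helpers live in the shared `serde-ext` crate.
#[serde_as]
#[derive(Debug, Deserialize)]
#[serde(rename_all = "camelCase")]
struct ExampleRequest {
    // U256 amount (de)serialized via the shared adapter.
    #[serde_as(as = "serde_ext::U256")]
    sell_amount: U256,
    // Raw call data (de)serialized via the shared hex adapter.
    #[serde_as(as = "serde_ext::Hex")]
    call_data: Vec<u8>,
}
```

The point is that `driver` and `solvers-dto` can now share these adapters instead of each carrying its own copy.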
Alongside this, common (de)serialization logic is moved to a new crate, `serde-ext`. # Changes - [x] Move driver::util::bytes to `shared` - [x] Remove util::Bytes from `autopilot` - [x] Rename shared::ethrpc to web3 for a better name and to avoid naming conflicts with the `ethrpc` crate - [x] Move common (de)serialization logic to the new `serde-ext` crate and use it throughout `driver` and `solvers-dto` ## How to test - E2E tests - [ ] Staging deployment works --- Cargo.lock | 12 ++ Cargo.toml | 1 + crates/autopilot/src/boundary/mod.rs | 4 +- .../ethflow_events/event_retriever.rs | 2 +- .../onchain_order_events/event_retriever.rs | 2 +- crates/autopilot/src/infra/blockchain/mod.rs | 5 +- crates/autopilot/src/run.rs | 10 +- crates/autopilot/src/util/bytes.rs | 18 --- crates/autopilot/src/util/mod.rs | 4 - crates/driver/Cargo.toml | 1 + .../src/boundary/liquidity/uniswap/v3.rs | 2 +- crates/driver/src/boundary/mod.rs | 33 ++---- crates/driver/src/domain/competition/mod.rs | 11 +- .../src/domain/competition/order/app_data.rs | 4 +- .../src/domain/competition/order/mod.rs | 7 +- .../src/domain/competition/order/signature.rs | 18 +-- .../src/domain/competition/pre_processing.rs | 9 +- .../risk_detector/bad_tokens/simulation.rs | 2 +- .../domain/competition/solution/encoding.rs | 21 ++-- .../competition/solution/interaction.rs | 8 +- crates/driver/src/domain/eth/mod.rs | 11 +- .../src/infra/api/routes/quote/dto/order.rs | 7 +- .../src/infra/api/routes/quote/dto/quote.rs | 21 ++-- .../api/routes/reveal/dto/reveal_response.rs | 10 +- .../api/routes/solve/dto/solve_request.rs | 25 ++--- .../api/routes/solve/dto/solve_response.rs | 15 ++- crates/driver/src/infra/blockchain/mod.rs | 13 ++- crates/driver/src/infra/config/file/mod.rs | 12 +- crates/driver/src/infra/mempool/mod.rs | 7 +- .../liquidity_sources/liquorice/notifier.rs | 9 +- crates/driver/src/infra/simulator/enso/dto.rs | 6 +- .../src/infra/simulator/tenderly/dto.rs | 4 +- crates/driver/src/infra/solver/dto/auction.rs | 2 +- .../driver/src/infra/solver/dto/solution.rs | 8 +- crates/driver/src/util/mod.rs | 4 +- crates/e2e/src/setup/deploy.rs | 2 +- .../e2e/src/setup/onchain_components/mod.rs | 2 +- crates/e2e/src/setup/services.rs | 2 +- crates/e2e/tests/e2e/api_version.rs | 2 +- crates/e2e/tests/e2e/app_data.rs | 2 +- crates/e2e/tests/e2e/app_data_signer.rs | 2 +- crates/e2e/tests/e2e/buffers.rs | 2 +- crates/e2e/tests/e2e/cors.rs | 2 +- crates/e2e/tests/e2e/cow_amm.rs | 2 +- crates/e2e/tests/e2e/deprecated_endpoints.rs | 2 +- crates/e2e/tests/e2e/eth_integration.rs | 2 +- crates/e2e/tests/e2e/eth_safe.rs | 2 +- crates/e2e/tests/e2e/hooks.rs | 2 +- crates/e2e/tests/e2e/jit_orders.rs | 2 +- crates/e2e/tests/e2e/limit_orders.rs | 2 +- crates/e2e/tests/e2e/malformed_requests.rs | 2 +- crates/e2e/tests/e2e/order_cancellation.rs | 2 +- crates/e2e/tests/e2e/partial_fill.rs | 2 +- .../tests/e2e/partially_fillable_balance.rs | 2 +- .../e2e/tests/e2e/partially_fillable_pool.rs | 2 +- .../e2e/tests/e2e/place_order_with_quote.rs | 2 +- crates/e2e/tests/e2e/protocol_fee.rs | 2 +- crates/e2e/tests/e2e/quoting.rs | 2 +- crates/e2e/tests/e2e/replace_order.rs | 2 +- crates/e2e/tests/e2e/smart_contract_orders.rs | 2 +- crates/e2e/tests/e2e/solver_competition.rs | 2 +- crates/e2e/tests/e2e/submission.rs | 2 +- crates/e2e/tests/e2e/token_metadata.rs | 2 +- .../tests/e2e/tracking_insufficient_funds.rs | 2 +- crates/e2e/tests/e2e/trades_v2.rs | 2 +- crates/e2e/tests/e2e/uncovered_order.rs | 2 +- crates/e2e/tests/e2e/univ2.rs | 2 +- crates/e2e/tests/e2e/user_surplus.rs | 2
+- crates/e2e/tests/e2e/vault_balances.rs | 2 +- crates/e2e/tests/e2e/wrapper.rs | 2 +- crates/orderbook/src/run.rs | 4 +- crates/refunder/src/arguments.rs | 4 +- crates/refunder/src/lib.rs | 2 +- crates/refunder/src/submitter.rs | 2 +- crates/serde-ext/Cargo.toml | 12 ++ .../util/serialize => serde-ext/src}/hex.rs | 0 .../serialize/mod.rs => serde-ext/src/lib.rs} | 0 .../util/serialize => serde-ext/src}/u256.rs | 14 +-- crates/shared/src/arguments.rs | 2 +- crates/shared/src/bad_token/trace_call.rs | 2 +- .../{driver/src/util => shared/src}/bytes.rs | 0 crates/shared/src/code_fetching.rs | 2 +- crates/shared/src/gas_price_estimation/mod.rs | 2 +- crates/shared/src/lib.rs | 2 +- crates/shared/src/price_estimation/factory.rs | 2 +- .../sources/balancer_v2/pool_fetching/mod.rs | 2 +- crates/shared/src/sources/swapr.rs | 2 +- crates/shared/src/sources/uniswap_v2/mod.rs | 2 +- .../src/sources/uniswap_v2/pool_fetching.rs | 2 +- crates/shared/src/trace_many.rs | 2 +- crates/shared/src/{ethrpc.rs => web3.rs} | 0 crates/solver/src/liquidity/balancer_v2.rs | 2 +- crates/solver/src/liquidity/uniswap_v2.rs | 2 +- crates/solver/src/liquidity/uniswap_v3.rs | 2 +- crates/solver/src/liquidity/zeroex.rs | 2 +- crates/solvers-dto/Cargo.toml | 1 + crates/solvers-dto/src/auction.rs | 5 +- crates/solvers-dto/src/lib.rs | 106 ------------------ crates/solvers-dto/src/notification.rs | 3 +- crates/solvers-dto/src/solution.rs | 15 ++- 100 files changed, 230 insertions(+), 366 deletions(-) delete mode 100644 crates/autopilot/src/util/bytes.rs create mode 100644 crates/serde-ext/Cargo.toml rename crates/{driver/src/util/serialize => serde-ext/src}/hex.rs (100%) rename crates/{driver/src/util/serialize/mod.rs => serde-ext/src/lib.rs} (100%) rename crates/{driver/src/util/serialize => serde-ext/src}/u256.rs (72%) rename crates/{driver/src/util => shared/src}/bytes.rs (100%) rename crates/shared/src/{ethrpc.rs => web3.rs} (100%) diff --git a/Cargo.lock b/Cargo.lock index c40ca9c023..f2f53f5498 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2720,6 +2720,7 @@ dependencies = [ "reqwest 0.11.27", "s3", "serde", + "serde-ext", "serde_json", "serde_with", "shared", @@ -5966,6 +5967,16 @@ dependencies = [ "serde_derive", ] +[[package]] +name = "serde-ext" +version = "0.1.0" +dependencies = [ + "alloy", + "const-hex", + "serde", + "serde_with", +] + [[package]] name = "serde_core" version = "1.0.228" @@ -6331,6 +6342,7 @@ dependencies = [ "const-hex", "number", "serde", + "serde-ext", "serde_with", ] diff --git a/Cargo.toml b/Cargo.toml index 57ed351635..61baa62db7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -105,6 +105,7 @@ proc-macro2 = "1.0.103" alloy-sol-macro-input = "1.4.1" alloy-sol-macro-expander = "1.4.1" jsonrpc-core = "18.0.0" +serde-ext = { path = "crates/serde-ext" } [workspace.lints] clippy.cast_possible_wrap = "deny" diff --git a/crates/autopilot/src/boundary/mod.rs b/crates/autopilot/src/boundary/mod.rs index ab71671bfa..42d3439d85 100644 --- a/crates/autopilot/src/boundary/mod.rs +++ b/crates/autopilot/src/boundary/mod.rs @@ -33,8 +33,8 @@ pub mod events; pub mod order; /// Builds a web3 client based on the ethrpc args config. 
-pub fn web3_client(ethrpc: &Url, ethrpc_args: &shared::ethrpc::Arguments) -> Web3 { - shared::ethrpc::web3(ethrpc_args, ethrpc, "base") +pub fn web3_client(ethrpc: &Url, ethrpc_args: &shared::web3::Arguments) -> Web3 { + shared::web3::web3(ethrpc_args, ethrpc, "base") } pub struct SolvableOrders { diff --git a/crates/autopilot/src/database/ethflow_events/event_retriever.rs b/crates/autopilot/src/database/ethflow_events/event_retriever.rs index b1ed267ad2..dfc159471a 100644 --- a/crates/autopilot/src/database/ethflow_events/event_retriever.rs +++ b/crates/autopilot/src/database/ethflow_events/event_retriever.rs @@ -8,7 +8,7 @@ use { sol_types::SolEvent, }, contracts::alloy::CoWSwapEthFlow::CoWSwapEthFlow, - shared::{ethrpc::Web3, event_handling::AlloyEventRetrieving}, + shared::{event_handling::AlloyEventRetrieving, web3::Web3}, }; pub struct EthFlowRefundRetriever { diff --git a/crates/autopilot/src/database/onchain_order_events/event_retriever.rs b/crates/autopilot/src/database/onchain_order_events/event_retriever.rs index a56e10fe68..d321885e4a 100644 --- a/crates/autopilot/src/database/onchain_order_events/event_retriever.rs +++ b/crates/autopilot/src/database/onchain_order_events/event_retriever.rs @@ -5,7 +5,7 @@ use { sol_types::SolEvent, }, contracts::alloy::CoWSwapOnchainOrders, - shared::{ethrpc::Web3, event_handling::AlloyEventRetrieving}, + shared::{event_handling::AlloyEventRetrieving, web3::Web3}, }; // Note: we use a custom implementation of `EventRetrieving` rather than using diff --git a/crates/autopilot/src/infra/blockchain/mod.rs b/crates/autopilot/src/infra/blockchain/mod.rs index 90248b9ac9..f5a0f5df08 100644 --- a/crates/autopilot/src/infra/blockchain/mod.rs +++ b/crates/autopilot/src/infra/blockchain/mod.rs @@ -27,10 +27,7 @@ pub struct Rpc { impl Rpc { /// Instantiate an RPC client to an Ethereum (or Ethereum-compatible) node /// at the specifed URL. - pub async fn new( - url: &url::Url, - ethrpc_args: &shared::ethrpc::Arguments, - ) -> Result { + pub async fn new(url: &url::Url, ethrpc_args: &shared::web3::Arguments) -> Result { let web3 = boundary::web3_client(url, ethrpc_args); let chain = Chain::try_from(web3.provider.get_chain_id().await?) .map_err(|_| Error::UnsupportedChain)?; diff --git a/crates/autopilot/src/run.rs b/crates/autopilot/src/run.rs index 9c973aab7d..39b4224be5 100644 --- a/crates/autopilot/src/run.rs +++ b/crates/autopilot/src/run.rs @@ -83,7 +83,7 @@ impl Liveness { /// Creates Web3 transport based on the given config. 
#[instrument(skip_all)] -async fn ethrpc(url: &Url, ethrpc_args: &shared::ethrpc::Arguments) -> infra::blockchain::Rpc { +async fn ethrpc(url: &Url, ethrpc_args: &shared::web3::Arguments) -> infra::blockchain::Rpc { infra::blockchain::Rpc::new(url, ethrpc_args) .await .expect("connect ethereum RPC") @@ -93,7 +93,7 @@ async fn ethrpc(url: &Url, ethrpc_args: &shared::ethrpc::Arguments) -> infra::bl async fn unbuffered_ethrpc(url: &Url) -> infra::blockchain::Rpc { ethrpc( url, - &shared::ethrpc::Arguments { + &shared::web3::Arguments { ethrpc_max_batch_size: 0, ethrpc_max_concurrent_requests: 0, ethrpc_batch_delay: Default::default(), @@ -170,12 +170,12 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { crate::database::run_database_metrics_work(db_write.clone()); let http_factory = HttpClientFactory::new(&args.http_client); - let web3 = shared::ethrpc::web3(&args.shared.ethrpc, &args.shared.node_url, "base"); + let web3 = shared::web3::web3(&args.shared.ethrpc, &args.shared.node_url, "base"); let simulation_web3 = args .shared .simulation_node_url .as_ref() - .map(|node_url| shared::ethrpc::web3(&args.shared.ethrpc, node_url, "simulation")); + .map(|node_url| shared::web3::web3(&args.shared.ethrpc, node_url, "simulation")); let chain_id = web3 .provider @@ -653,7 +653,7 @@ async fn shadow_mode(args: Arguments) -> ! { .into_iter() .collect(); - let web3 = shared::ethrpc::web3(&args.shared.ethrpc, &args.shared.node_url, "base"); + let web3 = shared::web3::web3(&args.shared.ethrpc, &args.shared.node_url, "base"); let weth = WETH9::Instance::deployed(&web3.provider) .await .expect("couldn't find deployed WETH contract"); diff --git a/crates/autopilot/src/util/bytes.rs b/crates/autopilot/src/util/bytes.rs deleted file mode 100644 index c8b2d0992a..0000000000 --- a/crates/autopilot/src/util/bytes.rs +++ /dev/null @@ -1,18 +0,0 @@ -/// A thin wrapper around a collection of bytes. Provides hex debug formatting. -#[derive(Clone, Copy, PartialEq, Eq, Hash, Default)] -pub struct Bytes(pub T); - -impl std::fmt::Debug for Bytes -where - T: AsRef<[u8]>, -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", const_hex::encode_prefixed(&self.0)) - } -} - -impl From for Bytes { - fn from(value: T) -> Self { - Self(value) - } -} diff --git a/crates/autopilot/src/util/mod.rs b/crates/autopilot/src/util/mod.rs index 277ec7be2f..bb1babe21b 100644 --- a/crates/autopilot/src/util/mod.rs +++ b/crates/autopilot/src/util/mod.rs @@ -1,9 +1,5 @@ use url::Url; -mod bytes; - -pub use self::bytes::Bytes; - /// Joins a path with a URL, ensuring that there is only one slash between them. /// It doesn't matter if the URL ends with a slash or the path starts with one. 
pub fn join(url: &Url, mut path: &str) -> Url { diff --git a/crates/driver/Cargo.toml b/crates/driver/Cargo.toml index d93e4f4631..f5a725802d 100644 --- a/crates/driver/Cargo.toml +++ b/crates/driver/Cargo.toml @@ -46,6 +46,7 @@ rand = { workspace = true } reqwest = { workspace = true } s3 = { workspace = true } serde = { workspace = true, features = ["derive"] } +serde-ext = { workspace = true } serde_json = { workspace = true } serde_with = { workspace = true } solvers-dto = { path = "../solvers-dto" } diff --git a/crates/driver/src/boundary/liquidity/uniswap/v3.rs b/crates/driver/src/boundary/liquidity/uniswap/v3.rs index f6d41df6b8..f717c852ef 100644 --- a/crates/driver/src/boundary/liquidity/uniswap/v3.rs +++ b/crates/driver/src/boundary/liquidity/uniswap/v3.rs @@ -93,7 +93,7 @@ pub fn to_interaction( eth::Interaction { target: encoded.0, value: encoded.1.into(), - call_data: crate::util::Bytes(encoded.2.0.to_vec()), + call_data: encoded.2, } } diff --git a/crates/driver/src/boundary/mod.rs b/crates/driver/src/boundary/mod.rs index 630616a9f3..8ab03cafe3 100644 --- a/crates/driver/src/boundary/mod.rs +++ b/crates/driver/src/boundary/mod.rs @@ -32,29 +32,18 @@ pub use { anyhow::{Error, Result}, contracts, model::order::OrderData, - shared::ethrpc::Web3, + shared::web3::Web3, }; -/// Builds a web3 client that buffers requests and sends them in a -/// batch call. -pub fn buffered_web3_client( - ethrpc: &Url, - max_batch_size: usize, - max_concurrent_requests: usize, -) -> Web3 { - web3_client(ethrpc, max_batch_size, max_concurrent_requests) -} - /// Builds a web3 client that sends requests one by one. -pub fn unbuffered_web3_client(ethrpc: &Url) -> Web3 { - web3_client(ethrpc, 0, 0) -} - -fn web3_client(ethrpc: &Url, max_batch_size: usize, max_concurrent_requests: usize) -> Web3 { - let ethrpc_args = shared::ethrpc::Arguments { - ethrpc_max_batch_size: max_batch_size, - ethrpc_max_concurrent_requests: max_concurrent_requests, - ethrpc_batch_delay: Default::default(), - }; - shared::ethrpc::web3(ðrpc_args, ethrpc, "base") +pub fn unbuffered_web3(ethrpc: &Url) -> Web3 { + shared::web3::web3( + &shared::web3::Arguments { + ethrpc_max_batch_size: 0, + ethrpc_max_concurrent_requests: 0, + ethrpc_batch_delay: Default::default(), + }, + ethrpc, + "base", + ) } diff --git a/crates/driver/src/domain/competition/mod.rs b/crates/driver/src/domain/competition/mod.rs index 5c51e38bb1..afe65a25d3 100644 --- a/crates/driver/src/domain/competition/mod.rs +++ b/crates/driver/src/domain/competition/mod.rs @@ -19,8 +19,9 @@ use { simulator::{RevertError, SimulatorError}, solver::{self, SolutionMerging, Solver}, }, - util::{Bytes, math}, + util::math, }, + alloy::primitives::Bytes, futures::{StreamExt, future::Either, stream::FuturesUnordered}, hyper::body::Bytes as RequestBytes, itertools::Itertools, @@ -851,22 +852,22 @@ pub struct PriceLimits { #[derive(Debug)] pub struct Revealed { /// The internalized calldata is the final calldata that appears onchain. - pub internalized_calldata: Bytes>, + pub internalized_calldata: Bytes, /// The uninternalized calldata must be known so that the CoW solver team /// can manually enforce certain rules which can not be enforced /// automatically. - pub uninternalized_calldata: Bytes>, + pub uninternalized_calldata: Bytes, } #[derive(Debug)] pub struct Settled { /// The transaction hash in which the solution was submitted. 
pub tx_hash: eth::TxId, - pub internalized_calldata: Bytes>, + pub internalized_calldata: Bytes, /// The uninternalized calldata must be known so that the CoW solver team /// can manually enforce certain rules which can not be enforced /// automatically. - pub uninternalized_calldata: Bytes>, + pub uninternalized_calldata: Bytes, } #[derive(Debug, thiserror::Error)] diff --git a/crates/driver/src/domain/competition/order/app_data.rs b/crates/driver/src/domain/competition/order/app_data.rs index 0134f331dc..d9c927457c 100644 --- a/crates/driver/src/domain/competition/order/app_data.rs +++ b/crates/driver/src/domain/competition/order/app_data.rs @@ -1,5 +1,5 @@ use { - crate::util::Bytes, + alloy::primitives::FixedBytes, anyhow::Context, app_data::AppDataDocument, derive_more::From, @@ -157,7 +157,7 @@ pub const APP_DATA_LEN: usize = 32; /// While this type holds the hash, the data itself is uploaded to IPFS. This /// hash is signed along with the order. #[derive(Debug, Default, Clone, Copy, Hash, PartialEq, Eq)] -pub struct AppDataHash(pub Bytes<[u8; APP_DATA_LEN]>); +pub struct AppDataHash(pub FixedBytes); impl From<[u8; APP_DATA_LEN]> for AppDataHash { fn from(inner: [u8; APP_DATA_LEN]) -> Self { diff --git a/crates/driver/src/domain/competition/order/mod.rs b/crates/driver/src/domain/competition/order/mod.rs index e0935a96eb..3a7c6e1b59 100644 --- a/crates/driver/src/domain/competition/order/mod.rs +++ b/crates/driver/src/domain/competition/order/mod.rs @@ -2,8 +2,9 @@ use { crate::{ domain::eth, infra::{Ethereum, blockchain}, - util::{self, Bytes}, + util, }, + alloy::primitives::FixedBytes, derive_more::{From, Into}, model::order::{BuyTokenDestination, SellTokenSource}, }; @@ -199,7 +200,7 @@ pub const UID_LEN: usize = 56; /// UID of an order. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] -pub struct Uid(pub Bytes<[u8; UID_LEN]>); +pub struct Uid(pub FixedBytes); impl From<&solvers_dto::solution::OrderUid> for Uid { fn from(value: &solvers_dto::solution::OrderUid) -> Self { @@ -213,7 +214,7 @@ impl Uid { bytes[0..32].copy_from_slice(order_hash.as_slice()); bytes[32..52].copy_from_slice(owner.as_slice()); bytes[52..56].copy_from_slice(&valid_to.to_be_bytes()); - Self(Bytes(bytes)) + Self(FixedBytes(bytes)) } /// Address that authorized the order. Sell tokens will be taken diff --git a/crates/driver/src/domain/competition/order/signature.rs b/crates/driver/src/domain/competition/order/signature.rs index 852f8b3606..acddc8a30e 100644 --- a/crates/driver/src/domain/competition/order/signature.rs +++ b/crates/driver/src/domain/competition/order/signature.rs @@ -1,13 +1,10 @@ -use { - crate::{domain::eth, util::Bytes}, - model::signature::EcdsaSignature, -}; +use {crate::domain::eth, alloy::primitives::Bytes, model::signature::EcdsaSignature}; /// Signature over the order data. #[derive(Debug, Clone)] pub struct Signature { pub scheme: Scheme, - pub data: Bytes>, + pub data: Bytes, /// The address used to sign and place this order. 
pub signer: eth::Address, } @@ -21,17 +18,14 @@ impl Signature { Ok(match self.scheme { Scheme::Eip712 => model::signature::Signature::Eip712(EcdsaSignature::from_bytes( self.data - .0 - .as_slice() - .try_into() - .map_err(|_| anyhow::anyhow!("ECDSA signature must be 65 bytes"))?, + .as_array() + .ok_or_else(|| anyhow::anyhow!("ECDSA signature must be 65 bytes"))?, )?), Scheme::EthSign => model::signature::Signature::EthSign(EcdsaSignature::from_bytes( self.data .0 - .as_slice() - .try_into() - .map_err(|_| anyhow::anyhow!("ECDSA signature must be 65 bytes"))?, + .as_array() + .ok_or_else(|| anyhow::anyhow!("ECDSA signature must be 65 bytes"))?, )?), Scheme::Eip1271 => model::signature::Signature::Eip1271(self.data.clone().into()), Scheme::PreSign => model::signature::Signature::PreSign, diff --git a/crates/driver/src/domain/competition/pre_processing.rs b/crates/driver/src/domain/competition/pre_processing.rs index bc67effeb0..ce18e013ee 100644 --- a/crates/driver/src/domain/competition/pre_processing.rs +++ b/crates/driver/src/domain/competition/pre_processing.rs @@ -8,8 +8,8 @@ use { liquidity, }, infra::{self, api::routes::solve::dto::SolveRequest, observe::metrics, tokens}, - util::Bytes, }, + alloy::primitives::{Bytes, FixedBytes}, anyhow::{Context, Result}, chrono::Utc, futures::{FutureExt, StreamExt, future::BoxFuture, stream::FuturesUnordered}, @@ -287,7 +287,7 @@ impl Utilities { .map(|i| InteractionData { target: i.target, value: i.value.0, - call_data: i.call_data.0.clone(), + call_data: i.call_data.0.to_vec(), }) .collect() } else { @@ -460,7 +460,8 @@ impl Utilities { }, kind: order::Kind::Limit, side: template.order.kind.into(), - app_data: order::app_data::AppDataHash(Bytes(template.order.app_data.0)).into(), + app_data: order::app_data::AppDataHash(FixedBytes(template.order.app_data.0)) + .into(), buy_token_balance: template.order.buy_token_balance.into(), sell_token_balance: template.order.sell_token_balance.into(), partial: match template.order.partially_fillable { @@ -485,7 +486,7 @@ impl Utilities { signature: match template.signature { Signature::Eip1271(bytes) => order::Signature { scheme: order::signature::Scheme::Eip1271, - data: Bytes(bytes), + data: Bytes::from(bytes), signer: amm, }, _ => { diff --git a/crates/driver/src/domain/competition/risk_detector/bad_tokens/simulation.rs b/crates/driver/src/domain/competition/risk_detector/bad_tokens/simulation.rs index 9392ce806e..f041b7c417 100644 --- a/crates/driver/src/domain/competition/risk_detector/bad_tokens/simulation.rs +++ b/crates/driver/src/domain/competition/risk_detector/bad_tokens/simulation.rs @@ -67,7 +67,7 @@ impl Detector { .map(|i| InteractionData { target: i.target, value: i.value.0, - call_data: i.call_data.0.clone(), + call_data: i.call_data.to_vec(), }) .collect(); let trader = order.trader().0; diff --git a/crates/driver/src/domain/competition/solution/encoding.rs b/crates/driver/src/domain/competition/solution/encoding.rs index 5ac39b6037..55e1128940 100644 --- a/crates/driver/src/domain/competition/solution/encoding.rs +++ b/crates/driver/src/domain/competition/solution/encoding.rs @@ -10,11 +10,10 @@ use { liquidity, }, infra::{self, solver::ManageNativeToken}, - util::Bytes, }, allowance::Allowance, alloy::{ - primitives::{Address, U256}, + primitives::{Address, Bytes, FixedBytes, U256}, sol_types::SolCall, }, contracts::alloy::{FlashLoanRouter::LoanRequest, WETH9}, @@ -399,11 +398,11 @@ struct Trade { sell_amount: eth::U256, buy_amount: eth::U256, valid_to: u32, - app_data: Bytes<[u8; 32]>, 
+ app_data: FixedBytes<32>, fee_amount: eth::U256, flags: Flags, executed_amount: eth::U256, - signature: Bytes>, + signature: Bytes, } struct Price { @@ -424,7 +423,7 @@ struct Flags { pub mod codec { use { crate::domain::{competition::order, eth}, - alloy::primitives::U256, + alloy::primitives::{Bytes, U256}, contracts::alloy::GPv2Settlement, }; @@ -485,16 +484,14 @@ pub mod codec { } } - pub fn signature(signature: &order::Signature) -> super::Bytes> { + pub fn signature(signature: &order::Signature) -> Bytes { match signature.scheme { order::signature::Scheme::Eip712 | order::signature::Scheme::EthSign => { signature.data.clone() } - order::signature::Scheme::Eip1271 => { - [signature.signer.as_slice(), signature.data.0.as_slice()] - .concat() - .into() - } + order::signature::Scheme::Eip1271 => [signature.signer.as_slice(), &signature.data] + .concat() + .into(), order::signature::Scheme::PreSign => signature.signer.to_vec().into(), } } @@ -517,7 +514,7 @@ mod test { address!("C02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"), ); assert_eq!( - interaction.call_data.0.as_slice(), + interaction.call_data.as_ref(), hex!( "095ea7b3000000000000000000000000000000000022d473030f116ddee9f6b43ac78ba3ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" ) diff --git a/crates/driver/src/domain/competition/solution/interaction.rs b/crates/driver/src/domain/competition/solution/interaction.rs index b7fc594c78..25468f39b0 100644 --- a/crates/driver/src/domain/competition/solution/interaction.rs +++ b/crates/driver/src/domain/competition/solution/interaction.rs @@ -1,6 +1,6 @@ -use crate::{ - domain::{self, eth, liquidity}, - util::Bytes, +use { + crate::domain::{self, eth, liquidity}, + alloy::primitives::Bytes, }; /// Interaction with a smart contract which is needed to execute this solution @@ -67,7 +67,7 @@ impl Interaction { pub struct Custom { pub target: eth::ContractAddress, pub value: eth::Ether, - pub call_data: Bytes>, + pub call_data: Bytes, pub allowances: Vec, /// See the [`Interaction::inputs`] method. 
pub inputs: Vec, diff --git a/crates/driver/src/domain/eth/mod.rs b/crates/driver/src/domain/eth/mod.rs index 53a72b84c4..c2f4f85756 100644 --- a/crates/driver/src/domain/eth/mod.rs +++ b/crates/driver/src/domain/eth/mod.rs @@ -1,6 +1,5 @@ use { - crate::util::Bytes, - alloy::rpc::types::TransactionRequest, + alloy::{primitives::Bytes, rpc::types::TransactionRequest}, derive_more::{From, Into}, number::u256_ext::U256Ext, solvers_dto::auction::FlashloanHint, @@ -327,7 +326,7 @@ pub struct BlockNo(pub u64); pub struct Interaction { pub target: Address, pub value: Ether, - pub call_data: Bytes>, + pub call_data: Bytes, } impl From for model::interaction::InteractionData { @@ -335,7 +334,7 @@ impl From for model::interaction::InteractionData { Self { target: interaction.target, value: interaction.value.0, - call_data: interaction.call_data.0, + call_data: interaction.call_data.to_vec(), } } } @@ -369,7 +368,7 @@ pub struct Tx { pub from: Address, pub to: Address, pub value: Ether, - pub input: Bytes>, + pub input: Bytes, #[debug(ignore)] pub access_list: AccessList, } @@ -380,7 +379,7 @@ impl From for TransactionRequest { .from(value.from) .to(value.to) .value(value.value.0) - .input(value.input.0.into()) + .input(value.input.into()) .access_list(value.access_list.into()) } } diff --git a/crates/driver/src/infra/api/routes/quote/dto/order.rs b/crates/driver/src/infra/api/routes/quote/dto/order.rs index c893e0f901..be8ef90319 100644 --- a/crates/driver/src/infra/api/routes/quote/dto/order.rs +++ b/crates/driver/src/infra/api/routes/quote/dto/order.rs @@ -1,8 +1,5 @@ use { - crate::{ - domain::{competition, eth, quote}, - util::serialize, - }, + crate::domain::{competition, eth, quote}, serde::Deserialize, serde_with::serde_as, }; @@ -27,7 +24,7 @@ impl Order { pub struct Order { sell_token: eth::Address, buy_token: eth::Address, - #[serde_as(as = "serialize::U256")] + #[serde_as(as = "serde_ext::U256")] amount: eth::U256, kind: Kind, deadline: chrono::DateTime, diff --git a/crates/driver/src/infra/api/routes/quote/dto/quote.rs b/crates/driver/src/infra/api/routes/quote/dto/quote.rs index ebc82de552..2b5b1038de 100644 --- a/crates/driver/src/infra/api/routes/quote/dto/quote.rs +++ b/crates/driver/src/infra/api/routes/quote/dto/quote.rs @@ -1,8 +1,5 @@ use { - crate::{ - domain::{self, competition::solution::encoding::codec, eth, quote}, - util::serialize, - }, + crate::domain::{self, competition::solution::encoding::codec, eth, quote}, model::{ order::{BuyTokenDestination, SellTokenSource}, signature::SigningScheme, @@ -34,7 +31,7 @@ impl Quote { #[derive(Debug, Serialize)] #[serde(rename_all = "camelCase")] pub struct Quote { - #[serde_as(as = "HashMap<_, serialize::U256>")] + #[serde_as(as = "HashMap<_, serde_ext::U256>")] clearing_prices: HashMap, pre_interactions: Vec, interactions: Vec, @@ -51,9 +48,9 @@ pub struct Quote { #[serde(rename_all = "camelCase")] struct Interaction { target: eth::Address, - #[serde_as(as = "serialize::U256")] + #[serde_as(as = "serde_ext::U256")] value: eth::U256, - #[serde_as(as = "serialize::Hex")] + #[serde_as(as = "serde_ext::Hex")] call_data: Vec, } @@ -73,21 +70,21 @@ impl From for Interaction { struct JitOrder { buy_token: eth::Address, sell_token: eth::Address, - #[serde_as(as = "serialize::U256")] + #[serde_as(as = "serde_ext::U256")] sell_amount: eth::U256, - #[serde_as(as = "serialize::U256")] + #[serde_as(as = "serde_ext::U256")] buy_amount: eth::U256, - #[serde_as(as = "serialize::U256")] + #[serde_as(as = "serde_ext::U256")] executed_amount: 
eth::U256, receiver: eth::Address, partially_fillable: bool, valid_to: u32, - #[serde_as(as = "serialize::Hex")] + #[serde_as(as = "serde_ext::Hex")] app_data: [u8; 32], side: Side, sell_token_source: SellTokenSource, buy_token_destination: BuyTokenDestination, - #[serde_as(as = "serialize::Hex")] + #[serde_as(as = "serde_ext::Hex")] signature: Vec, signing_scheme: SigningScheme, } diff --git a/crates/driver/src/infra/api/routes/reveal/dto/reveal_response.rs b/crates/driver/src/infra/api/routes/reveal/dto/reveal_response.rs index 0f1d495042..bc5053a4d5 100644 --- a/crates/driver/src/infra/api/routes/reveal/dto/reveal_response.rs +++ b/crates/driver/src/infra/api/routes/reveal/dto/reveal_response.rs @@ -1,8 +1,4 @@ -use { - crate::{domain::competition, util::serialize}, - serde::Serialize, - serde_with::serde_as, -}; +use {crate::domain::competition, serde::Serialize, serde_with::serde_as}; impl RevealResponse { pub fn new(reveal: competition::Revealed) -> Self { @@ -26,8 +22,8 @@ pub struct RevealResponse { #[derive(Debug, Serialize)] #[serde(rename_all = "camelCase")] struct Calldata { - #[serde_as(as = "serialize::Hex")] + #[serde_as(as = "serde_ext::Hex")] internalized: Vec, - #[serde_as(as = "serialize::Hex")] + #[serde_as(as = "serde_ext::Hex")] uninternalized: Vec, } diff --git a/crates/driver/src/infra/api/routes/solve/dto/solve_request.rs b/crates/driver/src/infra/api/routes/solve/dto/solve_request.rs index 5d534e6cd4..4818112e88 100644 --- a/crates/driver/src/infra/api/routes/solve/dto/solve_request.rs +++ b/crates/driver/src/infra/api/routes/solve/dto/solve_request.rs @@ -12,7 +12,6 @@ use { eth, }, infra::{Ethereum, tokens}, - util::serialize, }, serde::Deserialize, serde_with::serde_as, @@ -230,7 +229,7 @@ impl SolveRequest { #[serde(rename_all = "camelCase")] struct Token { pub address: eth::Address, - #[serde_as(as = "Option")] + #[serde_as(as = "Option")] pub price: Option, pub trusted: bool, } @@ -239,13 +238,13 @@ struct Token { #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] struct Order { - #[serde_as(as = "serialize::Hex")] + #[serde_as(as = "serde_ext::Hex")] uid: [u8; order::UID_LEN], sell_token: eth::Address, buy_token: eth::Address, - #[serde_as(as = "serialize::U256")] + #[serde_as(as = "serde_ext::U256")] sell_amount: eth::U256, - #[serde_as(as = "serialize::U256")] + #[serde_as(as = "serde_ext::U256")] buy_amount: eth::U256, protocol_fees: Vec, created: u32, @@ -255,7 +254,7 @@ struct Order { owner: eth::Address, partially_fillable: bool, /// Always zero if the order is not partially fillable. 
- #[serde_as(as = "serialize::U256")] + #[serde_as(as = "serde_ext::U256")] executed: eth::U256, pre_interactions: Vec, post_interactions: Vec, @@ -264,10 +263,10 @@ struct Order { #[serde(default)] buy_token_balance: BuyTokenBalance, class: Class, - #[serde_as(as = "serialize::Hex")] + #[serde_as(as = "serde_ext::Hex")] app_data: [u8; order::app_data::APP_DATA_LEN], signing_scheme: SigningScheme, - #[serde_as(as = "serialize::Hex")] + #[serde_as(as = "serde_ext::Hex")] signature: Vec, quote: Option, } @@ -284,9 +283,9 @@ enum Kind { #[serde(rename_all = "camelCase")] struct Interaction { target: eth::Address, - #[serde_as(as = "serialize::U256")] + #[serde_as(as = "serde_ext::U256")] value: eth::U256, - #[serde_as(as = "serialize::Hex")] + #[serde_as(as = "serde_ext::Hex")] call_data: Vec, } @@ -342,11 +341,11 @@ enum FeePolicy { #[derive(Debug, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Quote { - #[serde_as(as = "serialize::U256")] + #[serde_as(as = "serde_ext::U256")] pub sell_amount: eth::U256, - #[serde_as(as = "serialize::U256")] + #[serde_as(as = "serde_ext::U256")] pub buy_amount: eth::U256, - #[serde_as(as = "serialize::U256")] + #[serde_as(as = "serde_ext::U256")] pub fee: eth::U256, pub solver: eth::Address, } diff --git a/crates/driver/src/infra/api/routes/solve/dto/solve_response.rs b/crates/driver/src/infra/api/routes/solve/dto/solve_response.rs index 4fde32eeed..c7777f950e 100644 --- a/crates/driver/src/infra/api/routes/solve/dto/solve_response.rs +++ b/crates/driver/src/infra/api/routes/solve/dto/solve_response.rs @@ -2,7 +2,6 @@ use { crate::{ domain::{competition, competition::order, eth}, infra::Solver, - util::serialize, }, serde::Serialize, serde_with::serde_as, @@ -72,11 +71,11 @@ pub struct Solution { /// in subsequent requests (reveal, settle). solution_id: u64, submission_address: eth::Address, - #[serde_as(as = "serialize::U256")] + #[serde_as(as = "serde_ext::U256")] score: eth::U256, - #[serde_as(as = "HashMap")] + #[serde_as(as = "HashMap")] orders: HashMap, - #[serde_as(as = "HashMap<_, serialize::U256>")] + #[serde_as(as = "HashMap<_, serde_ext::U256>")] clearing_prices: HashMap, } @@ -87,17 +86,17 @@ pub struct TradedOrder { pub side: Side, pub sell_token: eth::Address, pub buy_token: eth::Address, - #[serde_as(as = "serialize::U256")] + #[serde_as(as = "serde_ext::U256")] /// Sell limit order amount. pub limit_sell: eth::U256, - #[serde_as(as = "serialize::U256")] + #[serde_as(as = "serde_ext::U256")] /// Buy limit order amount. pub limit_buy: eth::U256, /// The effective amount that left the user's wallet including all fees. - #[serde_as(as = "serialize::U256")] + #[serde_as(as = "serde_ext::U256")] pub executed_sell: eth::U256, /// The effective amount the user received after all fees. - #[serde_as(as = "serialize::U256")] + #[serde_as(as = "serde_ext::U256")] pub executed_buy: eth::U256, } diff --git a/crates/driver/src/infra/blockchain/mod.rs b/crates/driver/src/infra/blockchain/mod.rs index ecdb544e7a..1244567c6e 100644 --- a/crates/driver/src/infra/blockchain/mod.rs +++ b/crates/driver/src/infra/blockchain/mod.rs @@ -20,6 +20,7 @@ use { BalanceOverrides, BalanceOverriding, }, + web3, }, std::{fmt, sync::Arc}, thiserror::Error, @@ -49,10 +50,14 @@ impl Rpc { /// Instantiate an RPC client to an Ethereum (or Ethereum-compatible) node /// at the specifed URL. 
pub async fn try_new(args: RpcArgs) -> Result { - let web3 = boundary::buffered_web3_client( + let web3 = web3::web3( + &web3::Arguments { + ethrpc_max_batch_size: args.max_batch_size, + ethrpc_max_concurrent_requests: args.max_concurrent_requests, + ethrpc_batch_delay: Default::default(), + }, &args.url, - args.max_batch_size, - args.max_concurrent_requests, + "base", ); let chain = Chain::try_from(web3.provider.get_chain_id().await?)?; @@ -212,7 +217,7 @@ impl Ethereum { .from(tx.from) .to(tx.to) .value(tx.value.0) - .input(tx.input.0.into()) + .input(tx.input.into()) .access_list(tx.access_list.into()); let tx = match self.simulation_gas_price().await { diff --git a/crates/driver/src/infra/config/file/mod.rs b/crates/driver/src/infra/config/file/mod.rs index bf12ac0895..8f94ba854e 100644 --- a/crates/driver/src/infra/config/file/mod.rs +++ b/crates/driver/src/infra/config/file/mod.rs @@ -1,6 +1,6 @@ pub use load::load; use { - crate::{domain::eth, infra, util::serialize}, + crate::{domain::eth, infra}, alloy::{eips::BlockNumberOrTag, primitives::Address}, number::serialization::HexOrDecimalU256, reqwest::Url, @@ -34,7 +34,7 @@ struct Config { /// Disable gas simulation and always use this fixed gas value instead. This /// can be useful for testing, but shouldn't be used in production since it /// will cause the driver to return invalid scores. - #[serde_as(as = "Option")] + #[serde_as(as = "Option")] disable_gas_simulation: Option, /// Defines the gas estimator to use. @@ -99,13 +99,13 @@ struct SubmissionConfig { /// The minimum priority fee in Gwei the solver is ensuring to pay in a /// settlement. #[serde(default)] - #[serde_as(as = "serialize::U256")] + #[serde_as(as = "serde_ext::U256")] min_priority_fee: eth::U256, /// The maximum gas price in Gwei the solver is willing to pay in a /// settlement. #[serde(default = "default_gas_price_cap")] - #[serde_as(as = "serialize::U256")] + #[serde_as(as = "serde_ext::U256")] gas_price_cap: eth::U256, /// The target confirmation time for settlement transactions used @@ -159,7 +159,7 @@ struct Mempool { /// Maximum additional tip in Gwei that we are willing to give to /// the validator above regular gas price estimation. #[serde(default = "default_max_additional_tip")] - #[serde_as(as = "serialize::U256")] + #[serde_as(as = "serde_ext::U256")] max_additional_tip: eth::U256, /// Additional tip in percentage of max_fee_per_gas we are giving to /// validator above regular gas price estimation. Expects a @@ -377,7 +377,7 @@ struct Slippage { /// The absolute slippage allowed by the solver. 
#[serde(rename = "absolute-slippage")] - #[serde_as(as = "Option")] + #[serde_as(as = "Option")] absolute: Option, } diff --git a/crates/driver/src/infra/mempool/mod.rs b/crates/driver/src/infra/mempool/mod.rs index 09ff772e3a..8b7b39646e 100644 --- a/crates/driver/src/infra/mempool/mod.rs +++ b/crates/driver/src/infra/mempool/mod.rs @@ -1,6 +1,6 @@ use { crate::{ - boundary::unbuffered_web3_client, + boundary::{Web3, unbuffered_web3}, domain::{eth, mempools}, infra::{self, solver::Account}, }, @@ -13,7 +13,6 @@ use { }, anyhow::Context, dashmap::DashMap, - ethrpc::Web3, std::sync::Arc, url::Url, }; @@ -84,7 +83,7 @@ impl std::fmt::Display for Mempool { impl Mempool { pub fn new(config: Config, solver_accounts: Vec) -> Self { - let transport = unbuffered_web3_client(&config.url); + let transport = unbuffered_web3(&config.url); // Register the solver accounts into the wallet to submit txs on their behalf for account in solver_accounts { transport.wallet.register_signer(account); @@ -137,7 +136,7 @@ impl Mempool { .max_fee_per_gas(max_fee_per_gas) .max_priority_fee_per_gas(max_priority_fee_per_gas) .gas_limit(gas_limit) - .input(tx.input.0.into()) + .input(tx.input.into()) .value(tx.value.0) .access_list(tx.access_list.into()); diff --git a/crates/driver/src/infra/notify/liquidity_sources/liquorice/notifier.rs b/crates/driver/src/infra/notify/liquidity_sources/liquorice/notifier.rs index 9818d2d366..9ea3292280 100644 --- a/crates/driver/src/infra/notify/liquidity_sources/liquorice/notifier.rs +++ b/crates/driver/src/infra/notify/liquidity_sources/liquorice/notifier.rs @@ -163,9 +163,8 @@ mod utils { crate::{ domain::eth, infra::notify::liquidity_sources::liquorice::notifier::utils::extract_rfq_id_from_interaction, - util::Bytes, }, - alloy::primitives::Address, + alloy::primitives::{Address, Bytes}, }; #[test] @@ -176,7 +175,7 @@ mod utils { let rfq_id = extract_rfq_id_from_interaction( ð::Interaction { target: liquorice_settlement_address, - call_data: Bytes(calldata), + call_data: calldata.into(), value: 0.into(), }, liquorice_settlement_address, @@ -191,7 +190,7 @@ mod utils { let rfq_id = extract_rfq_id_from_interaction( ð::Interaction { target: liquorice_settlement_address, - call_data: Bytes(vec![]), + call_data: Bytes::new(), value: 0.into(), }, liquorice_settlement_address, @@ -205,7 +204,7 @@ mod utils { let rfq_id = extract_rfq_id_from_interaction( ð::Interaction { target: Address::random(), - call_data: Bytes(vec![]), + call_data: Bytes::new(), value: 0.into(), }, Address::random(), diff --git a/crates/driver/src/infra/simulator/enso/dto.rs b/crates/driver/src/infra/simulator/enso/dto.rs index 141390a089..4edf5bc511 100644 --- a/crates/driver/src/infra/simulator/enso/dto.rs +++ b/crates/driver/src/infra/simulator/enso/dto.rs @@ -1,7 +1,7 @@ //! Data transfer objects for interacting with the Enso Trade Simulator API. 
use { - crate::{domain::eth, util::serialize}, + crate::domain::eth, alloy::rpc::types::AccessList, serde::{Deserialize, Serialize}, serde_with::serde_as, @@ -14,7 +14,7 @@ pub struct Request { pub chain_id: u64, pub from: eth::Address, pub to: eth::Address, - #[serde_as(as = "serialize::Hex")] + #[serde_as(as = "serde_ext::Hex")] pub data: Vec, pub value: eth::U256, pub gas_limit: u64, @@ -34,6 +34,6 @@ pub struct Response { pub block_number: u64, pub success: bool, pub exit_reason: String, - #[serde_as(as = "serialize::Hex")] + #[serde_as(as = "serde_ext::Hex")] pub return_data: Vec, } diff --git a/crates/driver/src/infra/simulator/tenderly/dto.rs b/crates/driver/src/infra/simulator/tenderly/dto.rs index 4e8f70ffa8..d697bd2809 100644 --- a/crates/driver/src/infra/simulator/tenderly/dto.rs +++ b/crates/driver/src/infra/simulator/tenderly/dto.rs @@ -1,7 +1,7 @@ //! Data transfer objects for interacting with the Tenderly API. use { - crate::{domain::eth, util::serialize}, + crate::domain::eth, serde::{Deserialize, Serialize}, serde_with::serde_as, }; @@ -12,7 +12,7 @@ pub struct Request { pub network_id: String, pub from: eth::Address, pub to: eth::Address, - #[serde_as(as = "serialize::Hex")] + #[serde_as(as = "serde_ext::Hex")] pub input: Vec, pub value: eth::U256, pub save: bool, diff --git a/crates/driver/src/infra/solver/dto/auction.rs b/crates/driver/src/infra/solver/dto/auction.rs index 6ef1963c2f..6bc212740c 100644 --- a/crates/driver/src/infra/solver/dto/auction.rs +++ b/crates/driver/src/infra/solver/dto/auction.rs @@ -361,7 +361,7 @@ fn interaction_from_domain(value: eth::Interaction) -> solvers_dto::auction::Int solvers_dto::auction::InteractionData { target: value.target, value: value.value.0, - call_data: value.call_data.0, + call_data: value.call_data.to_vec(), } } diff --git a/crates/driver/src/infra/solver/dto/solution.rs b/crates/driver/src/infra/solver/dto/solution.rs index a3794444a5..7b8f627fbd 100644 --- a/crates/driver/src/infra/solver/dto/solution.rs +++ b/crates/driver/src/infra/solver/dto/solution.rs @@ -6,8 +6,8 @@ use { liquidity, }, infra::Solver, - util::Bytes, }, + alloy::primitives::Bytes, app_data::AppDataHash, itertools::Itertools, model::{ @@ -150,7 +150,7 @@ impl Solutions { .map(|interaction| eth::Interaction { target: interaction.target, value: interaction.value.into(), - call_data: Bytes(interaction.calldata), + call_data: Bytes::from(interaction.calldata), }) .collect(), solution @@ -227,7 +227,7 @@ impl Solutions { .map(|interaction| eth::Interaction { target: interaction.target, value: interaction.value.into(), - call_data: Bytes(interaction.calldata), + call_data: interaction.calldata.into(), }) .collect(), solver.clone(), @@ -345,7 +345,7 @@ impl JitOrder { // signature bytes. This leads to the owner being encoded twice in // the final settlement calldata unless we remove that from the raw // data. 
- signature.data = Bytes(self.0.signature[20..].to_vec()); + signature.data = Bytes::copy_from_slice(&self.0.signature[20..]); } signature.signer = signer; diff --git a/crates/driver/src/util/mod.rs b/crates/driver/src/util/mod.rs index 35fcc22956..38aff00c0d 100644 --- a/crates/driver/src/util/mod.rs +++ b/crates/driver/src/util/mod.rs @@ -1,8 +1,6 @@ -mod bytes; pub mod http; pub mod math; mod percent; -pub mod serialize; mod time; -pub use {bytes::Bytes, percent::Percent, time::Timestamp}; +pub use {percent::Percent, time::Timestamp}; diff --git a/crates/e2e/src/setup/deploy.rs b/crates/e2e/src/setup/deploy.rs index f6963d31fb..83e3c1589a 100644 --- a/crates/e2e/src/setup/deploy.rs +++ b/crates/e2e/src/setup/deploy.rs @@ -18,7 +18,7 @@ use { }, ethrpc::alloy::CallBuilderExt, model::DomainSeparator, - shared::ethrpc::Web3, + shared::web3::Web3, }; #[derive(Default)] diff --git a/crates/e2e/src/setup/onchain_components/mod.rs b/crates/e2e/src/setup/onchain_components/mod.rs index 1524b8ef59..c5080fc3e9 100644 --- a/crates/e2e/src/setup/onchain_components/mod.rs +++ b/crates/e2e/src/setup/onchain_components/mod.rs @@ -23,7 +23,7 @@ use { signature::{EcdsaSignature, EcdsaSigningScheme}, }, number::units::EthUnit, - shared::ethrpc::Web3, + shared::web3::Web3, std::{borrow::BorrowMut, ops::Deref}, }; diff --git a/crates/e2e/src/setup/services.rs b/crates/e2e/src/setup/services.rs index 74226f468f..49ee87202c 100644 --- a/crates/e2e/src/setup/services.rs +++ b/crates/e2e/src/setup/services.rs @@ -26,7 +26,7 @@ use { trade::Trade, }, reqwest::{Client, StatusCode, Url}, - shared::ethrpc::Web3, + shared::web3::Web3, sqlx::Connection, std::{ collections::{HashMap, hash_map::Entry}, diff --git a/crates/e2e/tests/e2e/api_version.rs b/crates/e2e/tests/e2e/api_version.rs index fd1998ca57..8cf68b94bc 100644 --- a/crates/e2e/tests/e2e/api_version.rs +++ b/crates/e2e/tests/e2e/api_version.rs @@ -1,4 +1,4 @@ -use {e2e::setup::*, number::units::EthUnit, shared::ethrpc::Web3}; +use {e2e::setup::*, number::units::EthUnit, shared::web3::Web3}; #[tokio::test] #[ignore] diff --git a/crates/e2e/tests/e2e/app_data.rs b/crates/e2e/tests/e2e/app_data.rs index 512c0d9d59..cd60790f08 100644 --- a/crates/e2e/tests/e2e/app_data.rs +++ b/crates/e2e/tests/e2e/app_data.rs @@ -9,7 +9,7 @@ use { }, number::units::EthUnit, reqwest::StatusCode, - shared::ethrpc::Web3, + shared::web3::Web3, std::str::FromStr, }; diff --git a/crates/e2e/tests/e2e/app_data_signer.rs b/crates/e2e/tests/e2e/app_data_signer.rs index 3630647dd2..b08811dd32 100644 --- a/crates/e2e/tests/e2e/app_data_signer.rs +++ b/crates/e2e/tests/e2e/app_data_signer.rs @@ -7,7 +7,7 @@ use { signature::EcdsaSigningScheme, }, number::units::EthUnit, - shared::ethrpc::Web3, + shared::web3::Web3, }; #[tokio::test] diff --git a/crates/e2e/tests/e2e/buffers.rs b/crates/e2e/tests/e2e/buffers.rs index 6d3736e420..e22e401ca7 100644 --- a/crates/e2e/tests/e2e/buffers.rs +++ b/crates/e2e/tests/e2e/buffers.rs @@ -7,7 +7,7 @@ use { signature::EcdsaSigningScheme, }, number::units::EthUnit, - shared::ethrpc::Web3, + shared::web3::Web3, }; #[tokio::test] diff --git a/crates/e2e/tests/e2e/cors.rs b/crates/e2e/tests/e2e/cors.rs index 540385d082..06af718024 100644 --- a/crates/e2e/tests/e2e/cors.rs +++ b/crates/e2e/tests/e2e/cors.rs @@ -4,7 +4,7 @@ use { e2e::setup::{API_HOST, OnchainComponents, Services, run_test}, reqwest::{Method, StatusCode}, - shared::ethrpc::Web3, + shared::web3::Web3, }; #[tokio::test] diff --git a/crates/e2e/tests/e2e/cow_amm.rs 
b/crates/e2e/tests/e2e/cow_amm.rs index 0cb0cf730d..7faa156388 100644 --- a/crates/e2e/tests/e2e/cow_amm.rs +++ b/crates/e2e/tests/e2e/cow_amm.rs @@ -29,7 +29,7 @@ use { signature::EcdsaSigningScheme, }, number::units::EthUnit, - shared::ethrpc::Web3, + shared::web3::Web3, solvers_dto::solution::{ BuyTokenBalance, Call, diff --git a/crates/e2e/tests/e2e/deprecated_endpoints.rs b/crates/e2e/tests/e2e/deprecated_endpoints.rs index 757d31cbde..f99788e04c 100644 --- a/crates/e2e/tests/e2e/deprecated_endpoints.rs +++ b/crates/e2e/tests/e2e/deprecated_endpoints.rs @@ -8,7 +8,7 @@ use { signature::EcdsaSigningScheme, }, number::units::EthUnit, - shared::ethrpc::Web3, + shared::web3::Web3, }; #[tokio::test] diff --git a/crates/e2e/tests/e2e/eth_integration.rs b/crates/e2e/tests/e2e/eth_integration.rs index 1a0417ddb2..fcd368ba7c 100644 --- a/crates/e2e/tests/e2e/eth_integration.rs +++ b/crates/e2e/tests/e2e/eth_integration.rs @@ -8,7 +8,7 @@ use { signature::EcdsaSigningScheme, }, number::{nonzero::NonZeroU256, units::EthUnit}, - shared::ethrpc::Web3, + shared::web3::Web3, }; #[tokio::test] diff --git a/crates/e2e/tests/e2e/eth_safe.rs b/crates/e2e/tests/e2e/eth_safe.rs index 3925e8f60f..5b9cd7aa32 100644 --- a/crates/e2e/tests/e2e/eth_safe.rs +++ b/crates/e2e/tests/e2e/eth_safe.rs @@ -7,7 +7,7 @@ use { signature::{Signature, hashed_eip712_message}, }, number::units::EthUnit, - shared::ethrpc::Web3, + shared::web3::Web3, }; #[tokio::test] diff --git a/crates/e2e/tests/e2e/hooks.rs b/crates/e2e/tests/e2e/hooks.rs index 39696c2904..b88eedec5e 100644 --- a/crates/e2e/tests/e2e/hooks.rs +++ b/crates/e2e/tests/e2e/hooks.rs @@ -22,7 +22,7 @@ use { number::{nonzero::NonZeroU256, units::EthUnit}, reqwest::StatusCode, serde_json::json, - shared::ethrpc::Web3, + shared::web3::Web3, }; #[tokio::test] diff --git a/crates/e2e/tests/e2e/jit_orders.rs b/crates/e2e/tests/e2e/jit_orders.rs index 0c7968ab49..b93218458a 100644 --- a/crates/e2e/tests/e2e/jit_orders.rs +++ b/crates/e2e/tests/e2e/jit_orders.rs @@ -7,7 +7,7 @@ use { signature::EcdsaSigningScheme, }, number::units::EthUnit, - shared::ethrpc::Web3, + shared::web3::Web3, solvers_dto::solution::{Asset, Solution}, std::collections::HashMap, }; diff --git a/crates/e2e/tests/e2e/limit_orders.rs b/crates/e2e/tests/e2e/limit_orders.rs index 6417277b19..9777bb14b3 100644 --- a/crates/e2e/tests/e2e/limit_orders.rs +++ b/crates/e2e/tests/e2e/limit_orders.rs @@ -17,7 +17,7 @@ use { signature::EcdsaSigningScheme, }, number::{conversions::big_decimal_to_big_uint, units::EthUnit}, - shared::ethrpc::Web3, + shared::web3::Web3, std::{collections::HashMap, ops::DerefMut}, }; diff --git a/crates/e2e/tests/e2e/malformed_requests.rs b/crates/e2e/tests/e2e/malformed_requests.rs index fb9aff9df4..046f460c8b 100644 --- a/crates/e2e/tests/e2e/malformed_requests.rs +++ b/crates/e2e/tests/e2e/malformed_requests.rs @@ -6,7 +6,7 @@ use { orderbook::api::Error, reqwest::StatusCode, serde_json::json, - shared::ethrpc::Web3, + shared::web3::Web3, }; const VALID_ORDER_UID: &str = "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; diff --git a/crates/e2e/tests/e2e/order_cancellation.rs b/crates/e2e/tests/e2e/order_cancellation.rs index d9c6c84d37..a2dd9a5cdf 100644 --- a/crates/e2e/tests/e2e/order_cancellation.rs +++ b/crates/e2e/tests/e2e/order_cancellation.rs @@ -19,7 +19,7 @@ use { }, number::{nonzero::NonZeroU256, units::EthUnit}, serde_json::json, - shared::ethrpc::Web3, + shared::web3::Web3, }; #[tokio::test] diff 
--git a/crates/e2e/tests/e2e/partial_fill.rs b/crates/e2e/tests/e2e/partial_fill.rs index eeb97a3551..7da2b5c875 100644 --- a/crates/e2e/tests/e2e/partial_fill.rs +++ b/crates/e2e/tests/e2e/partial_fill.rs @@ -8,7 +8,7 @@ use { }, number::units::EthUnit, orderbook::dto::order::Status, - shared::ethrpc::Web3, + shared::web3::Web3, }; #[tokio::test] diff --git a/crates/e2e/tests/e2e/partially_fillable_balance.rs b/crates/e2e/tests/e2e/partially_fillable_balance.rs index c9d9ce08d1..520ba5d8fd 100644 --- a/crates/e2e/tests/e2e/partially_fillable_balance.rs +++ b/crates/e2e/tests/e2e/partially_fillable_balance.rs @@ -7,7 +7,7 @@ use { signature::EcdsaSigningScheme, }, number::units::EthUnit, - shared::ethrpc::Web3, + shared::web3::Web3, }; #[tokio::test] diff --git a/crates/e2e/tests/e2e/partially_fillable_pool.rs b/crates/e2e/tests/e2e/partially_fillable_pool.rs index 7898dabe1f..84e5c8a7ab 100644 --- a/crates/e2e/tests/e2e/partially_fillable_pool.rs +++ b/crates/e2e/tests/e2e/partially_fillable_pool.rs @@ -7,7 +7,7 @@ use { signature::EcdsaSigningScheme, }, number::units::EthUnit, - shared::ethrpc::Web3, + shared::web3::Web3, }; #[tokio::test] #[ignore] diff --git a/crates/e2e/tests/e2e/place_order_with_quote.rs b/crates/e2e/tests/e2e/place_order_with_quote.rs index 07bef2aa6c..47d7808297 100644 --- a/crates/e2e/tests/e2e/place_order_with_quote.rs +++ b/crates/e2e/tests/e2e/place_order_with_quote.rs @@ -9,7 +9,7 @@ use { signature::EcdsaSigningScheme, }, number::units::EthUnit, - shared::ethrpc::Web3, + shared::web3::Web3, std::ops::DerefMut, }; diff --git a/crates/e2e/tests/e2e/protocol_fee.rs b/crates/e2e/tests/e2e/protocol_fee.rs index 7f12981675..42f34549b0 100644 --- a/crates/e2e/tests/e2e/protocol_fee.rs +++ b/crates/e2e/tests/e2e/protocol_fee.rs @@ -22,7 +22,7 @@ use { number::units::EthUnit, reqwest::StatusCode, serde_json::json, - shared::ethrpc::Web3, + shared::web3::Web3, }; #[tokio::test] diff --git a/crates/e2e/tests/e2e/quoting.rs b/crates/e2e/tests/e2e/quoting.rs index 6437a94ea2..c9839e8865 100644 --- a/crates/e2e/tests/e2e/quoting.rs +++ b/crates/e2e/tests/e2e/quoting.rs @@ -9,7 +9,7 @@ use { }, number::{nonzero::NonZeroU256, units::EthUnit}, serde_json::json, - shared::ethrpc::Web3, + shared::web3::Web3, std::{ sync::Arc, time::{Duration, Instant}, diff --git a/crates/e2e/tests/e2e/replace_order.rs b/crates/e2e/tests/e2e/replace_order.rs index c1c8d65cff..ddc9ab1812 100644 --- a/crates/e2e/tests/e2e/replace_order.rs +++ b/crates/e2e/tests/e2e/replace_order.rs @@ -12,7 +12,7 @@ use { orderbook::{OrderCancellationError, OrderReplacementError}, }, reqwest::StatusCode, - shared::ethrpc::Web3, + shared::web3::Web3, }; // Parse OrderReplacementError from HTTP response diff --git a/crates/e2e/tests/e2e/smart_contract_orders.rs b/crates/e2e/tests/e2e/smart_contract_orders.rs index bc1ca03996..3b70c85415 100644 --- a/crates/e2e/tests/e2e/smart_contract_orders.rs +++ b/crates/e2e/tests/e2e/smart_contract_orders.rs @@ -8,7 +8,7 @@ use { }, number::units::EthUnit, reqwest::StatusCode, - shared::ethrpc::Web3, + shared::web3::Web3, }; #[tokio::test] diff --git a/crates/e2e/tests/e2e/solver_competition.rs b/crates/e2e/tests/e2e/solver_competition.rs index dbe659be82..1b03cea819 100644 --- a/crates/e2e/tests/e2e/solver_competition.rs +++ b/crates/e2e/tests/e2e/solver_competition.rs @@ -7,7 +7,7 @@ use { signature::EcdsaSigningScheme, }, number::units::EthUnit, - shared::ethrpc::Web3, + shared::web3::Web3, solvers_dto::solution::Solution, std::collections::HashMap, }; diff --git 
a/crates/e2e/tests/e2e/submission.rs b/crates/e2e/tests/e2e/submission.rs index 562c7fd87a..02528fc5fb 100644 --- a/crates/e2e/tests/e2e/submission.rs +++ b/crates/e2e/tests/e2e/submission.rs @@ -16,7 +16,7 @@ use { signature::EcdsaSigningScheme, }, number::{nonzero::NonZeroU256, testing::ApproxEq, units::EthUnit}, - shared::ethrpc::Web3, + shared::web3::Web3, std::time::Duration, }; diff --git a/crates/e2e/tests/e2e/token_metadata.rs b/crates/e2e/tests/e2e/token_metadata.rs index f234e58b2e..b04c5ccb9e 100644 --- a/crates/e2e/tests/e2e/token_metadata.rs +++ b/crates/e2e/tests/e2e/token_metadata.rs @@ -7,7 +7,7 @@ use { signature::EcdsaSigningScheme, }, number::units::EthUnit, - shared::ethrpc::Web3, + shared::web3::Web3, }; #[tokio::test] diff --git a/crates/e2e/tests/e2e/tracking_insufficient_funds.rs b/crates/e2e/tests/e2e/tracking_insufficient_funds.rs index 0ed43b79b8..93bacc84df 100644 --- a/crates/e2e/tests/e2e/tracking_insufficient_funds.rs +++ b/crates/e2e/tests/e2e/tracking_insufficient_funds.rs @@ -7,7 +7,7 @@ use { signature::EcdsaSigningScheme, }, number::units::EthUnit, - shared::ethrpc::Web3, + shared::web3::Web3, }; #[tokio::test] diff --git a/crates/e2e/tests/e2e/trades_v2.rs b/crates/e2e/tests/e2e/trades_v2.rs index 0877a4c64c..664157e4ec 100644 --- a/crates/e2e/tests/e2e/trades_v2.rs +++ b/crates/e2e/tests/e2e/trades_v2.rs @@ -7,7 +7,7 @@ use { signature::EcdsaSigningScheme, }, number::units::EthUnit, - shared::ethrpc::Web3, + shared::web3::Web3, }; #[tokio::test] diff --git a/crates/e2e/tests/e2e/uncovered_order.rs b/crates/e2e/tests/e2e/uncovered_order.rs index 8f6dd52624..a49e54243c 100644 --- a/crates/e2e/tests/e2e/uncovered_order.rs +++ b/crates/e2e/tests/e2e/uncovered_order.rs @@ -6,7 +6,7 @@ use { signature::EcdsaSigningScheme, }, number::units::EthUnit, - shared::ethrpc::Web3, + shared::web3::Web3, }; #[tokio::test] diff --git a/crates/e2e/tests/e2e/univ2.rs b/crates/e2e/tests/e2e/univ2.rs index 067fb3b3b2..57812bf129 100644 --- a/crates/e2e/tests/e2e/univ2.rs +++ b/crates/e2e/tests/e2e/univ2.rs @@ -9,7 +9,7 @@ use { signature::EcdsaSigningScheme, }, number::units::EthUnit, - shared::ethrpc::Web3, + shared::web3::Web3, }; #[tokio::test] diff --git a/crates/e2e/tests/e2e/user_surplus.rs b/crates/e2e/tests/e2e/user_surplus.rs index 71d029068f..4023affeb6 100644 --- a/crates/e2e/tests/e2e/user_surplus.rs +++ b/crates/e2e/tests/e2e/user_surplus.rs @@ -8,7 +8,7 @@ use { signature::EcdsaSigningScheme, }, number::units::EthUnit, - shared::ethrpc::Web3, + shared::web3::Web3, }; #[tokio::test] diff --git a/crates/e2e/tests/e2e/vault_balances.rs b/crates/e2e/tests/e2e/vault_balances.rs index bfc93b3393..d50fa3dff4 100644 --- a/crates/e2e/tests/e2e/vault_balances.rs +++ b/crates/e2e/tests/e2e/vault_balances.rs @@ -6,7 +6,7 @@ use { signature::EcdsaSigningScheme, }, number::units::EthUnit, - shared::ethrpc::Web3, + shared::web3::Web3, }; #[tokio::test] diff --git a/crates/e2e/tests/e2e/wrapper.rs b/crates/e2e/tests/e2e/wrapper.rs index 6bcf5c47f6..edffd1444b 100644 --- a/crates/e2e/tests/e2e/wrapper.rs +++ b/crates/e2e/tests/e2e/wrapper.rs @@ -18,7 +18,7 @@ use { }, number::units::EthUnit, serde_json::json, - shared::ethrpc::Web3, + shared::web3::Web3, }; /// The block number from which we will fetch state for the forked test. 
diff --git a/crates/orderbook/src/run.rs b/crates/orderbook/src/run.rs index 3be02485fb..8ab767e4eb 100644 --- a/crates/orderbook/src/run.rs +++ b/crates/orderbook/src/run.rs @@ -67,12 +67,12 @@ pub async fn start(args: impl Iterator) { pub async fn run(args: Arguments) { let http_factory = HttpClientFactory::new(&args.http_client); - let web3 = shared::ethrpc::web3(&args.shared.ethrpc, &args.shared.node_url, "base"); + let web3 = shared::web3::web3(&args.shared.ethrpc, &args.shared.node_url, "base"); let simulation_web3 = args .shared .simulation_node_url .as_ref() - .map(|node_url| shared::ethrpc::web3(&args.shared.ethrpc, node_url, "simulation")); + .map(|node_url| shared::web3::web3(&args.shared.ethrpc, node_url, "simulation")); let chain_id = web3 .provider diff --git a/crates/refunder/src/arguments.rs b/crates/refunder/src/arguments.rs index 5e3c09d9bc..36fab7d1d0 100644 --- a/crates/refunder/src/arguments.rs +++ b/crates/refunder/src/arguments.rs @@ -1,7 +1,7 @@ use { alloy::primitives::Address, clap::Parser, - shared::{arguments::display_option, ethrpc, http_client, logging_args_with_default_filter}, + shared::{arguments::display_option, http_client, logging_args_with_default_filter, web3}, std::time::Duration, url::Url, }; @@ -14,7 +14,7 @@ pub struct Arguments { pub http_client: http_client::Arguments, #[clap(flatten)] - pub ethrpc: ethrpc::Arguments, + pub ethrpc: web3::Arguments, #[clap(flatten)] pub logging: LoggingArguments, diff --git a/crates/refunder/src/lib.rs b/crates/refunder/src/lib.rs index 94b4b9d49a..e9f17b9c0b 100644 --- a/crates/refunder/src/lib.rs +++ b/crates/refunder/src/lib.rs @@ -67,7 +67,7 @@ pub async fn run(args: arguments::Arguments) { .expect("failed to create database"); // Blockchain/RPC setup - let web3 = shared::ethrpc::web3(&args.ethrpc, &args.node_url, "base"); + let web3 = shared::web3::web3(&args.ethrpc, &args.node_url, "base"); if let Some(expected_chain_id) = args.chain_id { let chain_id = web3 diff --git a/crates/refunder/src/submitter.rs b/crates/refunder/src/submitter.rs index 7930af9ba6..54764091c5 100644 --- a/crates/refunder/src/submitter.rs +++ b/crates/refunder/src/submitter.rs @@ -12,8 +12,8 @@ use { contracts::alloy::CoWSwapEthFlow::{self, EthFlowOrder}, database::OrderUid, shared::{ - ethrpc::Web3, gas_price_estimation::{Eip1559EstimationExt, GasPriceEstimating}, + web3::Web3, }, std::time::Duration, }; diff --git a/crates/serde-ext/Cargo.toml b/crates/serde-ext/Cargo.toml new file mode 100644 index 0000000000..e45beaa60a --- /dev/null +++ b/crates/serde-ext/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "serde-ext" +version = "0.1.0" +authors = ["Cow Protocol Developers "] +edition = "2024" +license = "MIT OR Apache-2.0" + +[dependencies] +alloy = { workspace = true } +const-hex = { workspace = true } +serde = { workspace = true } +serde_with = { workspace = true } \ No newline at end of file diff --git a/crates/driver/src/util/serialize/hex.rs b/crates/serde-ext/src/hex.rs similarity index 100% rename from crates/driver/src/util/serialize/hex.rs rename to crates/serde-ext/src/hex.rs diff --git a/crates/driver/src/util/serialize/mod.rs b/crates/serde-ext/src/lib.rs similarity index 100% rename from crates/driver/src/util/serialize/mod.rs rename to crates/serde-ext/src/lib.rs diff --git a/crates/driver/src/util/serialize/u256.rs b/crates/serde-ext/src/u256.rs similarity index 72% rename from crates/driver/src/util/serialize/u256.rs rename to crates/serde-ext/src/u256.rs index 3984c117dc..a0a5e0e9a0 100644 --- 
a/crates/driver/src/util/serialize/u256.rs +++ b/crates/serde-ext/src/u256.rs @@ -1,5 +1,5 @@ use { - crate::domain::eth, + alloy::primitives as alloy, serde::{Deserializer, Serializer, de}, serde_with::{DeserializeAs, SerializeAs}, }; @@ -10,12 +10,12 @@ use { #[derive(Debug)] pub struct U256; -impl<'de> DeserializeAs<'de, eth::U256> for U256 { - fn deserialize_as>(deserializer: D) -> Result { +impl<'de> DeserializeAs<'de, alloy::U256> for U256 { + fn deserialize_as>(deserializer: D) -> Result { struct Visitor; impl de::Visitor<'_> for Visitor { - type Value = eth::U256; + type Value = alloy::U256; fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { write!(formatter, "a 256-bit decimal string") @@ -25,7 +25,7 @@ impl<'de> DeserializeAs<'de, eth::U256> for U256 { where E: de::Error, { - eth::U256::from_str_radix(s, 10).map_err(|err| { + alloy::U256::from_str_radix(s, 10).map_err(|err| { de::Error::custom(format!("failed to decode {s:?} as a 256-bit number: {err}")) }) } @@ -35,8 +35,8 @@ impl<'de> DeserializeAs<'de, eth::U256> for U256 { } } -impl SerializeAs for U256 { - fn serialize_as(source: ð::U256, serializer: S) -> Result { +impl SerializeAs for U256 { + fn serialize_as(source: &alloy::U256, serializer: S) -> Result { serializer.serialize_str(&source.to_string()) } } diff --git a/crates/shared/src/arguments.rs b/crates/shared/src/arguments.rs index a01f2a7dc3..f261e58d74 100644 --- a/crates/shared/src/arguments.rs +++ b/crates/shared/src/arguments.rs @@ -143,7 +143,7 @@ impl Display for DatabasePoolConfig { #[group(skip)] pub struct Arguments { #[clap(flatten)] - pub ethrpc: crate::ethrpc::Arguments, + pub ethrpc: crate::web3::Arguments, #[clap(flatten)] pub current_block: crate::current_block::Arguments, diff --git a/crates/shared/src/bad_token/trace_call.rs b/crates/shared/src/bad_token/trace_call.rs index 8b529edcf4..67c6633a1f 100644 --- a/crates/shared/src/bad_token/trace_call.rs +++ b/crates/shared/src/bad_token/trace_call.rs @@ -1,6 +1,6 @@ use { super::TokenQuality, - crate::{ethrpc::Web3, trace_many}, + crate::{trace_many, web3::Web3}, alloy::{ primitives::{Address, U256, keccak256}, rpc::{ diff --git a/crates/driver/src/util/bytes.rs b/crates/shared/src/bytes.rs similarity index 100% rename from crates/driver/src/util/bytes.rs rename to crates/shared/src/bytes.rs diff --git a/crates/shared/src/code_fetching.rs b/crates/shared/src/code_fetching.rs index a1e78da06d..a1083f5ebc 100644 --- a/crates/shared/src/code_fetching.rs +++ b/crates/shared/src/code_fetching.rs @@ -2,7 +2,7 @@ //! more easily be tested with mocked versions of these behaviours. 
use { - crate::ethrpc::Web3, + crate::web3::Web3, alloy::{ primitives::{Address, Bytes}, providers::Provider, diff --git a/crates/shared/src/gas_price_estimation/mod.rs b/crates/shared/src/gas_price_estimation/mod.rs index 040eaf41d6..e4dcdc772c 100644 --- a/crates/shared/src/gas_price_estimation/mod.rs +++ b/crates/shared/src/gas_price_estimation/mod.rs @@ -6,7 +6,6 @@ pub mod priority; use { crate::{ - ethrpc::Web3, gas_price_estimation::{ configurable_alloy::{ ConfigurableGasPriceEstimator, @@ -18,6 +17,7 @@ use { priority::PriorityGasPriceEstimating, }, http_client::HttpClientFactory, + web3::Web3, }, ::alloy::{ eips::eip1559::{Eip1559Estimation, calc_effective_gas_price}, diff --git a/crates/shared/src/lib.rs b/crates/shared/src/lib.rs index 15253452b6..9cbd012506 100644 --- a/crates/shared/src/lib.rs +++ b/crates/shared/src/lib.rs @@ -11,7 +11,6 @@ pub mod code_fetching; pub mod current_block; pub mod db_order_conversions; pub mod encoded_settlement; -pub mod ethrpc; pub mod event_handling; pub mod event_storing_helpers; pub mod external_prices; @@ -37,6 +36,7 @@ pub mod token_list; pub mod trace_many; pub mod trade_finding; pub mod url; +pub mod web3; pub mod zeroex_api; /// anyhow errors are not clonable natively. This is a workaround that creates a diff --git a/crates/shared/src/price_estimation/factory.rs b/crates/shared/src/price_estimation/factory.rs index 43fe6e1e01..f46a9c8767 100644 --- a/crates/shared/src/price_estimation/factory.rs +++ b/crates/shared/src/price_estimation/factory.rs @@ -15,7 +15,6 @@ use { arguments, bad_token::list_based::DenyListedTokens, code_fetching::CachedCodeFetcher, - ethrpc::Web3, gas_price_estimation::GasPriceEstimating, http_client::HttpClientFactory, price_estimation::{ @@ -25,6 +24,7 @@ use { }, tenderly_api::TenderlyCodeSimulator, token_info::TokenInfoFetching, + web3::Web3, }, alloy::primitives::Address, anyhow::{Context as _, Result}, diff --git a/crates/shared/src/sources/balancer_v2/pool_fetching/mod.rs b/crates/shared/src/sources/balancer_v2/pool_fetching/mod.rs index 6c99612470..dae2f87569 100644 --- a/crates/shared/src/sources/balancer_v2/pool_fetching/mod.rs +++ b/crates/shared/src/sources/balancer_v2/pool_fetching/mod.rs @@ -26,9 +26,9 @@ use { swap::fixed_point::Bfp, }, crate::{ - ethrpc::Web3, recent_block_cache::{Block, CacheConfig}, token_info::TokenInfoFetching, + web3::Web3, }, alloy::{ eips::BlockNumberOrTag, diff --git a/crates/shared/src/sources/swapr.rs b/crates/shared/src/sources/swapr.rs index d4a3062f97..bb2d3a6d73 100644 --- a/crates/shared/src/sources/swapr.rs +++ b/crates/shared/src/sources/swapr.rs @@ -55,9 +55,9 @@ mod tests { use { super::*, crate::{ - ethrpc::Web3, recent_block_cache::Block, sources::{BaselineSource, uniswap_v2}, + web3::Web3, }, alloy::{ primitives::{Address, address}, diff --git a/crates/shared/src/sources/uniswap_v2/mod.rs b/crates/shared/src/sources/uniswap_v2/mod.rs index aee970fc35..8d6025909d 100644 --- a/crates/shared/src/sources/uniswap_v2/mod.rs +++ b/crates/shared/src/sources/uniswap_v2/mod.rs @@ -10,8 +10,8 @@ use { pool_fetching::{DefaultPoolReader, PoolFetching, PoolReading}, }, crate::{ - ethrpc::Web3, sources::{BaselineSource, swapr::SwaprPoolReader}, + web3::Web3, }, alloy::primitives::{Address, B256}, anyhow::{Context, Result}, diff --git a/crates/shared/src/sources/uniswap_v2/pool_fetching.rs b/crates/shared/src/sources/uniswap_v2/pool_fetching.rs index 7e322491fe..d27e85fa14 100644 --- a/crates/shared/src/sources/uniswap_v2/pool_fetching.rs +++ 
b/crates/shared/src/sources/uniswap_v2/pool_fetching.rs @@ -1,6 +1,6 @@ use { super::pair_provider::PairProvider, - crate::{baseline_solver::BaselineSolvable, ethrpc::Web3, recent_block_cache::Block}, + crate::{baseline_solver::BaselineSolvable, recent_block_cache::Block, web3::Web3}, alloy::{ eips::BlockId, primitives::{Address, U256}, diff --git a/crates/shared/src/trace_many.rs b/crates/shared/src/trace_many.rs index a4033a6799..96c9ddc473 100644 --- a/crates/shared/src/trace_many.rs +++ b/crates/shared/src/trace_many.rs @@ -1,5 +1,5 @@ use { - crate::ethrpc::Web3, + crate::web3::Web3, alloy::{ providers::ext::TraceApi, rpc::types::{ diff --git a/crates/shared/src/ethrpc.rs b/crates/shared/src/web3.rs similarity index 100% rename from crates/shared/src/ethrpc.rs rename to crates/shared/src/web3.rs diff --git a/crates/solver/src/liquidity/balancer_v2.rs b/crates/solver/src/liquidity/balancer_v2.rs index 9ce7a836de..1ca1e2475f 100644 --- a/crates/solver/src/liquidity/balancer_v2.rs +++ b/crates/solver/src/liquidity/balancer_v2.rs @@ -20,10 +20,10 @@ use { anyhow::Result, model::TokenPair, shared::{ - ethrpc::Web3, http_solver::model::TokenAmount, recent_block_cache::Block, sources::balancer_v2::pool_fetching::BalancerPoolFetching, + web3::Web3, }, std::{collections::HashSet, sync::Arc}, tracing::instrument, diff --git a/crates/solver/src/liquidity/uniswap_v2.rs b/crates/solver/src/liquidity/uniswap_v2.rs index 4b917d5dc3..f6236a966c 100644 --- a/crates/solver/src/liquidity/uniswap_v2.rs +++ b/crates/solver/src/liquidity/uniswap_v2.rs @@ -13,10 +13,10 @@ use { anyhow::Result, model::TokenPair, shared::{ - ethrpc::Web3, http_solver::model::TokenAmount, recent_block_cache::Block, sources::uniswap_v2::pool_fetching::PoolFetching, + web3::Web3, }, std::{ collections::HashSet, diff --git a/crates/solver/src/liquidity/uniswap_v3.rs b/crates/solver/src/liquidity/uniswap_v3.rs index cd3d2b4f02..b3e2b4034b 100644 --- a/crates/solver/src/liquidity/uniswap_v3.rs +++ b/crates/solver/src/liquidity/uniswap_v3.rs @@ -15,10 +15,10 @@ use { model::TokenPair, num::{CheckedMul, rational::Ratio}, shared::{ - ethrpc::Web3, http_solver::model::TokenAmount, recent_block_cache::Block, sources::uniswap_v3::pool_fetching::PoolFetching, + web3::Web3, }, std::{ collections::HashSet, diff --git a/crates/solver/src/liquidity/zeroex.rs b/crates/solver/src/liquidity/zeroex.rs index 503be0c60f..e6cf0f13e9 100644 --- a/crates/solver/src/liquidity/zeroex.rs +++ b/crates/solver/src/liquidity/zeroex.rs @@ -18,9 +18,9 @@ use { itertools::Itertools, model::{TokenPair, order::OrderKind}, shared::{ - ethrpc::Web3, http_solver::model::TokenAmount, recent_block_cache::Block, + web3::Web3, zeroex_api::{OrderRecord, OrdersQuery, ZeroExApi}, }, std::{ diff --git a/crates/solvers-dto/Cargo.toml b/crates/solvers-dto/Cargo.toml index fa1504ab30..3ac3c8df92 100644 --- a/crates/solvers-dto/Cargo.toml +++ b/crates/solvers-dto/Cargo.toml @@ -14,6 +14,7 @@ chrono = { workspace = true, features = ["serde"] } const-hex = { workspace = true } number = { workspace = true } serde = { workspace = true } +serde-ext = { workspace = true } serde_with = { workspace = true } [lints] diff --git a/crates/solvers-dto/src/auction.rs b/crates/solvers-dto/src/auction.rs index ecb11ca914..f0d244de8d 100644 --- a/crates/solvers-dto/src/auction.rs +++ b/crates/solvers-dto/src/auction.rs @@ -1,5 +1,4 @@ use { - super::serialize, alloy::primitives::{Address, B256, U256}, app_data::AppDataHash, bigdecimal::BigDecimal, @@ -28,7 +27,7 @@ pub struct Auction { 
#[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Order { - #[serde_as(as = "serialize::Hex")] + #[serde_as(as = "serde_ext::Hex")] pub uid: [u8; 56], pub sell_token: Address, pub buy_token: Address, @@ -273,7 +272,7 @@ pub struct ForeignLimitOrder { pub address: Address, #[serde_as(as = "HexOrDecimalU256")] pub gas_estimate: U256, - #[serde_as(as = "serialize::Hex")] + #[serde_as(as = "serde_ext::Hex")] pub hash: [u8; 32], pub maker_token: Address, pub taker_token: Address, diff --git a/crates/solvers-dto/src/lib.rs b/crates/solvers-dto/src/lib.rs index 2fe62793e6..d7c9d518dc 100644 --- a/crates/solvers-dto/src/lib.rs +++ b/crates/solvers-dto/src/lib.rs @@ -4,109 +4,3 @@ pub mod auction; pub mod notification; pub mod solution; - -mod serialize { - use { - serde::{Deserializer, Serializer, de}, - serde_with::{DeserializeAs, SerializeAs}, - }; - - /// Serialize and deserialize binary data as a hexadecimal string. - #[derive(Debug)] - pub struct Hex; - - impl<'de> DeserializeAs<'de, Vec> for Hex { - fn deserialize_as>(deserializer: D) -> Result, D::Error> { - struct Visitor; - - impl de::Visitor<'_> for Visitor { - type Value = Vec; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(formatter, "a hex-encoded string starting with \"0x\"") - } - - fn visit_str(self, s: &str) -> Result - where - E: de::Error, - { - if !s.starts_with("0x") { - return Err(de::Error::custom(format!( - "failed to decode {s:?} as a hex string: missing \"0x\" prefix", - ))); - } - const_hex::decode(&s[2..]).map_err(|err| { - de::Error::custom(format!("failed to decode {s:?} as a hex string: {err}",)) - }) - } - } - - deserializer.deserialize_str(Visitor) - } - } - - impl SerializeAs> for Hex { - fn serialize_as(source: &Vec, serializer: S) -> Result { - serializer.serialize_str(&bytes_to_hex_string(source.as_ref())) - } - } - - impl<'de, const N: usize> DeserializeAs<'de, [u8; N]> for Hex { - fn deserialize_as>(deserializer: D) -> Result<[u8; N], D::Error> { - struct Visitor { - result: [u8; N], - } - - impl de::Visitor<'_> for Visitor { - type Value = [u8; N]; - - fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { - write!( - formatter, - "a hex-encoded string starting with \"0x\" containing {N} bytes", - ) - } - - fn visit_str(mut self, s: &str) -> Result - where - E: de::Error, - { - if !s.starts_with("0x") { - return Err(de::Error::custom(format!( - "failed to decode {s:?} as a hex string: missing \"0x\" prefix", - ))); - } - let decoded = const_hex::decode(&s[2..]).map_err(|err| { - de::Error::custom(format!("failed to decode {s:?} as a hex string: {err}",)) - })?; - if decoded.len() != N { - return Err(de::Error::custom(format!( - "failed to decode {s:?} as a hex string: expected {N} bytes, got {}", - decoded.len() - ))); - } - self.result.copy_from_slice(&decoded); - Ok(self.result) - } - } - - deserializer.deserialize_str(Visitor { result: [0; N] }) - } - } - - impl SerializeAs<[u8; N]> for Hex { - fn serialize_as(source: &[u8; N], serializer: S) -> Result { - serializer.serialize_str(&bytes_to_hex_string(source)) - } - } - - fn bytes_to_hex_string(bytes: &[u8]) -> String { - let mut v = vec![0u8; 2 + bytes.len() * 2]; - v[0] = b'0'; - v[1] = b'x'; - // Unwrap because only possible error is vector wrong size which cannot happen. - const_hex::encode_to_slice(bytes, &mut v[2..]).unwrap(); - // Unwrap because encoded data is always valid utf8. 
- String::from_utf8(v).unwrap() - } -} diff --git a/crates/solvers-dto/src/notification.rs b/crates/solvers-dto/src/notification.rs index 7abdb0b53f..2a12552b61 100644 --- a/crates/solvers-dto/src/notification.rs +++ b/crates/solvers-dto/src/notification.rs @@ -1,5 +1,4 @@ use { - super::serialize, alloy::{ primitives::{Address, B256, U256}, rpc::types::AccessList, @@ -81,7 +80,7 @@ type BlockNo = u64; pub struct Tx { pub from: Address, pub to: Address, - #[serde_as(as = "serialize::Hex")] + #[serde_as(as = "serde_ext::Hex")] pub input: Vec, #[serde_as(as = "HexOrDecimalU256")] pub value: U256, diff --git a/crates/solvers-dto/src/solution.rs b/crates/solvers-dto/src/solution.rs index 701d80f87c..0f457cd424 100644 --- a/crates/solvers-dto/src/solution.rs +++ b/crates/solvers-dto/src/solution.rs @@ -1,5 +1,4 @@ use { - super::serialize, alloy::primitives::{Address, U256}, number::serialization::HexOrDecimalU256, serde::{Deserialize, Serialize}, @@ -36,7 +35,7 @@ pub struct Solution { #[serde_as] #[derive(Clone, Debug, Serialize, Deserialize, Hash, Eq, PartialEq)] -pub struct OrderUid(#[serde_as(as = "serialize::Hex")] pub [u8; 56]); +pub struct OrderUid(#[serde_as(as = "serde_ext::Hex")] pub [u8; 56]); #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(tag = "kind", rename_all = "camelCase")] @@ -83,13 +82,13 @@ pub struct JitOrder { #[serde(default)] pub partially_fillable: bool, pub valid_to: u32, - #[serde_as(as = "serialize::Hex")] + #[serde_as(as = "serde_ext::Hex")] pub app_data: [u8; 32], pub kind: Kind, pub sell_token_balance: SellTokenBalance, pub buy_token_balance: BuyTokenBalance, pub signing_scheme: SigningScheme, - #[serde_as(as = "serialize::Hex")] + #[serde_as(as = "serde_ext::Hex")] pub signature: Vec, } @@ -114,7 +113,7 @@ pub struct Call { pub target: Address, pub value: U256, #[serde(rename = "callData")] - #[serde_as(as = "serialize::Hex")] + #[serde_as(as = "serde_ext::Hex")] pub calldata: Vec, } @@ -141,7 +140,7 @@ pub struct CustomInteraction { #[serde_as(as = "HexOrDecimalU256")] pub value: U256, #[serde(rename = "callData")] - #[serde_as(as = "serialize::Hex")] + #[serde_as(as = "serde_ext::Hex")] pub calldata: Vec, pub allowances: Vec, pub inputs: Vec, @@ -158,7 +157,7 @@ pub struct OrderInteraction { #[serde_as(as = "HexOrDecimalU256")] pub value: U256, #[serde(rename = "callData")] - #[serde_as(as = "serialize::Hex")] + #[serde_as(as = "serde_ext::Hex")] pub calldata: Vec, } @@ -224,7 +223,7 @@ pub struct Flashloan { #[serde(rename_all = "camelCase")] pub struct WrapperCall { pub address: Address, - #[serde_as(as = "serialize::Hex")] + #[serde_as(as = "serde_ext::Hex")] #[serde(default)] pub data: Vec, } From 89bdca31a7e4ee05834f52033104f8eb5d0379c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Duarte?= Date: Tue, 17 Feb 2026 10:18:27 +0000 Subject: [PATCH 067/219] Add TOML configuration to the autopilot (#4147) # Description Kicks off #4005; to start only one of the more annoying arguments was migrated to validate the approach. 
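For illustration, a minimal sketch of the drivers section this patch expects in the autopilot TOML file, mirroring the test fixtures added in `crates/autopilot/src/config/solver.rs` (the addresses and the KMS ARN below are placeholders, and the exact layout used in the infra repo may differ):

```toml
# Each driver is an entry in the `drivers` array of tables.
[[drivers]]
name = "solver1"
url = "http://localhost:8080"
# Plain submission address...
submission-account.address = "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"

[[drivers]]
name = "solver2"
url = "http://localhost:8081"
# ...or an AWS KMS key (must start with "arn:aws:kms:").
submission-account.kms = "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"
```

The autopilot is then pointed at this file via the new `--config` CLI argument.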
# Changes - [ ] Add an optional config path - [ ] Remove the drivers CLI arg - [ ] Move the drivers argument to the TOML parsing ## How to test * Migrate the drivers argument to TOML in the infra repo * Run in staging --- Cargo.lock | 2 + crates/autopilot/Cargo.toml | 6 +- crates/autopilot/src/arguments.rs | 147 ++----------- crates/autopilot/src/config/mod.rs | 60 ++++++ crates/autopilot/src/config/solver.rs | 201 ++++++++++++++++++ .../domain/competition/winner_selection.rs | 4 +- crates/autopilot/src/infra/solvers/mod.rs | 16 +- crates/autopilot/src/lib.rs | 1 + crates/autopilot/src/run.rs | 46 ++-- crates/e2e/Cargo.toml | 2 +- crates/e2e/src/setup/services.rs | 51 +++-- crates/e2e/tests/e2e/autopilot_leader.rs | 75 +++++-- crates/e2e/tests/e2e/buffers.rs | 20 +- crates/e2e/tests/e2e/cow_amm.rs | 58 ++++- crates/e2e/tests/e2e/jit_orders.rs | 22 +- crates/e2e/tests/e2e/limit_orders.rs | 71 +++++-- crates/e2e/tests/e2e/liquidity.rs | 20 +- .../e2e/liquidity_source_notification.rs | 21 +- crates/e2e/tests/e2e/order_cancellation.rs | 3 + crates/e2e/tests/e2e/solver_competition.rs | 86 ++++++-- 20 files changed, 645 insertions(+), 267 deletions(-) create mode 100644 crates/autopilot/src/config/mod.rs create mode 100644 crates/autopilot/src/config/solver.rs diff --git a/Cargo.lock b/Cargo.lock index f2f53f5498..2700c81635 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1201,10 +1201,12 @@ dependencies = [ "shared", "sqlx", "strum", + "tempfile", "thiserror 1.0.69", "tikv-jemallocator", "tokio", "tokio-stream", + "toml", "tower 0.4.13", "tower-http 0.4.4", "tracing", diff --git a/crates/autopilot/Cargo.toml b/crates/autopilot/Cargo.toml index feddf27dbc..727cd5ef4b 100644 --- a/crates/autopilot/Cargo.toml +++ b/crates/autopilot/Cargo.toml @@ -63,14 +63,17 @@ tokio-stream = { workspace = true } tower = { workspace = true } tower-http = { workspace = true, features = ["trace"] } tracing = { workspace = true } -url = { workspace = true } +url = { workspace = true, features = ["serde"] } winner-selection = { workspace = true } +toml = { workspace = true } +tempfile = { workspace = true, optional = true } [dev-dependencies] maplit = { workspace = true } mockall = { workspace = true } tokio = { workspace = true, features = ["test-util"] } shared = { workspace = true, features = ["test-util"] } +tempfile = { workspace = true } [build-dependencies] anyhow = { workspace = true } @@ -82,3 +85,4 @@ workspace = true [features] mimalloc-allocator = ["dep:mimalloc"] tokio-console = ["observe/tokio-console"] +test-util = ["dep:tempfile"] diff --git a/crates/autopilot/src/arguments.rs b/crates/autopilot/src/arguments.rs index 1b8c592e7e..55f27f5104 100644 --- a/crates/autopilot/src/arguments.rs +++ b/crates/autopilot/src/arguments.rs @@ -1,26 +1,23 @@ use { crate::{database::INSERT_BATCH_SIZE_DEFAULT, infra}, - alloy::primitives::{Address, U256}, - anyhow::{Context, anyhow, ensure}, + alloy::primitives::Address, + anyhow::Context, chrono::{DateTime, Utc}, clap::ValueEnum, shared::{ - arguments::{FeeFactor, display_list, display_option, display_secret_option}, + arguments::{FeeFactor, display_option, display_secret_option}, http_client, price_estimation::{self, NativePriceEstimators}, }, - std::{ - fmt::{self, Display, Formatter}, - net::SocketAddr, - num::NonZeroUsize, - str::FromStr, - time::Duration, - }, + std::{net::SocketAddr, num::NonZeroUsize, path::PathBuf, str::FromStr, time::Duration}, url::Url, }; #[derive(clap::Parser)] -pub struct Arguments { +pub struct CliArguments { + #[clap(long, env)] + pub 
config: PathBuf, + #[clap(flatten)] pub shared: shared::arguments::Arguments, @@ -139,11 +136,6 @@ pub struct Arguments { )] pub trusted_tokens_update_interval: Duration, - /// A list of drivers in the following format: - /// `|||` - #[clap(long, env, use_value_delimiter = true)] - pub drivers: Vec, - /// The maximum number of blocks to wait for a settlement to appear on /// chain. #[clap(long, env, default_value = "5")] @@ -272,9 +264,10 @@ pub struct Arguments { pub native_price_prefetch_time: Duration, } -impl std::fmt::Display for Arguments { +impl std::fmt::Display for CliArguments { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let Self { + config, shared, order_quoting, http_client, @@ -295,7 +288,6 @@ impl std::fmt::Display for Arguments { trusted_tokens_url, trusted_tokens, trusted_tokens_update_interval, - drivers, submission_deadline, shadow, solve_deadline, @@ -319,7 +311,7 @@ impl std::fmt::Display for Arguments { native_price_cache_refresh, native_price_prefetch_time, } = self; - + write!(f, "{}", config.display())?; write!(f, "{shared}")?; write!(f, "{order_quoting}")?; write!(f, "{http_client}")?; @@ -354,7 +346,6 @@ impl std::fmt::Display for Arguments { f, "trusted_tokens_update_interval: {trusted_tokens_update_interval:?}" )?; - display_list(f, "drivers", drivers.iter())?; writeln!(f, "submission_deadline: {submission_deadline}")?; display_option(f, "shadow", shadow)?; writeln!(f, "solve_deadline: {solve_deadline:?}")?; @@ -404,75 +395,6 @@ impl std::fmt::Display for Arguments { } } -/// External solver driver configuration -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct Solver { - pub name: String, - pub url: Url, - pub submission_account: Account, - pub fairness_threshold: Option, -} - -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub enum Account { - /// AWS KMS is used to retrieve the solver public key - Kms(Arn), - /// Solver public key - Address(Address), -} - -// Wrapper type for AWS ARN identifiers -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct Arn(pub String); - -impl FromStr for Arn { - type Err = anyhow::Error; - - fn from_str(s: &str) -> Result { - // Could be more strict here, but this should suffice to catch unintended - // configuration mistakes - if s.starts_with("arn:aws:kms:") { - Ok(Self(s.to_string())) - } else { - Err(anyhow!("Invalid ARN identifier: {}", s)) - } - } -} - -impl Display for Solver { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - write!(f, "{}({})", self.name, self.url) - } -} - -impl FromStr for Solver { - type Err = anyhow::Error; - - fn from_str(solver: &str) -> anyhow::Result { - let parts: Vec<&str> = solver.split('|').collect(); - ensure!(parts.len() >= 3, "not enough arguments for external solver"); - let (name, url) = (parts[0], parts[1]); - let url: Url = url.parse()?; - let submission_account = match Arn::from_str(parts[2]) { - Ok(value) => Account::Kms(value), - _ => { - Account::Address(Address::from_str(parts[2]).context("failed to parse submission")?) - } - }; - - let fairness_threshold = parts - .get(3) - .and_then(|value| U256::from_str_radix(value, 10).ok()); - - Ok(Self { - name: name.to_owned(), - url, - fairness_threshold, - submission_account, - }) - } -} - #[derive(clap::Parser, Debug, Clone)] pub struct FeePoliciesConfig { /// Describes how the protocol fees should be calculated. 
@@ -665,7 +587,7 @@ impl FromStr for CowAmmConfig { #[cfg(test)] mod test { - use {super::*, alloy::primitives::address}; + use super::*; #[test] fn test_fee_factor_limits() { @@ -692,49 +614,4 @@ mod test { ) } } - - #[test] - fn parse_driver_submission_account_address() { - let argument = "name1|http://localhost:8080|0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"; - let driver = Solver::from_str(argument).unwrap(); - let expected = Solver { - name: "name1".into(), - url: Url::parse("http://localhost:8080").unwrap(), - fairness_threshold: None, - submission_account: Account::Address(address!( - "C02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2" - )), - }; - assert_eq!(driver, expected); - } - - #[test] - fn parse_driver_submission_account_arn() { - let argument = "name1|http://localhost:8080|arn:aws:kms:supersecretstuff"; - let driver = Solver::from_str(argument).unwrap(); - let expected = Solver { - name: "name1".into(), - url: Url::parse("http://localhost:8080").unwrap(), - fairness_threshold: None, - submission_account: Account::Kms( - Arn::from_str("arn:aws:kms:supersecretstuff").unwrap(), - ), - }; - assert_eq!(driver, expected); - } - - #[test] - fn parse_driver_with_threshold() { - let argument = "name1|http://localhost:8080|0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2|1000000000000000000"; - let driver = Solver::from_str(argument).unwrap(); - let expected = Solver { - name: "name1".into(), - url: Url::parse("http://localhost:8080").unwrap(), - submission_account: Account::Address(address!( - "C02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2" - )), - fairness_threshold: Some(U256::from(10).pow(U256::from(18))), - }; - assert_eq!(driver, expected); - } } diff --git a/crates/autopilot/src/config/mod.rs b/crates/autopilot/src/config/mod.rs new file mode 100644 index 0000000000..68f348ffb5 --- /dev/null +++ b/crates/autopilot/src/config/mod.rs @@ -0,0 +1,60 @@ +use { + crate::config::solver::Solver, + anyhow::{anyhow, ensure}, + serde::{Deserialize, Serialize}, + std::path::Path, +}; + +pub mod solver; + +#[derive(Debug, Default, Deserialize, Serialize)] +#[serde(rename_all = "kebab-case", deny_unknown_fields)] +pub struct Configuration { + // #[serde(default)] + pub drivers: Vec, +} + +impl Configuration { + pub async fn from_path>(path: P) -> anyhow::Result { + match toml::from_str(&tokio::fs::read_to_string(&path).await?) { + Ok(self_) => Ok(self_), + Err(err) if std::env::var("TOML_TRACE_ERROR").is_ok_and(|v| v == "1") => Err(anyhow!( + "failed to parse TOML config at {}: {err:#?}", + path.as_ref().display() + )), + Err(_) => Err(anyhow!( + "failed to parse TOML config at: {}. Set TOML_TRACE_ERROR=1 to print parsing \ + error but this may leak secrets.", + path.as_ref().display() + )), + } + } + + pub async fn to_path>(&self, path: P) -> anyhow::Result<()> { + Ok(tokio::fs::write(path, toml::to_string_pretty(self)?).await?) 
+ } + + #[cfg(any(test, feature = "test-util"))] + pub fn to_temp_path(&self) -> tempfile::NamedTempFile { + use std::io::Write; + let mut file = tempfile::NamedTempFile::new().expect("temp file creation should not fail"); + file.write_all( + toml::to_string_pretty(self) + .expect("serialization should not fail") + .as_bytes(), + ) + .expect("writing to temp file should not fail"); + file + } + + // Note for reviewers: if this and other validations are always applied, + // we should instead move them to the deserialization stage + // https://lexi-lambda.github.io/blog/2019/11/05/parse-don-t-validate/ + pub fn validate(self) -> anyhow::Result { + ensure!( + !self.drivers.is_empty(), + "colocation is enabled but no drivers are configured" + ); + Ok(self) + } +} diff --git a/crates/autopilot/src/config/solver.rs b/crates/autopilot/src/config/solver.rs new file mode 100644 index 0000000000..c728f3e477 --- /dev/null +++ b/crates/autopilot/src/config/solver.rs @@ -0,0 +1,201 @@ +use { + alloy::primitives::Address, + core::fmt, + serde::{Deserialize, Deserializer, Serialize}, + std::fmt::{Display, Formatter}, + url::Url, +}; + +/// External solver driver configuration +#[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)] +#[serde(rename_all = "kebab-case", deny_unknown_fields)] +pub struct Solver { + pub name: String, + pub url: Url, + pub submission_account: Account, +} + +impl Solver { + pub fn new(name: String, url: Url, account: Account) -> Self { + Self { + name, + url, + submission_account: account, + } + } +} + +impl Display for Solver { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "{}({})", self.name, self.url) + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize, Serialize)] +#[serde(rename_all = "kebab-case")] +pub enum Account { + /// AWS KMS is used to retrieve the solver public key + #[serde(deserialize_with = "deserialize_arn")] + Kms(Arn), + /// Solver public key + Address(Address), +} + +// Wrapper type for AWS ARN identifiers +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize)] +pub struct Arn(pub String); + +fn deserialize_arn<'de, D>(deserializer: D) -> Result +where + D: Deserializer<'de>, +{ + let raw_arn = String::deserialize(deserializer)?; + if raw_arn.starts_with("arn:aws:kms:") { + Ok(Arn(raw_arn)) + } else { + Err(serde::de::Error::invalid_value( + serde::de::Unexpected::Str(raw_arn.as_str()), + &"expected value starting with \"arn:aws:kms\"", + )) + } +} + +#[cfg(test)] +mod test { + use {super::*, alloy::primitives::address}; + + #[test] + fn parse_driver_submission_account_address() { + let toml = r#" + name = "name1" + url = "http://localhost:8080" + submission-account.address = "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2" + "#; + let driver = toml::from_str::(toml).unwrap(); + + let expected = Solver::new( + "name1".into(), + Url::parse("http://localhost:8080").unwrap(), + Account::Address(address!("C02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2")), + ); + assert_eq!(driver, expected); + } + + #[test] + fn parse_driver_submission_account_arn() { + let toml = r#" + name = "name1" + url = "http://localhost:8080" + submission-account.kms = "arn:aws:kms:supersecretstuff" + "#; + let driver = toml::from_str::(toml).unwrap(); + + let expected = Solver::new( + "name1".into(), + Url::parse("http://localhost:8080").unwrap(), + Account::Kms(Arn("arn:aws:kms:supersecretstuff".into())), + ); + assert_eq!(driver, expected); + } + + #[test] + fn parse_driver_with_threshold() { + let toml = r#" + name = "name1" + url = 
"http://localhost:8080" + submission-account.address = "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2" + "#; + let driver = toml::from_str::(toml).unwrap(); + + let expected = Solver::new( + "name1".into(), + Url::parse("http://localhost:8080").unwrap(), + Account::Address(address!("C02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2")), + ); + assert_eq!(driver, expected); + } + + #[test] + fn deserialize_valid_arn() { + let toml = r#"kms = "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012""#; + let account = toml::from_str::(toml).unwrap(); + + let expected = Account::Kms(Arn("arn:aws:kms:us-east-1:123456789012:key/\ + 12345678-1234-1234-1234-123456789012" + .into())); + assert_eq!(account, expected); + } + + #[test] + fn deserialize_invalid_arn_wrong_prefix() { + let toml = r#"kms = "arn:aws:s3:us-east-1:123456789012:bucket/mybucket""#; + let result = toml::from_str::(toml); + + assert!(result.is_err()); + let err = result.unwrap_err(); + assert!( + err.to_string() + .contains("expected value starting with \"arn:aws:kms\""), + "Error message: {}", + err + ); + } + + #[test] + fn deserialize_invalid_arn_not_arn() { + let toml = r#"kms = "not-an-arn""#; + let result = toml::from_str::(toml); + + assert!(result.is_err()); + let err = result.unwrap_err(); + assert!( + err.to_string() + .contains("expected value starting with \"arn:aws:kms\""), + "Error message: {}", + err + ); + } + + #[test] + fn parse_multiple_solvers() { + let toml = r#" + [[drivers]] + name = "solver1" + url = "http://localhost:8080" + submission-account.address = "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2" + + [[drivers]] + name = "solver2" + url = "http://localhost:8081" + # test the format used in the infra repo + [drivers.submission-account] + kms = "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012" + "#; + + #[derive(Deserialize)] + struct Config { + drivers: Vec, + } + + let config = toml::from_str::(toml).unwrap(); + + assert_eq!(config.drivers.len(), 2); + + let expected_solver1 = Solver::new( + "solver1".into(), + Url::parse("http://localhost:8080").unwrap(), + Account::Address(address!("C02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2")), + ); + + let expected_solver2 = Solver::new( + "solver2".into(), + Url::parse("http://localhost:8081").unwrap(), + Account::Kms(Arn("arn:aws:kms:us-east-1:123456789012:key/\ + 12345678-1234-1234-1234-123456789012" + .into())), + ); + + assert_eq!(config.drivers[0], expected_solver1); + assert_eq!(config.drivers[1], expected_solver2); + } +} diff --git a/crates/autopilot/src/domain/competition/winner_selection.rs b/crates/autopilot/src/domain/competition/winner_selection.rs index 2a5b6f12dd..33f6346050 100644 --- a/crates/autopilot/src/domain/competition/winner_selection.rs +++ b/crates/autopilot/src/domain/competition/winner_selection.rs @@ -281,6 +281,7 @@ impl Ranking { mod tests { use { crate::{ + config::solver::Account, domain::{ Auction, Order, @@ -1217,8 +1218,7 @@ mod tests { let driver = Driver::try_new( url::Url::parse("http://localhost").unwrap(), solver_address.to_string(), - None, - crate::arguments::Account::Address(solver_address), + Account::Address(solver_address), ) .await .unwrap(); diff --git a/crates/autopilot/src/infra/solvers/mod.rs b/crates/autopilot/src/infra/solvers/mod.rs index 38334631b8..a376bda7a9 100644 --- a/crates/autopilot/src/infra/solvers/mod.rs +++ b/crates/autopilot/src/infra/solvers/mod.rs @@ -1,6 +1,6 @@ use { self::dto::{reveal, settle, solve}, - crate::{arguments::Account, domain::eth, util}, + 
crate::{config::solver::Account, domain::eth, util}, alloy::signers::{Signer, aws::AwsSigner}, anyhow::{Context, Result, anyhow}, observe::tracing::tracing_headers, @@ -19,10 +19,6 @@ const RESPONSE_TIME_LIMIT: Duration = Duration::from_secs(60); pub struct Driver { pub name: String, pub url: Url, - // An optional threshold used to check "fairness" of provided solutions. If specified, a - // winning solution should be discarded if it contains at least one order, which - // another driver solved with surplus exceeding this driver's surplus by `threshold` - pub fairness_threshold: Option, pub submission_address: eth::Address, client: Client, } @@ -40,7 +36,6 @@ impl Driver { pub async fn try_new( url: Url, name: String, - fairness_threshold: Option, submission_account: Account, ) -> Result { let submission_address = match submission_account { @@ -57,18 +52,11 @@ impl Driver { } Account::Address(address) => address, }; - tracing::info!( - ?name, - ?url, - ?fairness_threshold, - ?submission_address, - "Creating solver" - ); + tracing::info!(?name, ?url, ?submission_address, "Creating solver"); Ok(Self { name, url, - fairness_threshold, client: Client::builder() .timeout(RESPONSE_TIME_LIMIT) .tcp_keepalive(Duration::from_secs(60)) diff --git a/crates/autopilot/src/lib.rs b/crates/autopilot/src/lib.rs index a7742f2cb0..d4da505be8 100644 --- a/crates/autopilot/src/lib.rs +++ b/crates/autopilot/src/lib.rs @@ -1,5 +1,6 @@ pub mod arguments; pub mod boundary; +pub mod config; pub mod database; pub mod domain; pub mod event_updater; diff --git a/crates/autopilot/src/run.rs b/crates/autopilot/src/run.rs index 39b4224be5..abc6edc2ba 100644 --- a/crates/autopilot/src/run.rs +++ b/crates/autopilot/src/run.rs @@ -1,7 +1,8 @@ use { crate::{ - arguments::{Account, Arguments}, + arguments::CliArguments, boundary, + config::{Configuration, solver::Account}, database::{ Postgres, ethflow_events::event_retriever::EthFlowRefundRetriever, @@ -123,7 +124,14 @@ async fn ethereum( } pub async fn start(args: impl Iterator) { - let args = Arguments::parse_from(args); + let args = CliArguments::parse_from(args); + + let config = Configuration::from_path(&args.config) + .await + .expect("failed to load configuration file") + .validate() + .expect("failed to validate configuration file"); + let obs_config = observe::Config::new( args.shared.logging.log_filter.as_str(), args.shared.logging.log_stderr_threshold, @@ -141,19 +149,19 @@ pub async fn start(args: impl Iterator) { observe::metrics::setup_registry(Some("gp_v2_autopilot".into()), None); - if args.drivers.is_empty() { - panic!("colocation is enabled but no drivers are configured"); - } - if args.shadow.is_some() { - shadow_mode(args).await; + shadow_mode(args, config).await; } else { - run(args, ShutdownController::default()).await; + run(args, config, ShutdownController::default()).await; } } /// Assumes tracing and metrics registry have already been set up. 
-pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { +pub async fn run( + args: CliArguments, + config: Configuration, + shutdown_controller: ShutdownController, +) { assert!(args.shadow.is_none(), "cannot run in shadow mode"); let db_write = Postgres::new( args.db_write_url.as_str(), @@ -572,19 +580,14 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { enable_leader_lock: args.enable_leader_lock, }; - let drivers_futures = args + let drivers_futures = config .drivers .into_iter() .map(|driver| async move { - infra::Driver::try_new( - driver.url, - driver.name.clone(), - driver.fairness_threshold.map(Into::into), - driver.submission_account, - ) - .await - .map(Arc::new) - .expect("failed to load solver configuration") + infra::Driver::try_new(driver.url, driver.name.clone(), driver.submission_account) + .await + .map(Arc::new) + .expect("failed to load solver configuration") }) .collect::>(); @@ -616,7 +619,7 @@ pub async fn run(args: Arguments, shutdown_controller: ShutdownController) { api_task.await.ok(); } -async fn shadow_mode(args: Arguments) -> ! { +async fn shadow_mode(args: CliArguments, config: Configuration) -> ! { let http_factory = HttpClientFactory::new(&args.http_client); let orderbook = infra::shadow::Orderbook::new( @@ -624,14 +627,13 @@ async fn shadow_mode(args: Arguments) -> ! { args.shadow.expect("missing shadow mode configuration"), ); - let drivers_futures = args + let drivers_futures = config .drivers .into_iter() .map(|driver| async move { infra::Driver::try_new( driver.url, driver.name.clone(), - driver.fairness_threshold.map(Into::into), // HACK: the auction logic expects all drivers // to use a different submission address. But // in the shadow environment all drivers use diff --git a/crates/e2e/Cargo.toml b/crates/e2e/Cargo.toml index 5507ec5045..df12e6e6a7 100644 --- a/crates/e2e/Cargo.toml +++ b/crates/e2e/Cargo.toml @@ -13,7 +13,7 @@ alloy = { workspace = true, default-features = false, features = ["json-rpc", "p alloy-signer = {workspace = true, default-features = false, features = ["eip712"]} anyhow = { workspace = true } app-data = { workspace = true } -autopilot = { workspace = true } +autopilot = { workspace = true, features = ["test-util"] } axum = { workspace = true } bigdecimal = { workspace = true } chrono = { workspace = true } diff --git a/crates/e2e/src/setup/services.rs b/crates/e2e/src/setup/services.rs index 49ee87202c..25f6958fa1 100644 --- a/crates/e2e/src/setup/services.rs +++ b/crates/e2e/src/setup/services.rs @@ -15,7 +15,13 @@ use { providers::ext::AnvilApi, }, app_data::{AppDataDocument, AppDataHash}, - autopilot::infra::persistence::dto, + autopilot::{ + config::{ + Configuration, + solver::{Account, Solver}, + }, + infra::persistence::dto, + }, clap::Parser, model::{ AuctionId, @@ -216,8 +222,13 @@ impl<'a> Services<'a> { .collect(); let args = ignore_overwritten_cli_params(args); - let args = autopilot::arguments::Arguments::try_parse_from(args).unwrap(); - let join_handle = tokio::task::spawn(autopilot::run(args, control)); + let args = autopilot::arguments::CliArguments::try_parse_from(args) + .map_err(|err| err.to_string()) + .unwrap(); + let config = autopilot::config::Configuration::from_path(&args.config) + .await + .unwrap(); + let join_handle = tokio::task::spawn(autopilot::run(args, config, control)); self.wait_until_autopilot_ready().await; join_handle @@ -298,14 +309,23 @@ impl<'a> Services<'a> { colocation::LiquidityProvider::UniswapV2, false, ); + + let config_file = 
Configuration { + // replace the --drivers argument with a vec of Solver structs + drivers: vec![Solver::new( + "test_solver".to_string(), + Url::from_str("http://localhost:11088/test_solver").unwrap(), + Account::Address(solver.address()), + )], + } + .to_temp_path(); + self.start_autopilot( None, [ vec![ - format!( - "--drivers=test_solver|http://localhost:11088/test_solver|{}|requested-timeout-on-problems", - const_hex::encode(solver.address()) - ), + // The config gets parsed as an extra argument, it will read the correct path + format!("--config={}", config_file.path().display()), "--price-estimation-drivers=test_quoter|http://localhost:11088/test_solver" .to_string(), "--gas-estimators=http://localhost:11088/gasprice".to_string(), @@ -349,6 +369,16 @@ impl<'a> Services<'a> { haircut_bps: 0, }]; + // Create TOML config file for the driver + let config_file = Configuration { + drivers: vec![Solver::new( + "test_solver".to_string(), + Url::parse("http://localhost:11088/test_solver").unwrap(), + Account::Address(solver.address()), + )], + } + .to_temp_path(); + let (autopilot_args, api_args) = if run_baseline { solvers.push( colocation::start_baseline_solver( @@ -365,7 +395,7 @@ impl<'a> Services<'a> { // Here we call the baseline_solver "test_quoter" to make the native price // estimation use the baseline_solver instead of the test_quoter let autopilot_args = vec![ - format!("--drivers=test_solver|http://localhost:11088/test_solver|{}", const_hex::encode(solver.address())), + format!("--config={}", config_file.path().display()), "--price-estimation-drivers=test_quoter|http://localhost:11088/baseline_solver,test_solver|http://localhost:11088/test_solver".to_string(), "--native-price-estimators=Driver|test_quoter|http://localhost:11088/baseline_solver,Driver|test_solver|http://localhost:11088/test_solver".to_string(), ]; @@ -375,10 +405,7 @@ impl<'a> Services<'a> { (autopilot_args, api_args) } else { let autopilot_args = vec![ - format!( - "--drivers=test_solver|http://localhost:11088/test_solver|{}", - const_hex::encode(solver.address()) - ), + format!("--config={}", config_file.path().display()), "--price-estimation-drivers=test_quoter|http://localhost:11088/test_solver" .to_string(), "--native-price-estimators=Driver|test_quoter|http://localhost:11088/test_solver" diff --git a/crates/e2e/tests/e2e/autopilot_leader.rs b/crates/e2e/tests/e2e/autopilot_leader.rs index 02f47a2c6d..bbcbf689e0 100644 --- a/crates/e2e/tests/e2e/autopilot_leader.rs +++ b/crates/e2e/tests/e2e/autopilot_leader.rs @@ -1,5 +1,11 @@ use { - autopilot::shutdown_controller::ShutdownController, + autopilot::{ + config::{ + Configuration, + solver::{Account, Solver}, + }, + shutdown_controller::ShutdownController, + }, e2e::setup::{ OnchainComponents, Services, @@ -12,6 +18,8 @@ use { ethrpc::{Web3, alloy::CallBuilderExt}, model::order::{OrderCreation, OrderKind}, number::units::EthUnit, + std::str::FromStr, + url::Url, }; #[tokio::test] @@ -86,26 +94,55 @@ async fn dual_autopilot_only_leader_produces_auctions(web3: Web3) { ); // Configure autopilot-leader only with test_solver - let autopilot_leader = services.start_autopilot_with_shutdown_controller(None, vec![ - format!("--drivers=test_solver|http://localhost:11088/test_solver|{}|requested-timeout-on-problems", - const_hex::encode(solver1.address())), - "--price-estimation-drivers=test_quoter|http://localhost:11088/test_solver".to_string(), - "--gas-estimators=http://localhost:11088/gasprice".to_string(), - "--metrics-address=0.0.0.0:9590".to_string(), - 
"--api-address=0.0.0.0:12088".to_string(), - "--enable-leader-lock=true".to_string(), - ], control).await; + let config_file_leader = Configuration { + drivers: vec![Solver::new( + "test_solver".to_string(), + Url::from_str("http://localhost:11088/test_solver").unwrap(), + Account::Address(solver1.address()), + )], + } + .to_temp_path(); + + let autopilot_leader = services + .start_autopilot_with_shutdown_controller( + None, + vec![ + format!("--config={}", config_file_leader.path().display()), + "--price-estimation-drivers=test_quoter|http://localhost:11088/test_solver" + .to_string(), + "--gas-estimators=http://localhost:11088/gasprice".to_string(), + "--metrics-address=0.0.0.0:9590".to_string(), + "--api-address=0.0.0.0:12088".to_string(), + "--enable-leader-lock=true".to_string(), + ], + control, + ) + .await; // Configure autopilot-backup only with test_solver2 - let _autopilot_follower = services.start_autopilot(None, vec![ - format!("--drivers=test_solver2|http://localhost:11088/test_solver2|{}|requested-timeout-on-problems", - const_hex::encode(solver2.address())), - "--price-estimation-drivers=test_quoter|http://localhost:11088/test_solver2".to_string(), - "--gas-estimators=http://localhost:11088/gasprice".to_string(), - "--metrics-address=0.0.0.0:9591".to_string(), - "--api-address=0.0.0.0:12089".to_string(), - "--enable-leader-lock=true".to_string(), - ]).await; + let config_file_follower = Configuration { + drivers: vec![Solver::new( + "test_solver2".to_string(), + Url::from_str("http://localhost:11088/test_solver2").unwrap(), + Account::Address(solver2.address()), + )], + } + .to_temp_path(); + + let _autopilot_follower = services + .start_autopilot( + None, + vec![ + format!("--config={}", config_file_follower.path().display()), + "--price-estimation-drivers=test_quoter|http://localhost:11088/test_solver2" + .to_string(), + "--gas-estimators=http://localhost:11088/gasprice".to_string(), + "--metrics-address=0.0.0.0:9591".to_string(), + "--api-address=0.0.0.0:12089".to_string(), + "--enable-leader-lock=true".to_string(), + ], + ) + .await; services .start_api(vec![ diff --git a/crates/e2e/tests/e2e/buffers.rs b/crates/e2e/tests/e2e/buffers.rs index e22e401ca7..66045b5d2b 100644 --- a/crates/e2e/tests/e2e/buffers.rs +++ b/crates/e2e/tests/e2e/buffers.rs @@ -1,5 +1,9 @@ use { ::alloy::primitives::U256, + autopilot::config::{ + Configuration, + solver::{Account, Solver}, + }, e2e::setup::*, ethrpc::alloy::CallBuilderExt, model::{ @@ -8,6 +12,8 @@ use { }, number::units::EthUnit, shared::web3::Web3, + std::str::FromStr, + url::Url, }; #[tokio::test] @@ -60,6 +66,15 @@ async fn onchain_settlement_without_liquidity(web3: Web3) { false, ); let services = Services::new(&onchain).await; + let config_file = Configuration { + drivers: vec![Solver::new( + "test_solver".to_string(), + Url::from_str("http://localhost:11088/test_solver").unwrap(), + Account::Address(solver.address()), + )], + } + .to_temp_path(); + services .start_autopilot( None, @@ -70,10 +85,7 @@ async fn onchain_settlement_without_liquidity(web3: Web3) { token_a = token_a.address(), token_b = token_b.address() ), - format!( - "--drivers=test_solver|http://localhost:11088/test_solver|{}", - const_hex::encode(solver.address()) - ), + format!("--config={}", config_file.path().display()), "--price-estimation-drivers=test_quoter|http://localhost:11088/test_solver" .to_string(), ], diff --git a/crates/e2e/tests/e2e/cow_amm.rs b/crates/e2e/tests/e2e/cow_amm.rs index 7faa156388..77987a81bf 100644 --- 
a/crates/e2e/tests/e2e/cow_amm.rs +++ b/crates/e2e/tests/e2e/cow_amm.rs @@ -6,6 +6,10 @@ use { ext::{AnvilApi, ImpersonateConfig}, }, }, + autopilot::config::{ + Configuration, + solver::{Account, Solver}, + }, contracts::alloy::{ ERC20, support::{Balances, Signatures}, @@ -38,7 +42,11 @@ use { SigningScheme, Solution, }, - std::collections::{HashMap, HashSet}, + std::{ + collections::{HashMap, HashSet}, + str::FromStr, + }, + url::Url, }; #[tokio::test] @@ -179,14 +187,21 @@ async fn cow_amm_jit(web3: Web3) { false, ); let services = Services::new(&onchain).await; + + let config_file = Configuration { + drivers: vec![Solver::new( + "mock_solver".to_string(), + Url::from_str("http://localhost:11088/mock_solver").unwrap(), + Account::Address(solver.address()), + )], + } + .to_temp_path(); + services .start_autopilot( None, vec![ - format!( - "--drivers=mock_solver|http://localhost:11088/mock_solver|{}", - const_hex::encode(solver.address()) - ), + format!("--config={}", config_file.path().display()), "--price-estimation-drivers=test_solver|http://localhost:11088/test_solver" .to_string(), ], @@ -551,11 +566,27 @@ factory = "0xf76c421bAb7df8548604E60deCCcE50477C10462" ); let services = Services::new(&onchain).await; + let config_file = Configuration { + drivers: vec![ + Solver::new( + "test_solver".to_string(), + Url::from_str("http://localhost:11088/test_solver").unwrap(), + Account::Address(solver.address()), + ), + Solver::new( + "mock_solver".to_string(), + Url::from_str("http://localhost:11088/mock_solver").unwrap(), + Account::Address(solver.address()), + ), + ], + } + .to_temp_path(); + services .start_autopilot( None, vec![ - format!("--drivers=test_solver|http://localhost:11088/test_solver|{},mock_solver|http://localhost:11088/mock_solver|{}", const_hex::encode(solver.address()), const_hex::encode(solver.address())), + format!("--config={}", config_file.path().display()), "--price-estimation-drivers=test_solver|http://localhost:11088/test_solver" .to_string(), // it uses an older helper contract that was deployed before the desired cow amm @@ -812,14 +843,21 @@ async fn cow_amm_opposite_direction(web3: Web3) { true, ); let services = Services::new(&onchain).await; + + let config_file = Configuration { + drivers: vec![Solver::new( + "mock_solver".to_string(), + Url::from_str("http://localhost:11088/mock_solver").unwrap(), + Account::Address(solver.address()), + )], + } + .to_temp_path(); + services .start_autopilot( None, vec![ - format!( - "--drivers=mock_solver|http://localhost:11088/mock_solver|{}", - const_hex::encode(solver.address()) - ), + format!("--config={}", config_file.path().display()), "--price-estimation-drivers=mock_solver|http://localhost:11088/mock_solver" .to_string(), ], diff --git a/crates/e2e/tests/e2e/jit_orders.rs b/crates/e2e/tests/e2e/jit_orders.rs index b93218458a..146bb701b5 100644 --- a/crates/e2e/tests/e2e/jit_orders.rs +++ b/crates/e2e/tests/e2e/jit_orders.rs @@ -1,5 +1,9 @@ use { ::alloy::primitives::U256, + autopilot::config::{ + Configuration, + solver::{Account, Solver}, + }, e2e::setup::{colocation::SolverEngine, mock::Mock, solution::JitOrder, *}, ethrpc::alloy::CallBuilderExt, model::{ @@ -9,7 +13,8 @@ use { number::units::EthUnit, shared::web3::Web3, solvers_dto::solution::{Asset, Solution}, - std::collections::HashMap, + std::{collections::HashMap, str::FromStr}, + url::Url, }; #[tokio::test] @@ -87,14 +92,21 @@ async fn single_limit_order_test(web3: Web3) { // We start the quoter as the baseline solver, and the mock solver as the one // 
returning the solution + + let config_file = Configuration { + drivers: vec![Solver::new( + "mock_solver".to_string(), + Url::from_str("http://localhost:11088/mock_solver").unwrap(), + Account::Address(solver.address()), + )], + } + .to_temp_path(); + services .start_autopilot( None, vec![ - format!( - "--drivers=mock_solver|http://localhost:11088/mock_solver|{}", - const_hex::encode(solver.address()) - ), + format!("--config={}", config_file.path().display()), "--price-estimation-drivers=test_solver|http://localhost:11088/test_solver" .to_string(), ], diff --git a/crates/e2e/tests/e2e/limit_orders.rs b/crates/e2e/tests/e2e/limit_orders.rs index 9777bb14b3..f847366195 100644 --- a/crates/e2e/tests/e2e/limit_orders.rs +++ b/crates/e2e/tests/e2e/limit_orders.rs @@ -4,6 +4,10 @@ use { primitives::{Address, U256, address}, providers::ext::{AnvilApi, ImpersonateConfig}, }, + autopilot::config::{ + Configuration, + solver::{Account, Solver}, + }, bigdecimal::BigDecimal, contracts::alloy::ERC20, database::byte_array::ByteArray, @@ -18,7 +22,8 @@ use { }, number::{conversions::big_decimal_to_big_uint, units::EthUnit}, shared::web3::Web3, - std::{collections::HashMap, ops::DerefMut}, + std::{collections::HashMap, ops::DerefMut, str::FromStr}, + url::Url, }; #[tokio::test] @@ -482,15 +487,33 @@ async fn two_limit_orders_multiple_winners_test(web3: Web3) { let uid_b = services.create_order(&order_b).await.unwrap(); // Start autopilot only once all the orders are created. - services.start_autopilot( - None, - vec![ - format!("--drivers=solver1|http://localhost:11088/test_solver|{}|10000000000000000,solver2|http://localhost:11088/solver2|{}", - const_hex::encode(solver_a.address()), const_hex::encode(solver_b.address())), - "--price-estimation-drivers=solver1|http://localhost:11088/test_solver".to_string(), - "--max-winners-per-auction=2".to_string(), + + let config_file = Configuration { + drivers: vec![ + Solver::new( + "solver1".to_string(), + Url::from_str("http://localhost:11088/test_solver").unwrap(), + Account::Address(solver_a.address()), + ), + Solver::new( + "solver2".to_string(), + Url::from_str("http://localhost:11088/solver2").unwrap(), + Account::Address(solver_b.address()), + ), ], - ).await; + } + .to_temp_path(); + + services + .start_autopilot( + None, + vec![ + format!("--config={}", config_file.path().display()), + "--price-estimation-drivers=solver1|http://localhost:11088/test_solver".to_string(), + "--max-winners-per-auction=2".to_string(), + ], + ) + .await; // Wait for trade let indexed_trades = || async { @@ -651,14 +674,21 @@ async fn too_many_limit_orders_test(web3: Web3) { colocation::LiquidityProvider::UniswapV2, false, ); + + let config_file = Configuration { + drivers: vec![Solver::new( + "test_solver".to_string(), + Url::from_str("http://localhost:11088/test_solver").unwrap(), + Account::Address(solver_address), + )], + } + .to_temp_path(); + services .start_autopilot( None, vec![ - format!( - "--drivers=test_solver|http://localhost:11088/test_solver|{}", - const_hex::encode(solver_address) - ), + format!("--config={}", config_file.path().display()), "--price-estimation-drivers=test_quoter|http://localhost:11088/test_solver" .to_string(), ], @@ -747,14 +777,21 @@ async fn limit_does_not_apply_to_in_market_orders_test(web3: Web3) { colocation::LiquidityProvider::UniswapV2, false, ); + + let config_file = Configuration { + drivers: vec![Solver::new( + "test_solver".to_string(), + Url::from_str("http://localhost:11088/test_solver").unwrap(), + 
Account::Address(solver_address), + )], + } + .to_temp_path(); + services .start_autopilot( None, vec![ - format!( - "--drivers=test_solver|http://localhost:11088/test_solver|{}", - const_hex::encode(solver_address) - ), + format!("--config={}", config_file.path().display()), "--price-estimation-drivers=test_quoter|http://localhost:11088/test_solver" .to_string(), ], diff --git a/crates/e2e/tests/e2e/liquidity.rs b/crates/e2e/tests/e2e/liquidity.rs index 5e13e3baf2..c7717377e4 100644 --- a/crates/e2e/tests/e2e/liquidity.rs +++ b/crates/e2e/tests/e2e/liquidity.rs @@ -6,6 +6,10 @@ use { ext::{AnvilApi, ImpersonateConfig}, }, }, + autopilot::config::{ + Configuration, + solver::{Account, Solver}, + }, chrono::{NaiveDateTime, Utc}, contracts::alloy::{ERC20, IZeroex}, e2e::{ @@ -27,6 +31,8 @@ use { signature::EcdsaSigningScheme, }, number::units::EthUnit, + std::str::FromStr, + url::Url, }; /// The block number from which we will fetch state for the forked tests. @@ -189,16 +195,22 @@ async fn zero_ex_liquidity(web3: Web3) { }, false, ); + let config_file = Configuration { + drivers: vec![Solver::new( + "test_solver".to_string(), + Url::from_str("http://localhost:11088/test_solver").unwrap(), + Account::Address(solver.address()), + )], + } + .to_temp_path(); + services .start_autopilot( None, vec![ "--price-estimation-drivers=test_quoter|http://localhost:11088/test_solver" .to_string(), - format!( - "--drivers=test_solver|http://localhost:11088/test_solver|{}", - const_hex::encode(solver.address()) - ), + format!("--config={}", config_file.path().display()), ], ) .await; diff --git a/crates/e2e/tests/e2e/liquidity_source_notification.rs b/crates/e2e/tests/e2e/liquidity_source_notification.rs index 8a2cb42b1f..5e1e465904 100644 --- a/crates/e2e/tests/e2e/liquidity_source_notification.rs +++ b/crates/e2e/tests/e2e/liquidity_source_notification.rs @@ -4,6 +4,10 @@ use { providers::ext::{AnvilApi, ImpersonateConfig}, signers::SignerSync, }, + autopilot::config::{ + Configuration, + solver::{Account, Solver}, + }, chrono::Utc, contracts::alloy::{ERC20, LiquoriceSettlement}, driver::infra, @@ -26,7 +30,8 @@ use { }, number::units::EthUnit, solvers_dto::solution::Solution, - std::collections::HashMap, + std::{collections::HashMap, str::FromStr}, + url::Url, }; /// The block number from which we will fetch state for the forked tests. 
@@ -199,16 +204,22 @@ http-timeout = "10s" liquorice_api.port )), ); + let config_file = Configuration { + drivers: vec![Solver::new( + "liquorice_solver".to_string(), + Url::from_str("http://localhost:11088/liquorice_solver").unwrap(), + Account::Address(solver.address()), + )], + } + .to_temp_path(); + services .start_autopilot( None, vec![ "--price-estimation-drivers=test_quoter|http://localhost:11088/test_solver" .to_string(), - format!( - "--drivers=liquorice_solver|http://localhost:11088/liquorice_solver|{}", - const_hex::encode(solver.address()) - ), + format!("--config={}", config_file.path().display()), ], ) .await; diff --git a/crates/e2e/tests/e2e/order_cancellation.rs b/crates/e2e/tests/e2e/order_cancellation.rs index a2dd9a5cdf..583ff81eed 100644 --- a/crates/e2e/tests/e2e/order_cancellation.rs +++ b/crates/e2e/tests/e2e/order_cancellation.rs @@ -1,5 +1,6 @@ use { ::alloy::primitives::U256, + autopilot::config::Configuration, database::order_events::OrderEventLabel, e2e::setup::*, ethrpc::alloy::CallBuilderExt, @@ -65,10 +66,12 @@ async fn order_cancellation(web3: Web3) { colocation::LiquidityProvider::UniswapV2, false, ); + let config_file = Configuration::default().to_temp_path(); services .start_autopilot( None, vec![ + format!("--config={}", config_file.path().display()), "--price-estimation-drivers=test_quoter|http://localhost:11088/test_solver" .to_string(), ], diff --git a/crates/e2e/tests/e2e/solver_competition.rs b/crates/e2e/tests/e2e/solver_competition.rs index 1b03cea819..0eb4951290 100644 --- a/crates/e2e/tests/e2e/solver_competition.rs +++ b/crates/e2e/tests/e2e/solver_competition.rs @@ -1,5 +1,9 @@ use { - ::alloy::primitives::U256, + ::alloy::primitives::{U256, address}, + autopilot::config::{ + Configuration, + solver::{Account, Solver}, + }, e2e::setup::{colocation::SolverEngine, mock::Mock, *}, ethrpc::alloy::CallBuilderExt, model::{ @@ -9,7 +13,8 @@ use { number::units::EthUnit, shared::web3::Web3, solvers_dto::solution::Solution, - std::collections::HashMap, + std::{collections::HashMap, str::FromStr}, + url::Url, }; #[tokio::test] @@ -80,11 +85,27 @@ async fn solver_competition(web3: Web3) { ); let services = Services::new(&onchain).await; + + let config_file = Configuration { + drivers: vec![ + Solver::new( + "test_solver".to_string(), + Url::from_str("http://localhost:11088/test_solver").unwrap(), + Account::Address(solver.address()), + ), + Solver::new( + "solver2".to_string(), + Url::from_str("http://localhost:11088/solver2").unwrap(), + Account::Address(solver.address()), + ), + ], + } + .to_temp_path(); + services.start_autopilot( None, vec![ - format!("--drivers=test_solver|http://localhost:11088/test_solver|{},solver2|http://localhost:11088/solver2|{}", const_hex::encode(solver.address()), const_hex::encode(solver.address()) - ), + format!("--config={}", config_file.path().display()), "--price-estimation-drivers=test_quoter|http://localhost:11088/test_solver,solver2|http://localhost:11088/solver2".to_string(), ], ).await; @@ -220,14 +241,34 @@ async fn wrong_solution_submission_address(web3: Web3) { ); let services = Services::new(&onchain).await; - services.start_autopilot( - None, - // Solver 1 has a wrong submission address, meaning that the solutions should be discarded from solver1 - vec![ - format!("--drivers=solver1|http://localhost:11088/test_solver|0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2,solver2|http://localhost:11088/solver2|{}", const_hex::encode(solver.address())), - 
"--price-estimation-drivers=solver1|http://localhost:11088/test_solver".to_string(), + + // Solver 1 has a wrong submission address, meaning that the solutions should be + // discarded from solver1 + let config_file = Configuration { + drivers: vec![ + Solver::new( + "solver1".to_string(), + Url::from_str("http://localhost:11088/test_solver").unwrap(), + Account::Address(address!("C02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2")), + ), + Solver::new( + "solver2".to_string(), + Url::from_str("http://localhost:11088/solver2").unwrap(), + Account::Address(solver.address()), + ), ], - ).await; + } + .to_temp_path(); + + services + .start_autopilot( + None, + vec![ + format!("--config={}", config_file.path().display()), + "--price-estimation-drivers=solver1|http://localhost:11088/test_solver".to_string(), + ], + ) + .await; services .start_api(vec![ "--price-estimation-drivers=solver1|http://localhost:11088/test_solver".to_string(), @@ -366,15 +407,28 @@ async fn store_filtered_solutions(web3: Web3) { // We start the quoter as the baseline solver, and the mock solver as the one // returning the solution + + let config_file = Configuration { + drivers: vec![ + Solver::new( + "good_solver".to_string(), + Url::from_str("http://localhost:11088/good_solver").unwrap(), + Account::Address(good_solver_account.address()), + ), + Solver::new( + "bad_solver".to_string(), + Url::from_str("http://localhost:11088/bad_solver").unwrap(), + Account::Address(bad_solver_account.address()), + ), + ], + } + .to_temp_path(); + services .start_autopilot( None, vec![ - format!( - "--drivers=good_solver|http://localhost:11088/good_solver|{},bad_solver|http://localhost:11088/bad_solver|{}", - const_hex::encode(good_solver_account.address()), - const_hex::encode(bad_solver_account.address()), - ), + format!("--config={}", config_file.path().display()), "--price-estimation-drivers=test_solver|http://localhost:11088/test_solver" .to_string(), "--max-winners-per-auction=10".to_string(), From 52f7d7f6621c550955106829123a359bf24cdeef Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Tue, 17 Feb 2026 11:37:51 +0100 Subject: [PATCH 068/219] autopilot: send `/solve` requests with ref-counted body (#4159) # Description Currently the way the autopilot serializes `/solve` requests is still sub optimal. While it does indeed only serialize the `/solve` requests once it still allocates memory for every HTTP request it ultimately sends. Since `reqwest` also supports `Bytes` as the request body which are already references counted we can save a bunch of allocating and copying data by serializing the auction data into a `Bytes` instance and cloning that into the individual `/solve` HTTP requests. 
# Changes - changes internal members of `solve::Request` from `serde_json::RawValue` to `Bytes` - adds a new `X-Auction-Id` header which is important for another optimization but that has to be merged before said optimization - moved original serialization from the executor thread to a background task that's intended for blocking operations since serialization takes quite a bit of time ## How to test existing e2e tests --- Cargo.lock | 15 +++++ Cargo.toml | 1 + crates/autopilot/Cargo.toml | 3 +- .../autopilot/src/infra/solvers/dto/solve.rs | 57 ++++++++++++++----- crates/autopilot/src/infra/solvers/mod.rs | 45 +++++++++------ crates/autopilot/src/run_loop.rs | 3 +- crates/autopilot/src/shadow.rs | 3 +- 7 files changed, 95 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2700c81635..7e6e51088b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1164,6 +1164,7 @@ dependencies = [ "async-trait", "axum", "bigdecimal", + "bytes", "bytes-hex", "chain", "chrono", @@ -5462,6 +5463,7 @@ dependencies = [ "url", "wasm-bindgen", "wasm-bindgen-futures", + "wasm-streams", "web-sys", "winreg", ] @@ -7616,6 +7618,19 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "wasm-streams" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" +dependencies = [ + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "wasmtimer" version = "0.4.3" diff --git a/Cargo.toml b/Cargo.toml index 61baa62db7..84bcbc9b5a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -60,6 +60,7 @@ autopilot = { path = "crates/autopilot" } aws-config = "1.5.1" aws-sdk-s3 = { version = "1.34.0", default-features = false } bytes-hex = { path = "crates/bytes-hex" } +bytes = "1.11.1" chain = { path = "crates/chain" } console-subscriber = "0.3.0" const_format = "0.2.32" diff --git a/crates/autopilot/Cargo.toml b/crates/autopilot/Cargo.toml index 727cd5ef4b..97750f6d16 100644 --- a/crates/autopilot/Cargo.toml +++ b/crates/autopilot/Cargo.toml @@ -19,6 +19,7 @@ alloy = { workspace = true, features = ["rand", "provider-debug-api", "provider- app-data = { workspace = true } axum = { workspace = true } bytes-hex = { workspace = true } # may get marked as unused but it's used with serde +bytes = { workspace = true } anyhow = { workspace = true } async-trait = { workspace = true } bigdecimal = { workspace = true } @@ -47,7 +48,7 @@ order-validation = { workspace = true } prometheus = { workspace = true } prometheus-metric-storage = { workspace = true } rand = { workspace = true } -reqwest = { workspace = true, features = ["gzip", "json"] } +reqwest = { workspace = true, features = ["gzip", "json", "stream"] } rust_decimal = { workspace = true } s3 = { workspace = true } serde = { workspace = true } diff --git a/crates/autopilot/src/infra/solvers/dto/solve.rs b/crates/autopilot/src/infra/solvers/dto/solve.rs index a66b065fa8..ed6aeb9d9f 100644 --- a/crates/autopilot/src/infra/solvers/dto/solve.rs +++ b/crates/autopilot/src/infra/solvers/dto/solve.rs @@ -2,17 +2,22 @@ use { crate::{ boundary, domain::{self, eth}, - infra::persistence::dto::{self, order::Order}, + infra::{ + persistence::dto::{self, order::Order}, + solvers::InjectIntoHttpRequest, + }, }, alloy::primitives::{Address, U256}, + bytes::Bytes, chrono::{DateTime, Utc}, itertools::Itertools, number::serialization::HexOrDecimalU256, + reqwest::RequestBuilder, serde::{Deserialize, Serialize}, 
serde_with::{DisplayFromStr, serde_as}, std::{ + borrow::Cow, collections::{HashMap, HashSet}, - sync::Arc, time::Duration, }, }; @@ -21,17 +26,14 @@ use { /// request. The purpose of this is to make it ergonomic /// to serialize a request once and reuse the resulting /// string in multiple HTTP requests. -#[derive(Clone, Debug, Serialize, derive_more::Display)] -pub struct Request(Arc); - -impl Request { - pub fn as_str(&self) -> &str { - self.0.get() - } +#[derive(Clone, Debug)] +pub struct Request { + auction_id: i64, + body: bytes::Bytes, } impl Request { - pub fn new( + pub async fn new( auction: &domain::Auction, trusted_tokens: &HashSet
, time_limit: Duration, @@ -64,9 +66,38 @@ impl Request { deadline: Utc::now() + chrono::Duration::from_std(time_limit).unwrap(), surplus_capturing_jit_order_owners: auction.surplus_capturing_jit_order_owners.to_vec(), }; - Self(Arc::from(serde_json::value::to_raw_value(&helper).expect( - "only fails with non-string keys which we do not have", - ))) + let auction_id = auction.id; + + let body = tokio::task::spawn_blocking(move || { + let serialized = serde_json::to_vec(&helper).expect("type should be JSON serializable"); + Bytes::from(serialized) + }) + .await + .expect("inner task should not panic as serialization should work for the given type"); + + Self { body, auction_id } + } +} + +impl InjectIntoHttpRequest for Request { + fn inject(&self, request: RequestBuilder) -> RequestBuilder { + request + .body(self.body.clone()) + // announce which auction this request is for in the + // headers to help the driver detect duplicated + // `/solve` requests before streaming the body + .header("X-Auction-Id", self.auction_id) + // manually set the content type header for JSON since + // we can't use `request.json(self)` + .header( + hyper::header::CONTENT_TYPE, + hyper::header::HeaderValue::from_static("application/json") + ) + } + + fn body_to_string(&self) -> Cow<'_, str> { + let string = str::from_utf8(self.body.as_ref()).unwrap(); + Cow::Borrowed(string) } } diff --git a/crates/autopilot/src/infra/solvers/mod.rs b/crates/autopilot/src/infra/solvers/mod.rs index a376bda7a9..c65155341f 100644 --- a/crates/autopilot/src/infra/solvers/mod.rs +++ b/crates/autopilot/src/infra/solvers/mod.rs @@ -4,8 +4,8 @@ use { alloy::signers::{Signer, aws::AwsSigner}, anyhow::{Context, Result, anyhow}, observe::tracing::tracing_headers, - reqwest::{Client, StatusCode}, - std::time::Duration, + reqwest::{Client, RequestBuilder, StatusCode}, + std::{borrow::Cow, time::Duration}, thiserror::Error, tracing::instrument, url::Url, @@ -110,28 +110,22 @@ impl Driver { async fn request_response( &self, path: &str, - request: Request, + payload: Request, ) -> Result where Response: serde::de::DeserializeOwned, - Request: serde::Serialize + Send + Sync + 'static, + Request: InjectIntoHttpRequest, { let url = util::join(&self.url, path); + tracing::trace!( - path=&url.path(), - body=%serde_json::to_string_pretty(&request).unwrap(), + path = &url.path(), + body = %payload.body_to_string(), "solver request", ); - let mut request = { - let builder = self.client.post(url.clone()).headers(tracing_headers()); - // If the payload is very big then serializing it will block the - // executor a long time (mostly relevant for solve requests). - // That's why we always do it on a thread specifically for - // running blocking tasks. - tokio::task::spawn_blocking(move || builder.json(&request)) - .await - .context("failed to build request")? 
- }; + + let request = self.client.post(url.clone()).headers(tracing_headers()); + let mut request = payload.inject(request); if let Some(request_id) = observe::distributed_tracing::request_id::from_current_span() { request = request.header("X-REQUEST-ID", request_id); @@ -169,3 +163,22 @@ pub async fn response_body_with_size_limit( } Ok(bytes) } + +trait InjectIntoHttpRequest { + fn inject(&self, request: RequestBuilder) -> RequestBuilder; + fn body_to_string(&self) -> Cow<'_, str>; +} + +impl InjectIntoHttpRequest for T +where + T: serde::ser::Serialize + Sized, +{ + fn inject(&self, request: RequestBuilder) -> RequestBuilder { + request.json(&self) + } + + fn body_to_string(&self) -> Cow<'_, str> { + let serialized = serde_json::to_string(&self).expect("type should be JSON serializable"); + Cow::Owned(serialized) + } +} diff --git a/crates/autopilot/src/run_loop.rs b/crates/autopilot/src/run_loop.rs index 7e36d04e68..1b037f3ed3 100644 --- a/crates/autopilot/src/run_loop.rs +++ b/crates/autopilot/src/run_loop.rs @@ -561,7 +561,8 @@ impl RunLoop { auction, &self.trusted_tokens.all(), self.config.solve_deadline, - ); + ) + .await; let mut bids = futures::future::join_all( self.drivers diff --git a/crates/autopilot/src/shadow.rs b/crates/autopilot/src/shadow.rs index 5c28857a7f..02e5de879f 100644 --- a/crates/autopilot/src/shadow.rs +++ b/crates/autopilot/src/shadow.rs @@ -179,7 +179,8 @@ impl RunLoop { /// Runs the solver competition, making all configured drivers participate. #[instrument(skip_all)] async fn competition(&self, auction: &domain::Auction) -> Vec> { - let request = solve::Request::new(auction, &self.trusted_tokens.all(), self.solve_deadline); + let request = + solve::Request::new(auction, &self.trusted_tokens.all(), self.solve_deadline).await; futures::future::join_all( self.drivers From 8f3b76cc6ec460b30d79e5923f4295256873ae32 Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Tue, 17 Feb 2026 12:05:02 +0100 Subject: [PATCH 069/219] Only stream 1 `/solve` request body in driver (#4160) # Description Because the driver serves multiple solvers it receives a bunch of duplicated `/solve` requests. There is already logic to deduplicate the pre-processing but we there is still one part left that's done unnecessarily often: streaming the HTTP body. Streaming the http body currently takes up to 700ms which is surprisingly slow considering that the HTTP request goes from one k8s pod to another and not via the public internet. I suspect the problem is that we are actually streaming ~10MB `/solve` requests 23 times in parallel (numbers from mainnet). https://github.com/cowprotocol/services/pull/4159 introduced a new header (`X-Auction-Id`) that can be used to detect which auction a request is related to without having to stream the entire body. With this change everything but prioritizing (i.e. sorting and allocating balances for orders) and the serialization of the driver's `/solve` request will be de-duplicated. That means adding more solvers to the driver will be less costly. If we consider enforcing the same prioritization logic for ALL solvers that could also be de-duplicated leading to more or less 0 overhead for adding more solvers to the same driver. # Changes - inspect `X-Auction-Id` header to figure out whether we have to process the request or just await an existing pre-processing task Note that this change must be released AFTER https://github.com/cowprotocol/services/pull/4159`. 
The reason is that k8s first rolls out `driver` pods so there would be a period where the old `autopilot` is still sending requests without the `X-Auction-Id` header. ## How to test e2e tests --------- Co-authored-by: ilya --- crates/driver/src/domain/competition/mod.rs | 7 +- .../src/domain/competition/pre_processing.rs | 70 ++++++++++++++----- .../driver/src/infra/api/routes/solve/mod.rs | 57 ++------------- crates/driver/src/tests/setup/mod.rs | 1 + 4 files changed, 60 insertions(+), 75 deletions(-) diff --git a/crates/driver/src/domain/competition/mod.rs b/crates/driver/src/domain/competition/mod.rs index afe65a25d3..f2761bbf32 100644 --- a/crates/driver/src/domain/competition/mod.rs +++ b/crates/driver/src/domain/competition/mod.rs @@ -22,8 +22,9 @@ use { util::math, }, alloy::primitives::Bytes, + axum::body::Body, futures::{StreamExt, future::Either, stream::FuturesUnordered}, - hyper::body::Bytes as RequestBytes, + hyper::Request, itertools::Itertools, std::{ cmp::Reverse, @@ -114,14 +115,14 @@ impl Competition { } /// Solve an auction as part of this competition. - pub async fn solve(&self, auction: RequestBytes) -> Result, Error> { + pub async fn solve(&self, request: Request) -> Result, Error> { let start = Instant::now(); let timer = ::observe::metrics::metrics() .on_auction_overhead_start("driver", "pre_processing_total"); let tasks = self .fetcher - .start_or_get_tasks_for_auction(auction) + .start_or_get_tasks_for_auction(request) .await .map_err(|err| { tracing::error!(?err, "pre-processing auction failed"); diff --git a/crates/driver/src/domain/competition/pre_processing.rs b/crates/driver/src/domain/competition/pre_processing.rs index ce18e013ee..ccd17ad8cc 100644 --- a/crates/driver/src/domain/competition/pre_processing.rs +++ b/crates/driver/src/domain/competition/pre_processing.rs @@ -11,9 +11,10 @@ use { }, alloy::primitives::{Bytes, FixedBytes}, anyhow::{Context, Result}, + axum::body::Body, chrono::Utc, futures::{FutureExt, StreamExt, future::BoxFuture, stream::FuturesUnordered}, - hyper::body::Bytes as RequestBytes, + hyper::{Request, body::Bytes as RequestBytes}, itertools::Itertools, model::{ interaction::InteractionData, @@ -25,9 +26,14 @@ use { price_estimation::trade_verifier::balance_overrides::BalanceOverrideRequest, signature_validator::SignatureValidating, }, - std::{collections::HashMap, future::Future, sync::Arc, time::Duration}, + std::{ + collections::HashMap, + future::Future, + sync::Arc, + time::{Duration, Instant}, + }, tokio::sync::Mutex, - tracing::Instrument, + tracing::{Instrument, instrument}, }; type Shared = futures::future::Shared>; @@ -74,7 +80,7 @@ impl std::fmt::Debug for Utilities { #[derive(Debug)] struct ControlBlock { /// Auction for which the data aggregation task was spawned. - solve_request: RequestBytes, + auction_id: i64, /// Data aggregation task. tasks: DataFetchingTasks, } @@ -91,26 +97,34 @@ impl DataAggregator { /// only once for all connected solvers to share. pub async fn start_or_get_tasks_for_auction( &self, - request: RequestBytes, + request: Request, ) -> Result { let mut lock = self.control.lock().await; - let current_auction = &lock.solve_request; - - // The autopilot ensures that all drivers receive identical - // requests per auction. That means we can use the significantly - // cheaper string comparison instead of parsing the JSON to compare - // the auction ids. 
- if request == current_auction { - let id = lock.tasks.auction.clone().await.id; - init_auction_id_in_span(id.map(|i| i.0)); + let current_auction = &lock.auction_id; + + // Figure out for which auction this `/solve` request was issued + // by looking at the `X-Auction-Id` header. + let request_auction_id: i64 = request + .headers() + .get("X-Auction-Id") + .context("request has no X-Auction-Id header")? + .to_str() + .context("X-Auction-Id header is not ASCII")? + .parse() + .context("could not parse X-Auction-Id header as i64")?; + + // Some other driver is already doing the pre-processing for this + // auction. Stop processing here and just await the existing task. + if request_auction_id == *current_auction { + init_auction_id_in_span(Some(request_auction_id)); tracing::debug!("await running data aggregation task"); return Ok(lock.tasks.clone()); } - let tasks = self.assemble_tasks(request.clone()).await?; + let tasks = self.assemble_tasks(request).await?; tracing::debug!("started new data aggregation task"); - lock.solve_request = request; + lock.auction_id = request_auction_id; lock.tasks = tasks.clone(); Ok(tasks) @@ -153,7 +167,7 @@ impl DataAggregator { cow_amm_cache, }), control: Mutex::new(ControlBlock { - solve_request: Default::default(), + auction_id: Default::default(), tasks: DataFetchingTasks { auction: futures::future::pending().boxed().shared(), balances: futures::future::pending().boxed().shared(), @@ -165,7 +179,7 @@ impl DataAggregator { } } - async fn assemble_tasks(&self, request: RequestBytes) -> Result { + async fn assemble_tasks(&self, request: Request) -> Result { let auction = self.utilities.parse_request(request).await?; let balances = @@ -212,7 +226,9 @@ impl Utilities { /// Parses the JSON body of the `/solve` request during the unified /// auction pre-processing since eagerly deserializing these requests /// is surprisingly costly because their are so big. 
- async fn parse_request(&self, solve_request: RequestBytes) -> Result> { + async fn parse_request(&self, solve_request: Request) -> Result> { + let solve_request = collect_request_body(solve_request).await?; + let auction_dto: SolveRequest = { let _timer = metrics::get().processing_stage_timer("parse_dto"); let _timer2 = @@ -539,3 +555,19 @@ fn init_auction_id_in_span(id: Option) { debug_assert!(current_span.has_field("auction_id")); current_span.record("auction_id", id); } + +#[instrument(skip_all)] +async fn collect_request_body(request: Request) -> Result { + tracing::trace!("start streaming request body"); + let _timer = + observe::metrics::metrics().on_auction_overhead_start("driver", "stream_http_body"); + let start = Instant::now(); + + let body_bytes = hyper::body::to_bytes(request.into_body()) + .await + .context("failed to stream request body")?; + + let duration = start.elapsed(); + tracing::debug!(?duration, "finished streaming request body"); + Ok(body_bytes) +} diff --git a/crates/driver/src/infra/api/routes/solve/mod.rs b/crates/driver/src/infra/api/routes/solve/mod.rs index b378dae435..417ddd8b5e 100644 --- a/crates/driver/src/infra/api/routes/solve/mod.rs +++ b/crates/driver/src/infra/api/routes/solve/mod.rs @@ -2,16 +2,11 @@ pub mod dto; pub use dto::AuctionError; use { - crate::{ - domain::competition, - infra::{ - api::{Error, State}, - observe, - }, + crate::infra::{ + api::{Error, State}, + observe, }, axum::{body::Body, http::Request}, - hyper::body::Bytes, - std::time::{Duration, Instant}, tracing::Instrument, }; @@ -29,9 +24,8 @@ async fn route( let solver = state.solver().name().as_str(); let handle_request = async { - let body_bytes = collect_request_body(request, solver).await?; let competition = state.competition(); - let result = competition.solve(body_bytes).await; + let result = competition.solve(request).await; // Solving takes some time, so there is a chance for the settlement queue to // have capacity again. competition.ensure_settle_queue_capacity()?; @@ -50,46 +44,3 @@ async fn route( )) .await } - -async fn collect_request_body( - request: Request, - solver: &str, -) -> Result { - tracing::trace!("start streaming request body"); - let start = Instant::now(); - - let body_bytes = hyper::body::to_bytes(request.into_body()) - .await - .map_err(|err| { - tracing::warn!(?err, "failed to stream request body"); - competition::Error::MalformedRequest - })?; - - let duration = start.elapsed(); - Metrics::measure_solve_transfer_time(solver, duration); - tracing::trace!(?duration, "finished streaming request body"); - Ok(body_bytes) -} - -#[derive(prometheus_metric_storage::MetricStorage)] -struct Metrics { - /// Time spent by the driver reading the full solve request body into - /// memory. 
- #[metric(labels("solver"))] - #[metric(buckets(0.0001, 0.0005, 0.002, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.75, 1, 1.5))] - solve_request_body_read_duration_seconds: prometheus::HistogramVec, -} - -impl Metrics { - fn get() -> &'static Metrics { - Metrics::instance(::observe::metrics::get_storage_registry()) - .expect("unexpected error getting metrics instance") - } - - fn measure_solve_transfer_time(solver: &str, time: Duration) { - Self::get() - .solve_request_body_read_duration_seconds - .with_label_values(&[solver]) - .observe(time.as_secs_f64()); - } -} diff --git a/crates/driver/src/tests/setup/mod.rs b/crates/driver/src/tests/setup/mod.rs index 5d1af8abde..65b5424475 100644 --- a/crates/driver/src/tests/setup/mod.rs +++ b/crates/driver/src/tests/setup/mod.rs @@ -1059,6 +1059,7 @@ impl Test { let res = self .client .post(format!("http://{}/{}/solve", self.driver.addr, solver)) + .header("X-Auction-Id", self.auction_id) .json(&driver::solve_req(self)) .send() .await From 889bfcef0dbe8cf24deec4995efb17d9a0104ab5 Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Tue, 17 Feb 2026 14:09:46 +0100 Subject: [PATCH 070/219] Add more tracing spans (#4162) # Description This PR just adds more tracing spans to further improve our coverage with tempo. Mostly focuses on the autopilot: building new auction, indexing events, winner selection --- .../domain/competition/winner_selection.rs | 2 + crates/autopilot/src/infra/persistence/mod.rs | 4 +- crates/autopilot/src/maintenance.rs | 3 +- crates/autopilot/src/run_loop.rs | 8 ++-- crates/autopilot/src/solvable_orders.rs | 38 ++++++++++++------- crates/cow-amm/src/registry.rs | 1 + crates/shared/src/event_handling.rs | 6 ++- crates/winner-selection/src/arbitrator.rs | 4 ++ 8 files changed, 46 insertions(+), 20 deletions(-) diff --git a/crates/autopilot/src/domain/competition/winner_selection.rs b/crates/autopilot/src/domain/competition/winner_selection.rs index 33f6346050..911cb63350 100644 --- a/crates/autopilot/src/domain/competition/winner_selection.rs +++ b/crates/autopilot/src/domain/competition/winner_selection.rs @@ -34,6 +34,7 @@ use { }, ::winner_selection::state::{HasState, RankedItem, ScoredItem, UnscoredItem}, std::collections::HashMap, + tracing::instrument, winner_selection::{self as winsel}, }; @@ -56,6 +57,7 @@ impl Arbitrator { } /// Runs the entire auction mechanism on the passed in solutions. + #[instrument(skip_all)] pub fn arbitrate(&self, bids: Vec>, auction: &domain::Auction) -> Ranking { let context = auction.into(); let mut bid_by_key = HashMap::with_capacity(bids.len()); diff --git a/crates/autopilot/src/infra/persistence/mod.rs b/crates/autopilot/src/infra/persistence/mod.rs index bf5f610a9d..879967cc4a 100644 --- a/crates/autopilot/src/infra/persistence/mod.rs +++ b/crates/autopilot/src/infra/persistence/mod.rs @@ -38,7 +38,7 @@ use { time::Duration, }, tokio::sync::mpsc, - tracing::Instrument, + tracing::{Instrument, instrument}, }; pub mod cli; @@ -301,6 +301,7 @@ impl Persistence { /// A variants of [`store_order_events`] where [`items`] is already an owned /// collection which allows us to move the logic to convert an item to a /// [`domain::OrderUid`] into the background task as well. + #[instrument(skip_all)] pub fn store_order_events_owned( &self, items: I, @@ -1005,6 +1006,7 @@ impl Persistence { /// Fetches orders which are currently inflight. Those orders should /// be omitted from the current auction to avoid onchain reverts. 
+ #[instrument(skip_all)] pub async fn fetch_in_flight_orders( &self, current_block: u64, diff --git a/crates/autopilot/src/maintenance.rs b/crates/autopilot/src/maintenance.rs index f17e66abf6..fc7f48e2c8 100644 --- a/crates/autopilot/src/maintenance.rs +++ b/crates/autopilot/src/maintenance.rs @@ -29,7 +29,7 @@ use { }, tokio::sync::watch, tokio_stream::wrappers::WatchStream, - tracing::Instrument, + tracing::{Instrument, instrument}, }; /// Component to sync with the maintenance logic that runs in a background task. @@ -56,6 +56,7 @@ pub enum SyncTarget { } impl MaintenanceSync { + #[instrument(skip_all)] pub async fn wait_until_block_processed(&self, target: SyncTarget) { let _timer = observe::metrics::metrics() .on_auction_overhead_start("autopilot", "wait_for_maintenance"); diff --git a/crates/autopilot/src/run_loop.rs b/crates/autopilot/src/run_loop.rs index 1b037f3ed3..47fefb0f82 100644 --- a/crates/autopilot/src/run_loop.rs +++ b/crates/autopilot/src/run_loop.rs @@ -192,6 +192,7 @@ impl RunLoop { }); } + #[instrument(skip_all)] async fn update_caches(&self, prev_block: &mut Option, is_leader: bool) -> BlockInfo { let current_block = *self.eth.current_block().borrow(); let time_since_last_block = current_block.observed_at.elapsed(); @@ -365,8 +366,7 @@ impl RunLoop { solution, solution_uid, block_deadline, - ) - .await; + ); } tracing::trace!(auction_id = ?auction.id, "settlement execution started"); observe::unsettled(&ranking, &auction); @@ -374,7 +374,7 @@ impl RunLoop { /// Starts settlement execution in a background task. The function is async /// only to get access to the locks. - async fn start_settlement_execution( + fn start_settlement_execution( self: &Arc, auction_id: Id, single_run_start: Instant, @@ -692,6 +692,7 @@ impl RunLoop { /// Execute the solver's solution. Returns Ok when the corresponding /// transaction has been mined. + #[instrument(skip_all, fields(driver = driver.name, solution_uid))] async fn settle( &self, driver: &infra::Driver, @@ -867,6 +868,7 @@ impl RunLoop { /// Removes orders that are currently being settled to avoid solver /// solutions conflicting with each other. + #[instrument(skip_all)] async fn remove_in_flight_orders( &self, mut auction: domain::RawAuctionData, diff --git a/crates/autopilot/src/solvable_orders.rs b/crates/autopilot/src/solvable_orders.rs index 92cc5605c2..226e6f0d02 100644 --- a/crates/autopilot/src/solvable_orders.rs +++ b/crates/autopilot/src/solvable_orders.rs @@ -32,6 +32,7 @@ use { }, strum::VariantNames, tokio::sync::Mutex, + tracing::instrument, }; #[derive(prometheus_metric_storage::MetricStorage)] @@ -79,6 +80,7 @@ impl Metrics { Metrics::instance(observe::metrics::get_storage_registry()).unwrap() } + #[instrument(skip_all)] fn track_filtered_orders(reason: &'static str, invalid_orders: &[OrderUid]) { if invalid_orders.is_empty() { return; @@ -96,6 +98,7 @@ impl Metrics { ); } + #[instrument(skip_all)] fn track_orders_in_final_auction(orders: &[Arc]) { let metrics = Metrics::get(); metrics.auction_creations.inc(); @@ -191,6 +194,7 @@ impl SolvableOrdersCache { /// Usually this method is called from update_task. If it isn't, which is /// the case in unit tests, then concurrent calls might overwrite each /// other's results. 
+ #[instrument(skip_all)] pub async fn update(&self, block: u64, store_events: bool) -> Result<()> { let start = Instant::now(); @@ -323,20 +327,22 @@ impl SolvableOrdersCache { .collect::>(); let auction = domain::RawAuctionData { block, - orders: orders - .into_iter() - .map(|order| { - let quote = db_solvable_orders - .quotes - .get(&order.metadata.uid.into()) - .map(|quote| quote.as_ref().clone()); - self.protocol_fees.apply( - order.as_ref(), - quote, - &surplus_capturing_jit_order_owners, - ) - }) - .collect(), + orders: tracing::info_span!("assemble_orders").in_scope(|| { + orders + .into_iter() + .map(|order| { + let quote = db_solvable_orders + .quotes + .get(&order.metadata.uid.into()) + .map(|quote| quote.as_ref().clone()); + self.protocol_fees.apply( + order.as_ref(), + quote, + &surplus_capturing_jit_order_owners, + ) + }) + .collect() + }), prices: prices .into_iter() .map(|(key, value)| { @@ -358,6 +364,7 @@ impl SolvableOrdersCache { Ok(()) } + #[instrument(skip_all)] async fn fetch_balances(&self, queries: Vec) -> HashMap { let fetched_balances = self .timed_future( @@ -425,6 +432,7 @@ impl SolvableOrdersCache { } /// Executed orders filtering in parallel. + #[instrument(skip_all)] async fn filter_invalid_orders( &self, mut orders: Vec>, @@ -526,6 +534,7 @@ fn find_presignature_pending_orders(orders: &[Arc]) -> Vec { /// Removes orders that can't possibly be settled because there isn't enough /// balance. +#[instrument(skip_all)] fn orders_with_balance( mut orders: Vec>, balances: &Balances, @@ -626,6 +635,7 @@ fn filter_dust_orders( (orders, removed) } +#[instrument(skip_all)] async fn get_orders_with_native_prices( orders: Vec>, native_price_estimator: &NativePriceUpdater, diff --git a/crates/cow-amm/src/registry.rs b/crates/cow-amm/src/registry.rs index 1a20c4b58f..1af451ac9e 100644 --- a/crates/cow-amm/src/registry.rs +++ b/crates/cow-amm/src/registry.rs @@ -68,6 +68,7 @@ impl Registry { } /// Returns all the deployed CoW AMMs + #[instrument(skip_all)] pub async fn amms(&self) -> Vec> { let mut result = vec![]; let lock = self.storage.read().await; diff --git a/crates/shared/src/event_handling.rs b/crates/shared/src/event_handling.rs index 3c785234ce..1c37aceca8 100644 --- a/crates/shared/src/event_handling.rs +++ b/crates/shared/src/event_handling.rs @@ -11,7 +11,7 @@ use { futures::{Stream, StreamExt, future}, std::{pin::Pin, sync::Arc}, tokio::sync::Mutex, - tracing::Instrument, + tracing::{Instrument, instrument}, }; // We expect that there is never a reorg that changes more than the last n @@ -261,6 +261,7 @@ where } /// Defines block range, for which events should be fetched + #[instrument(skip_all)] async fn event_block_range(&self) -> Result { let handled_blocks = if self.last_handled_blocks.is_empty() { let last_handled_block = self.store.last_event_block().await?; @@ -380,6 +381,7 @@ where } /// Get new events from the contract and insert them into the database. 
+ #[instrument(skip_all)] pub async fn update_events(&mut self) -> Result<()> { let event_range = self.event_block_range().await?; @@ -396,6 +398,7 @@ where Ok(()) } + #[instrument(skip_all)] async fn update_events_from_old_blocks(&mut self, range: RangeInclusive) -> Result<()> { // first get the blocks needed to update `last_handled_blocks` because if it // fails, it's safer to fail at the beginning of the function before we @@ -466,6 +469,7 @@ where Ok(()) } + #[instrument(skip_all)] async fn update_events_from_latest_blocks( &mut self, latest_blocks: &[BlockNumberHash], diff --git a/crates/winner-selection/src/arbitrator.rs b/crates/winner-selection/src/arbitrator.rs index fa8740cd3a..b15c4cdc3a 100644 --- a/crates/winner-selection/src/arbitrator.rs +++ b/crates/winner-selection/src/arbitrator.rs @@ -19,6 +19,7 @@ use { cmp::Reverse, collections::{HashMap, HashSet}, }, + tracing::instrument, }; /// Auction arbitrator responsible for selecting winning solutions. @@ -33,6 +34,7 @@ impl Arbitrator { /// Runs the auction mechanism on solutions. /// /// Takes solutions and auction context, returns a ranking with winners. + #[instrument(skip_all)] pub fn arbitrate( &self, solutions: Vec>, @@ -55,6 +57,7 @@ impl Arbitrator { } /// Removes unfair solutions from the set of all solutions. + #[instrument(skip_all)] fn partition_unfair_solutions( &self, solutions: Vec>, @@ -611,6 +614,7 @@ impl Arbitrator { } /// Compute reference scores for winning solvers. + #[instrument(skip_all)] pub fn compute_reference_scores(&self, ranking: &Ranking) -> HashMap { let mut reference_scores = HashMap::default(); From 77bf1886844ad8ac6cf816ecc2c5a8740f2e2f19 Mon Sep 17 00:00:00 2001 From: Martin Magnus Date: Wed, 18 Feb 2026 10:48:18 +0100 Subject: [PATCH 071/219] [TRIVIAL] Fix granular autopilot sync (#4165) # Description https://github.com/cowprotocol/services/pull/4144 introduced more granular sync mechanisms for the autopilot run loop. In a commit addressing some comments an error must have sneaked which causes the autopilot run loop to wait for fully processed blocks and the `/settle` detection to wait for partially indexed blocks - the exact opposite. This causes us to wait a lot longer than necessary before building a new auction. Also our metrics still suggest that the run loop was 1 block longer than it actually was. 
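For context, the sync primitive involved here is essentially a pair of watch channels tracking the last partially and the last fully processed block, and each `SyncTarget` variant has to read from the channel that tracks the same kind of progress. Below is a simplified sketch of the corrected pairing (assuming plain `tokio::sync::watch::Receiver<u64>` fields; the real implementation in `crates/autopilot/src/maintenance.rs` differs in details):

```rust
use tokio::sync::watch;

// Simplified stand-ins for the real maintenance sync state.
struct MaintenanceSync {
    partially_processed_block: watch::Receiver<u64>,
    fully_processed_block: watch::Receiver<u64>,
}

enum SyncTarget {
    FullyProcessed(u64),
    PartiallyProcessed(u64),
}

impl MaintenanceSync {
    // Each target must await the channel tracking the *same* kind of progress;
    // swapping the two match arms is exactly the bug this patch fixes.
    async fn wait_until_block_processed(&self, target: SyncTarget) {
        let (channel, target_block) = match target {
            SyncTarget::FullyProcessed(block) => (&self.fully_processed_block, block),
            SyncTarget::PartiallyProcessed(block) => (&self.partially_processed_block, block),
        };
        let mut updates = channel.clone();
        // `wait_for` resolves as soon as the observed value reaches the target.
        let _ = updates
            .wait_for(|processed| *processed >= target_block)
            .await;
    }
}
```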
--- crates/autopilot/src/maintenance.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/autopilot/src/maintenance.rs b/crates/autopilot/src/maintenance.rs index fc7f48e2c8..326f196495 100644 --- a/crates/autopilot/src/maintenance.rs +++ b/crates/autopilot/src/maintenance.rs @@ -68,8 +68,8 @@ impl MaintenanceSync { async fn wait_inner(&self, target: SyncTarget) { let (relevant_updates, target_block) = match target { - SyncTarget::FullyProcessed(block) => (&self.partially_processed_block, block), - SyncTarget::PartiallyProcessed(block) => (&self.fully_processed_block, block), + SyncTarget::FullyProcessed(block) => (&self.fully_processed_block, block), + SyncTarget::PartiallyProcessed(block) => (&self.partially_processed_block, block), }; if *relevant_updates.borrow() >= target_block { From 612246e08a74e6522d4e83878e6088aef460f2d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Duarte?= Date: Wed, 18 Feb 2026 09:49:43 +0000 Subject: [PATCH 072/219] =?UTF-8?q?Upgrade=20axum=200.6=20=E2=86=92=200.8?= =?UTF-8?q?=20and=20related=20HTTP=20dependencies=20(#4164)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description Upgrade the HTTP dependency stack: - axum: 0.6 → 0.8 - hyper: 0.14 → 1 - http-body: 0.4 → 1 - tower: 0.4 → 0.5 - tower-http: 0.4 → 0.6 - reqwest: 0.11 → 0.12 (version 0.13 changes the default TLS stack so it will be further upgraded separately) # Changes - [ ] Replace axum::Server with tokio::net::TcpListener + axum::serve - [ ] Update route parameter syntax from :param to {param} - [ ] Remove body generic from middleware Next type - [ ] Replace hyper::body::to_bytes with axum::body::to_bytes - [ ] Replace hyper::StatusCode with axum::http::StatusCode throughout - [ ] Update return types from hyper::Error to std::io::Error - [ ] Replace HttpBody trait usage with axum::body::to_bytes helper - [ ] Remove now-unused direct hyper/http-body deps from crates - [ ] Convert Mock solver Default impl to async new() since TcpListener::bind is async ## How to test Staging --- Cargo.lock | 302 ++++++++---------- Cargo.toml | 12 +- crates/autopilot/Cargo.toml | 1 - crates/autopilot/src/infra/api.rs | 8 +- .../autopilot/src/infra/solvers/dto/solve.rs | 4 +- crates/driver/Cargo.toml | 2 - crates/driver/src/domain/competition/mod.rs | 3 +- .../src/domain/competition/pre_processing.rs | 17 +- crates/driver/src/infra/api/error.rs | 14 +- crates/driver/src/infra/api/mod.rs | 15 +- .../driver/src/infra/api/routes/gasprice.rs | 2 +- .../driver/src/infra/api/routes/quote/mod.rs | 2 +- .../driver/src/infra/api/routes/reveal/mod.rs | 2 +- .../driver/src/infra/api/routes/settle/mod.rs | 2 +- .../driver/src/infra/api/routes/solve/mod.rs | 2 +- crates/driver/src/tests/setup/mod.rs | 14 +- crates/driver/src/tests/setup/orderbook.rs | 13 +- crates/driver/src/tests/setup/solver.rs | 7 +- crates/e2e/Cargo.toml | 1 - crates/e2e/src/api/liquorice/server.rs | 7 +- crates/e2e/src/api/zeroex.rs | 7 +- crates/e2e/src/setup/proxy.rs | 10 +- crates/e2e/src/setup/solver/mock.rs | 25 +- crates/e2e/tests/e2e/cow_amm.rs | 6 +- crates/e2e/tests/e2e/jit_orders.rs | 2 +- .../e2e/liquidity_source_notification.rs | 2 +- crates/e2e/tests/e2e/quoting.rs | 2 +- crates/e2e/tests/e2e/solver_competition.rs | 4 +- crates/observe/src/metrics.rs | 6 +- crates/orderbook/Cargo.toml | 1 - crates/orderbook/src/api.rs | 68 ++-- crates/orderbook/src/api/cancel_orders.rs | 2 +- .../src/api/get_solver_competition.rs | 3 +- crates/orderbook/src/run.rs | 16 +- 
crates/solvers/Cargo.toml | 1 - crates/solvers/src/api/mod.rs | 10 +- 36 files changed, 284 insertions(+), 311 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7e6e51088b..f4717183d6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -367,7 +367,7 @@ dependencies = [ "lru 0.16.3", "parking_lot", "pin-project", - "reqwest 0.12.28", + "reqwest", "serde", "serde_json", "thiserror 2.0.17", @@ -435,7 +435,7 @@ dependencies = [ "alloy-transport-ws", "futures", "pin-project", - "reqwest 0.12.28", + "reqwest", "serde", "serde_json", "tokio", @@ -714,7 +714,7 @@ checksum = "36dfa207caf6b528b9466c714626f5b2dfd5e8d4595a74631d5670672dac102b" dependencies = [ "alloy-json-rpc", "alloy-transport", - "reqwest 0.12.28", + "reqwest", "serde_json", "tower 0.5.3", "tracing", @@ -1162,7 +1162,7 @@ dependencies = [ "anyhow", "app-data", "async-trait", - "axum", + "axum 0.8.8", "bigdecimal", "bytes", "bytes-hex", @@ -1179,7 +1179,6 @@ dependencies = [ "futures", "hex-literal", "humantime", - "hyper 0.14.32", "indexmap 2.13.0", "itertools 0.14.0", "maplit", @@ -1193,7 +1192,7 @@ dependencies = [ "prometheus", "prometheus-metric-storage", "rand 0.8.5", - "reqwest 0.11.27", + "reqwest", "rust_decimal", "s3", "serde", @@ -1208,8 +1207,8 @@ dependencies = [ "tokio", "tokio-stream", "toml", - "tower 0.4.13", - "tower-http 0.4.4", + "tower 0.5.3", + "tower-http", "tracing", "url", "vergen", @@ -1670,7 +1669,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" dependencies = [ "async-trait", - "axum-core", + "axum-core 0.3.4", "bitflags 1.3.2", "bytes", "futures-util", @@ -1678,21 +1677,50 @@ dependencies = [ "http-body 0.4.6", "hyper 0.14.32", "itoa", - "matchit", + "matchit 0.7.3", "memchr", "mime", "percent-encoding", "pin-project-lite", "rustversion", "serde", + "sync_wrapper 0.1.2", + "tower 0.4.13", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum" +version = "0.8.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b52af3cb4058c895d37317bb27508dccc8e5f2d39454016b297bf4a400597b8" +dependencies = [ + "axum-core 0.5.6", + "bytes", + "form_urlencoded", + "futures-util", + "http 1.4.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.8.1", + "hyper-util", + "itoa", + "matchit 0.8.4", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "serde_core", "serde_json", "serde_path_to_error", "serde_urlencoded", - "sync_wrapper 0.1.2", + "sync_wrapper 1.0.2", "tokio", - "tower 0.4.13", + "tower 0.5.3", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -1712,6 +1740,25 @@ dependencies = [ "tower-service", ] +[[package]] +name = "axum-core" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1" +dependencies = [ + "bytes", + "futures-core", + "http 1.4.0", + "http-body 1.0.1", + "http-body-util", + "mime", + "pin-project-lite", + "sync_wrapper 1.0.2", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "backtrace" version = "0.3.76" @@ -2259,7 +2306,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "reqwest 0.11.27", + "reqwest", "serde", "serde_json", "syn 2.0.114", @@ -2287,9 +2334,9 @@ dependencies = [ [[package]] name = "cookie" -version = "0.17.0" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7efb37c3e1ccb1ff97164ad95ac1606e8ccd35b3fa0a7d99a304c7f4a428cc24" 
+checksum = "4ddef33a339a91ea89fb53151bd0a4689cfce27055c291dfa69945475d22c747" dependencies = [ "percent-encoding", "time", @@ -2298,12 +2345,13 @@ dependencies = [ [[package]] name = "cookie_store" -version = "0.20.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "387461abbc748185c3a6e1673d826918b450b87ff22639429c694619a83b6cf6" +checksum = "15b2c103cf610ec6cae3da84a766285b42fd16aad564758459e6ecf128c75206" dependencies = [ "cookie", - "idna 0.3.0", + "document-features", + "idna", "log", "publicsuffix", "serde", @@ -2671,6 +2719,15 @@ dependencies = [ "syn 2.0.114", ] +[[package]] +name = "document-features" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4b8a88685455ed29a21542a33abd9cb6510b6b129abadabdcef0f4c55bc8f61" +dependencies = [ + "litrs", +] + [[package]] name = "dotenvy" version = "0.15.7" @@ -2691,7 +2748,7 @@ dependencies = [ "anyhow", "app-data", "async-trait", - "axum", + "axum 0.8.8", "bigdecimal", "bytes-hex", "chain", @@ -2705,10 +2762,8 @@ dependencies = [ "ethrpc", "futures", "hex-literal", - "http-body 0.4.6", "humantime", "humantime-serde", - "hyper 0.14.32", "itertools 0.14.0", "maplit", "mimalloc", @@ -2720,7 +2775,7 @@ dependencies = [ "prometheus", "prometheus-metric-storage", "rand 0.8.5", - "reqwest 0.11.27", + "reqwest", "s3", "serde", "serde-ext", @@ -2734,8 +2789,8 @@ dependencies = [ "tikv-jemallocator", "tokio", "toml", - "tower 0.4.13", - "tower-http 0.4.4", + "tower 0.5.3", + "tower-http", "tracing", "url", "vergen", @@ -2762,7 +2817,7 @@ dependencies = [ "anyhow", "app-data", "autopilot", - "axum", + "axum 0.8.8", "bigdecimal", "chrono", "clap", @@ -2774,14 +2829,13 @@ dependencies = [ "ethrpc", "futures", "hex-literal", - "hyper 0.14.32", "itertools 0.14.0", "model", "number", "observe", "orderbook", "refunder", - "reqwest 0.11.27", + "reqwest", "rstest", "serde_json", "shared", @@ -2924,13 +2978,13 @@ dependencies = [ "prometheus", "prometheus-metric-storage", "rand 0.8.5", - "reqwest 0.11.27", + "reqwest", "scopeguard", "serde", "serde_json", "tokio", "tokio-stream", - "tower 0.4.13", + "tower 0.5.3", "tracing", "url", ] @@ -3492,12 +3546,6 @@ dependencies = [ "pin-project-lite", ] -[[package]] -name = "http-range-header" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" - [[package]] name = "httparse" version = "1.10.1" @@ -3564,6 +3612,7 @@ dependencies = [ "http 1.4.0", "http-body 1.0.1", "httparse", + "httpdate", "itoa", "pin-project-lite", "pin-utils", @@ -3629,19 +3678,6 @@ dependencies = [ "tower-service", ] -[[package]] -name = "hyper-tls" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" -dependencies = [ - "bytes", - "hyper 0.14.32", - "native-tls", - "tokio", - "tokio-native-tls", -] - [[package]] name = "hyper-tls" version = "0.6.0" @@ -3677,9 +3713,11 @@ dependencies = [ "percent-encoding", "pin-project-lite", "socket2 0.6.1", + "system-configuration", "tokio", "tower-service", "tracing", + "windows-registry", ] [[package]] @@ -3793,16 +3831,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" -[[package]] -name = "idna" -version = "0.3.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" -dependencies = [ - "unicode-bidi", - "unicode-normalization", -] - [[package]] name = "idna" version = "1.1.0" @@ -4091,6 +4119,12 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" +[[package]] +name = "litrs" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d3d7f243d5c5a8b9bb5d6dd2b1602c0cb0b9db1621bafc7ed66e35ff9fe092" + [[package]] name = "lock_api" version = "0.4.14" @@ -4180,6 +4214,12 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" +[[package]] +name = "matchit" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" + [[package]] name = "md-5" version = "0.10.6" @@ -4533,7 +4573,7 @@ name = "observe" version = "0.1.0" dependencies = [ "async-trait", - "axum", + "axum 0.8.8", "chrono", "console-subscriber", "futures", @@ -4641,7 +4681,7 @@ dependencies = [ "bytes", "http 1.4.0", "opentelemetry", - "reqwest 0.12.28", + "reqwest", ] [[package]] @@ -4656,7 +4696,7 @@ dependencies = [ "opentelemetry-proto", "opentelemetry_sdk", "prost 0.13.5", - "reqwest 0.12.28", + "reqwest", "thiserror 2.0.17", "tokio", "tonic 0.13.1", @@ -4712,7 +4752,7 @@ dependencies = [ "anyhow", "app-data", "async-trait", - "axum", + "axum 0.8.8", "bigdecimal", "cached", "chain", @@ -4725,7 +4765,6 @@ dependencies = [ "futures", "hex-literal", "humantime", - "hyper 0.14.32", "mimalloc", "mockall", "model", @@ -4736,7 +4775,7 @@ dependencies = [ "order-validation", "prometheus", "prometheus-metric-storage", - "reqwest 0.11.27", + "reqwest", "serde", "serde_json", "serde_with", @@ -4746,8 +4785,8 @@ dependencies = [ "thiserror 1.0.69", "tikv-jemallocator", "tokio", - "tower 0.4.13", - "tower-http 0.4.4", + "tower 0.5.3", + "tower-http", "tracing", "url", "vergen", @@ -5186,7 +5225,7 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f42ea446cab60335f76979ec15e12619a2165b5ae2c12166bef27d283a9fadf" dependencies = [ - "idna 1.1.0", + "idna", "psl-types", ] @@ -5308,7 +5347,7 @@ dependencies = [ "observe", "prometheus", "prometheus-metric-storage", - "reqwest 0.11.27", + "reqwest", "thiserror 1.0.69", "tokio", "tracing", @@ -5423,51 +5462,6 @@ version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" -[[package]] -name = "reqwest" -version = "0.11.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" -dependencies = [ - "async-compression", - "base64 0.21.7", - "bytes", - "cookie", - "cookie_store", - "encoding_rs", - "futures-core", - "futures-util", - "h2 0.3.27", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.32", - "hyper-tls 0.5.0", - "ipnet", - "js-sys", - "log", - "mime", - "native-tls", - "once_cell", - "percent-encoding", - "pin-project-lite", - "rustls-pemfile", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper 0.1.2", - "system-configuration", - "tokio", - "tokio-native-tls", - "tokio-util", - "tower-service", - "url", - 
"wasm-bindgen", - "wasm-bindgen-futures", - "wasm-streams", - "web-sys", - "winreg", -] - [[package]] name = "reqwest" version = "0.12.28" @@ -5476,17 +5470,23 @@ checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" dependencies = [ "base64 0.22.1", "bytes", + "cookie", + "cookie_store", + "encoding_rs", "futures-channel", "futures-core", "futures-util", + "h2 0.4.13", "http 1.4.0", "http-body 1.0.1", "http-body-util", "hyper 1.8.1", - "hyper-tls 0.6.0", + "hyper-rustls 0.27.7", + "hyper-tls", "hyper-util", "js-sys", "log", + "mime", "native-tls", "percent-encoding", "pin-project-lite", @@ -5497,12 +5497,14 @@ dependencies = [ "sync_wrapper 1.0.2", "tokio", "tokio-native-tls", + "tokio-util", "tower 0.5.3", - "tower-http 0.6.8", + "tower-http", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", + "wasm-streams", "web-sys", ] @@ -5732,15 +5734,6 @@ dependencies = [ "security-framework 3.5.1", ] -[[package]] -name = "rustls-pemfile" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" -dependencies = [ - "base64 0.21.7", -] - [[package]] name = "rustls-pki-types" version = "1.13.2" @@ -6179,7 +6172,7 @@ dependencies = [ "rand 0.8.5", "rate-limit", "regex", - "reqwest 0.11.27", + "reqwest", "rust_decimal", "serde", "serde_json", @@ -6297,7 +6290,7 @@ version = "0.1.0" dependencies = [ "alloy", "anyhow", - "axum", + "axum 0.8.8", "bigdecimal", "chain", "chrono", @@ -6307,7 +6300,6 @@ dependencies = [ "derive_more 1.0.0", "ethrpc", "futures", - "hyper 0.14.32", "itertools 0.14.0", "mimalloc", "model", @@ -6316,7 +6308,7 @@ dependencies = [ "observe", "prometheus", "prometheus-metric-storage", - "reqwest 0.11.27", + "reqwest", "serde", "serde_json", "serde_with", @@ -6327,8 +6319,8 @@ dependencies = [ "tikv-jemallocator", "tokio", "toml", - "tower 0.4.13", - "tower-http 0.4.4", + "tower 0.5.3", + "tower-http", "tracing", "url", "vergen", @@ -7104,7 +7096,7 @@ checksum = "76c4eb7a4e9ef9d4763600161f12f5070b92a578e1b634db88a6887844c91a13" dependencies = [ "async-stream", "async-trait", - "axum", + "axum 0.6.20", "base64 0.21.7", "bytes", "h2 0.3.27", @@ -7188,41 +7180,28 @@ dependencies = [ "tracing", ] -[[package]] -name = "tower-http" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" -dependencies = [ - "bitflags 2.10.0", - "bytes", - "futures-core", - "futures-util", - "http 0.2.12", - "http-body 0.4.6", - "http-range-header", - "pin-project-lite", - "tower-layer", - "tower-service", - "tracing", -] - [[package]] name = "tower-http" version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ + "async-compression", "bitflags 2.10.0", "bytes", + "futures-core", "futures-util", "http 1.4.0", "http-body 1.0.1", + "http-body-util", "iri-string", "pin-project-lite", + "tokio", + "tokio-util", "tower 0.5.3", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -7444,7 +7423,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" dependencies = [ "form_urlencoded", - "idna 1.1.0", + "idna", "percent-encoding", "serde", "serde_derive", @@ -7734,6 +7713,17 @@ version = "0.2.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" +[[package]] +name = "windows-registry" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02752bf7fbdcce7f2a27a742f798510f3e5ad88dbe84871e5168e2120c3d5720" +dependencies = [ + "windows-link", + "windows-result", + "windows-strings", +] + [[package]] name = "windows-result" version = "0.4.1" @@ -7995,16 +7985,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "winreg" -version = "0.50.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" -dependencies = [ - "cfg-if", - "windows-sys 0.48.0", -] - [[package]] name = "wit-bindgen" version = "0.46.0" diff --git a/Cargo.toml b/Cargo.toml index 84bcbc9b5a..3099d89977 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,7 +9,7 @@ alloy = { version = "1.1.0", default-features = false } alloy-signer = { version = "1.1.0", default-features = false} anyhow = "1.0.100" async-trait = "0.1.80" -axum = "0.6" +axum = "0.8" bigdecimal = "0.3" cached = { version = "0.49.3", default-features = false } chrono = { version = "0.4.38", default-features = false } @@ -24,10 +24,10 @@ flate2 = "1.0.30" futures = "0.3.30" const-hex = "1.17.0" hex-literal = "0.4.1" -http-body = "0.4.6" +http-body = "1" +http-body-util = "0.1" humantime = "2.1.0" humantime-serde = "1.1.1" -hyper = "0.14.29" indexmap = "2.2.6" itertools = "0.14" maplit = "1.0.2" @@ -37,7 +37,7 @@ prometheus = "0.13.4" prometheus-metric-storage = "0.5.0" rand = "0.8.5" regex = "1.10.4" -reqwest = "0.11.27" +reqwest = "0.12" rstest = "0.26" ruint = { version = "1.17.2", default-features = false } serde = { version = "1.0.203", features = ["derive"] } @@ -94,8 +94,8 @@ testlib = { path = "crates/testlib" } winner-selection = { path = "crates/winner-selection" } time = "0.3.47" tiny-keccak = "2.0.2" -tower = "0.4" -tower-http = "0.4" +tower = "0.5" +tower-http = "0.6" tracing-opentelemetry = "0.31" tracing-serde = "0.2" vergen = "8" diff --git a/crates/autopilot/Cargo.toml b/crates/autopilot/Cargo.toml index 97750f6d16..ea51d503a6 100644 --- a/crates/autopilot/Cargo.toml +++ b/crates/autopilot/Cargo.toml @@ -33,7 +33,6 @@ database = { workspace = true } derive_more = { workspace = true } ethrpc = { workspace = true } futures = { workspace = true } -hyper = { workspace = true } observe = { workspace = true } const-hex = { workspace = true } hex-literal = { workspace = true } diff --git a/crates/autopilot/src/infra/api.rs b/crates/autopilot/src/infra/api.rs index d7c7477af5..4d98afe86b 100644 --- a/crates/autopilot/src/infra/api.rs +++ b/crates/autopilot/src/infra/api.rs @@ -45,14 +45,14 @@ pub async fn serve( estimator: Arc, max_timeout: Duration, shutdown: oneshot::Receiver<()>, -) -> Result<(), hyper::Error> { +) -> Result<(), std::io::Error> { let state = State { estimator, allowed_timeout: MIN_TIMEOUT..=max_timeout, }; let app = Router::new() - .route("/native_price/:token", get(get_native_price)) + .route("/native_price/{token}", get(get_native_price)) .with_state(state) .layer( tower::ServiceBuilder::new() @@ -60,10 +60,10 @@ pub async fn serve( .map_request(record_trace_id), ); - let server = axum::Server::bind(&addr).serve(app.into_make_service()); + let listener = tokio::net::TcpListener::bind(addr).await?; tracing::info!(?addr, "serving HTTP API"); - server + axum::serve(listener, app) .with_graceful_shutdown(async { 
shutdown.await.ok(); }) diff --git a/crates/autopilot/src/infra/solvers/dto/solve.rs b/crates/autopilot/src/infra/solvers/dto/solve.rs index ed6aeb9d9f..da80a42fd4 100644 --- a/crates/autopilot/src/infra/solvers/dto/solve.rs +++ b/crates/autopilot/src/infra/solvers/dto/solve.rs @@ -90,8 +90,8 @@ impl InjectIntoHttpRequest for Request { // manually set the content type header for JSON since // we can't use `request.json(self)` .header( - hyper::header::CONTENT_TYPE, - hyper::header::HeaderValue::from_static("application/json") + reqwest::header::CONTENT_TYPE, + reqwest::header::HeaderValue::from_static("application/json") ) } diff --git a/crates/driver/Cargo.toml b/crates/driver/Cargo.toml index f5a725802d..e174bb026f 100644 --- a/crates/driver/Cargo.toml +++ b/crates/driver/Cargo.toml @@ -30,10 +30,8 @@ ethrpc = { workspace = true } futures = { workspace = true } const-hex = { workspace = true } hex-literal = { workspace = true } -http-body = { workspace = true } humantime = { workspace = true } humantime-serde = { workspace = true } -hyper = { workspace = true } itertools = { workspace = true } mimalloc = { workspace = true, optional = true } tikv-jemallocator = { workspace = true } diff --git a/crates/driver/src/domain/competition/mod.rs b/crates/driver/src/domain/competition/mod.rs index f2761bbf32..392ea46051 100644 --- a/crates/driver/src/domain/competition/mod.rs +++ b/crates/driver/src/domain/competition/mod.rs @@ -22,9 +22,8 @@ use { util::math, }, alloy::primitives::Bytes, - axum::body::Body, + axum::{body::Body, http::Request}, futures::{StreamExt, future::Either, stream::FuturesUnordered}, - hyper::Request, itertools::Itertools, std::{ cmp::Reverse, diff --git a/crates/driver/src/domain/competition/pre_processing.rs b/crates/driver/src/domain/competition/pre_processing.rs index ccd17ad8cc..a4d5105705 100644 --- a/crates/driver/src/domain/competition/pre_processing.rs +++ b/crates/driver/src/domain/competition/pre_processing.rs @@ -7,14 +7,21 @@ use { eth, liquidity, }, - infra::{self, api::routes::solve::dto::SolveRequest, observe::metrics, tokens}, + infra::{ + self, + api::{REQUEST_BODY_LIMIT, routes::solve::dto::SolveRequest}, + observe::metrics, + tokens, + }, }, alloy::primitives::{Bytes, FixedBytes}, anyhow::{Context, Result}, - axum::body::Body, + axum::{ + body::{self, Body}, + http::Request, + }, chrono::Utc, futures::{FutureExt, StreamExt, future::BoxFuture, stream::FuturesUnordered}, - hyper::{Request, body::Bytes as RequestBytes}, itertools::Itertools, model::{ interaction::InteractionData, @@ -557,13 +564,13 @@ fn init_auction_id_in_span(id: Option) { } #[instrument(skip_all)] -async fn collect_request_body(request: Request) -> Result { +async fn collect_request_body(request: Request) -> Result { tracing::trace!("start streaming request body"); let _timer = observe::metrics::metrics().on_auction_overhead_start("driver", "stream_http_body"); let start = Instant::now(); - let body_bytes = hyper::body::to_bytes(request.into_body()) + let body_bytes = axum::body::to_bytes(request.into_body(), REQUEST_BODY_LIMIT) .await .context("failed to stream request body")?; diff --git a/crates/driver/src/infra/api/error.rs b/crates/driver/src/infra/api/error.rs index 52629bdf3f..b7720c480c 100644 --- a/crates/driver/src/infra/api/error.rs +++ b/crates/driver/src/infra/api/error.rs @@ -32,7 +32,7 @@ pub struct Error { description: &'static str, } -impl From for (hyper::StatusCode, axum::Json) { +impl From for (axum::http::StatusCode, axum::Json) { fn from(value: Kind) -> Self { let 
description = match value { Kind::QuotingFailed => "No valid quote found", @@ -59,7 +59,7 @@ impl From for (hyper::StatusCode, axum::Json) { Kind::MalformedRequest => "Could not parse the request", }; ( - hyper::StatusCode::BAD_REQUEST, + axum::http::StatusCode::BAD_REQUEST, axum::Json(Error { kind: value, description, @@ -68,7 +68,7 @@ impl From for (hyper::StatusCode, axum::Json) { } } -impl From for (hyper::StatusCode, axum::Json) { +impl From for (axum::http::StatusCode, axum::Json) { fn from(value: quote::Error) -> Self { let error = match value { quote::Error::QuotingFailed(_) => Kind::QuotingFailed, @@ -82,7 +82,7 @@ impl From for (hyper::StatusCode, axum::Json) { } } -impl From for (hyper::StatusCode, axum::Json) { +impl From for (axum::http::StatusCode, axum::Json) { fn from(value: competition::Error) -> Self { let error = match value { competition::Error::SolutionNotAvailable => Kind::SolutionNotAvailable, @@ -97,13 +97,13 @@ impl From for (hyper::StatusCode, axum::Json) { } } -impl From for (hyper::StatusCode, axum::Json) { +impl From for (axum::http::StatusCode, axum::Json) { fn from(_: blockchain::Error) -> Self { Kind::Unknown.into() } } -impl From for (hyper::StatusCode, axum::Json) { +impl From for (axum::http::StatusCode, axum::Json) { fn from(value: api::routes::AuctionError) -> Self { let error = match value { api::routes::AuctionError::InvalidAuctionId => Kind::InvalidAuctionId, @@ -116,7 +116,7 @@ impl From for (hyper::StatusCode, axum::Json) } } -impl From for (hyper::StatusCode, axum::Json) { +impl From for (axum::http::StatusCode, axum::Json) { fn from(value: api::routes::OrderError) -> Self { let error = match value { api::routes::OrderError::SameTokens => Kind::QuoteSameTokens, diff --git a/crates/driver/src/infra/api/mod.rs b/crates/driver/src/infra/api/mod.rs index 38d106a258..a6ac2cde60 100644 --- a/crates/driver/src/infra/api/mod.rs +++ b/crates/driver/src/infra/api/mod.rs @@ -31,7 +31,7 @@ use { mod error; pub mod routes; -const REQUEST_BODY_LIMIT: usize = 10 * 1024 * 1024; +pub const REQUEST_BODY_LIMIT: usize = 10 * 1024 * 1024; pub struct Api { pub solvers: Vec, @@ -53,7 +53,7 @@ impl Api { shutdown: impl Future + Send + 'static, order_priority_strategies: Vec, app_data_retriever: Option, - ) -> Result<(), hyper::Error> { + ) -> Result<(), std::io::Error> { // Add middleware. let mut app = axum::Router::new().layer(tower::ServiceBuilder::new().layer( tower_http::limit::RequestBodyLimitLayer::new(REQUEST_BODY_LIMIT), @@ -148,12 +148,15 @@ impl Api { ); // Start the server. 
- let server = axum::Server::bind(&self.addr).serve(app.into_make_service()); - tracing::info!(port = server.local_addr().port(), "serving driver"); + let listener = tokio::net::TcpListener::bind(self.addr).await?; + let local_addr = listener.local_addr()?; + tracing::info!(port = local_addr.port(), "serving driver"); if let Some(addr_sender) = self.addr_sender { - addr_sender.send(server.local_addr()).unwrap(); + addr_sender.send(local_addr).unwrap(); } - server.with_graceful_shutdown(shutdown).await + axum::serve(listener, app) + .with_graceful_shutdown(shutdown) + .await } fn build_order_sorting_strategies( diff --git a/crates/driver/src/infra/api/routes/gasprice.rs b/crates/driver/src/infra/api/routes/gasprice.rs index 175d13dec6..f495e3ec66 100644 --- a/crates/driver/src/infra/api/routes/gasprice.rs +++ b/crates/driver/src/infra/api/routes/gasprice.rs @@ -12,7 +12,7 @@ pub(in crate::infra::api) fn gasprice(app: axum::Router) -> axum::Rout #[instrument(skip(eth))] async fn route( eth: axum::extract::State, -) -> Result, (hyper::StatusCode, axum::Json)> { +) -> Result, (axum::http::StatusCode, axum::Json)> { // For simplicity we use the default time limit (None) let gas_price = eth.gas_price().await?; diff --git a/crates/driver/src/infra/api/routes/quote/mod.rs b/crates/driver/src/infra/api/routes/quote/mod.rs index a55e7490cb..d3310c5292 100644 --- a/crates/driver/src/infra/api/routes/quote/mod.rs +++ b/crates/driver/src/infra/api/routes/quote/mod.rs @@ -17,7 +17,7 @@ pub(in crate::infra::api) fn quote(router: axum::Router) -> axum::Router< async fn route( state: axum::extract::State, order: axum::extract::Query, -) -> Result, (hyper::StatusCode, axum::Json)> { +) -> Result, (axum::http::StatusCode, axum::Json)> { let handle_request = async { let order = order.0.into_domain(); observe::quoting(&order); diff --git a/crates/driver/src/infra/api/routes/reveal/mod.rs b/crates/driver/src/infra/api/routes/reveal/mod.rs index 7a2d6081e5..08e388d761 100644 --- a/crates/driver/src/infra/api/routes/reveal/mod.rs +++ b/crates/driver/src/infra/api/routes/reveal/mod.rs @@ -18,7 +18,7 @@ pub(in crate::infra::api) fn reveal(router: axum::Router) -> axum::Router async fn route( state: axum::extract::State, req: axum::Json, -) -> Result, (hyper::StatusCode, axum::Json)> { +) -> Result, (axum::http::StatusCode, axum::Json)> { let auction_id = auction::Id::try_from(req.auction_id).map_err(api::routes::AuctionError::from)?; let handle_request = async { diff --git a/crates/driver/src/infra/api/routes/settle/mod.rs b/crates/driver/src/infra/api/routes/settle/mod.rs index 2e53994c8b..fe3634da69 100644 --- a/crates/driver/src/infra/api/routes/settle/mod.rs +++ b/crates/driver/src/infra/api/routes/settle/mod.rs @@ -18,7 +18,7 @@ pub(in crate::infra::api) fn settle(router: axum::Router) -> axum::Router async fn route( state: axum::extract::State, req: axum::Json, -) -> Result<(), (hyper::StatusCode, axum::Json)> { +) -> Result<(), (axum::http::StatusCode, axum::Json)> { let auction_id = auction::Id::try_from(req.auction_id).map_err(api::routes::AuctionError::from)?; let solver = state.solver().name().to_string(); diff --git a/crates/driver/src/infra/api/routes/solve/mod.rs b/crates/driver/src/infra/api/routes/solve/mod.rs index 417ddd8b5e..baa78c6b4d 100644 --- a/crates/driver/src/infra/api/routes/solve/mod.rs +++ b/crates/driver/src/infra/api/routes/solve/mod.rs @@ -20,7 +20,7 @@ async fn route( // This delays interpreting the data as much as possible and allows // logging how long the raw data transfer takes. 
request: Request, -) -> Result, (hyper::StatusCode, axum::Json)> { +) -> Result, (axum::http::StatusCode, axum::Json)> { let solver = state.solver().name().as_str(); let handle_request = async { diff --git a/crates/driver/src/tests/setup/mod.rs b/crates/driver/src/tests/setup/mod.rs index 65b5424475..9b0416a128 100644 --- a/crates/driver/src/tests/setup/mod.rs +++ b/crates/driver/src/tests/setup/mod.rs @@ -41,10 +41,10 @@ use { providers::Provider, signers::local::PrivateKeySigner, }, + axum::http::StatusCode, bigdecimal::{BigDecimal, FromPrimitive}, ethrpc::Web3, futures::future::join_all, - hyper::StatusCode, model::order::{BuyTokenDestination, SellTokenSource}, number::{serialization::HexOrDecimalU256, testing::ApproxEq}, serde::{Deserialize, de::IntoDeserializer}, @@ -954,7 +954,7 @@ impl Setup { flashloans: solution.flashloans.clone(), }); } - let orderbook = Orderbook::start(&orders); + let orderbook = Orderbook::start(&orders).await; let quotes = orders .into_iter() .map(|order| blockchain.quote(&order)) @@ -1229,7 +1229,7 @@ pub struct SolveOk<'a> { impl<'a> Solve<'a> { /// Expect the /solve endpoint to have returned a 200 OK response. pub fn ok(self) -> SolveOk<'a> { - assert_eq!(self.status, hyper::StatusCode::OK); + assert_eq!(self.status, axum::http::StatusCode::OK); SolveOk { body: self.body, trades: self.trades, @@ -1238,7 +1238,7 @@ impl<'a> Solve<'a> { } pub fn err(self) -> SolveErr { - assert_ne!(self.status, hyper::StatusCode::OK); + assert_ne!(self.status, axum::http::StatusCode::OK); SolveErr { body: self.body } } } @@ -1401,7 +1401,7 @@ pub struct Reveal { impl Reveal { /// Expect the /reveal endpoint to have returned a 200 OK response. pub fn ok(self) -> RevealOk { - assert_eq!(self.status, hyper::StatusCode::OK); + assert_eq!(self.status, axum::http::StatusCode::OK); RevealOk { body: self.body } } @@ -1477,7 +1477,7 @@ pub struct Quote<'a> { impl<'a> Quote<'a> { /// Expect the /quote endpoint to have returned a 200 OK response. pub fn ok(self) -> QuoteOk<'a> { - assert_eq!(self.status, hyper::StatusCode::OK); + assert_eq!(self.status, axum::http::StatusCode::OK); QuoteOk { trades: self.trades, body: self.body, @@ -1687,7 +1687,7 @@ impl Settle { pub fn err(self) -> SettleErr { match self.status { SettleStatus::Err { status_code, body } => { - assert_eq!(status_code, hyper::StatusCode::BAD_REQUEST); + assert_eq!(status_code, axum::http::StatusCode::BAD_REQUEST); SettleErr { body } } _ => panic!("expected a 400 BAD REQUEST response"), diff --git a/crates/driver/src/tests/setup/orderbook.rs b/crates/driver/src/tests/setup/orderbook.rs index 8b3da45a3a..3d48174ada 100644 --- a/crates/driver/src/tests/setup/orderbook.rs +++ b/crates/driver/src/tests/setup/orderbook.rs @@ -27,7 +27,7 @@ impl Orderbook { /// /// # Returns /// The `Orderbook` instance with the server listening address. 
- pub fn start(orders: &[Order]) -> Self { + pub async fn start(orders: &[Order]) -> Self { let app_data_storage = orders .iter() .filter_map(|order| { @@ -43,15 +43,16 @@ impl Orderbook { .collect::>(); let app = Router::new() - .route("/api/v1/app_data/:app_data", get(Self::app_data_handler)) + .route("/api/v1/app_data/{app_data}", get(Self::app_data_handler)) .layer(Extension(app_data_storage)); - let server = - axum::Server::bind(&"0.0.0.0:0".parse().unwrap()).serve(app.into_make_service()); - let addr = server.local_addr(); + let listener = tokio::net::TcpListener::bind("0.0.0.0:0").await.unwrap(); + let addr = listener.local_addr().unwrap(); tracing::info!("Orderbook mock server listening on {}", addr); - tokio::spawn(server); + tokio::spawn(async move { + axum::serve(listener, app).await.unwrap(); + }); Orderbook { addr } } diff --git a/crates/driver/src/tests/setup/solver.rs b/crates/driver/src/tests/setup/solver.rs index ca01351e46..99537e8f9d 100644 --- a/crates/driver/src/tests/setup/solver.rs +++ b/crates/driver/src/tests/setup/solver.rs @@ -514,10 +514,9 @@ impl Solver { ), ) .with_state(State(state)); - let server = - axum::Server::bind(&"0.0.0.0:0".parse().unwrap()).serve(app.into_make_service()); - let addr = server.local_addr(); - tokio::spawn(async move { server.await.unwrap() }); + let listener = tokio::net::TcpListener::bind("0.0.0.0:0").await.unwrap(); + let addr = listener.local_addr().unwrap(); + tokio::spawn(async move { axum::serve(listener, app).await.unwrap() }); Self { addr } } } diff --git a/crates/e2e/Cargo.toml b/crates/e2e/Cargo.toml index df12e6e6a7..eb024a69ea 100644 --- a/crates/e2e/Cargo.toml +++ b/crates/e2e/Cargo.toml @@ -26,7 +26,6 @@ driver = { workspace = true } ethrpc = { workspace = true, features = ["test-util"] } futures = { workspace = true } hex-literal = { workspace = true } -hyper = { workspace = true } itertools = { workspace = true } model = { workspace = true, features = ["e2e"] } number = { workspace = true } diff --git a/crates/e2e/src/api/liquorice/server.rs b/crates/e2e/src/api/liquorice/server.rs index 8e6096b1f9..0f7bc8892e 100644 --- a/crates/e2e/src/api/liquorice/server.rs +++ b/crates/e2e/src/api/liquorice/server.rs @@ -33,14 +33,13 @@ impl LiquoriceApi { .with_state(state.clone()); let addr = SocketAddr::from((Ipv4Addr::UNSPECIFIED, 0)); - let server = axum::Server::bind(&addr).serve(app.into_make_service()); - - let addr = server.local_addr(); + let listener = tokio::net::TcpListener::bind(addr).await.unwrap(); + let addr = listener.local_addr().unwrap(); let port = addr.port(); assert!(port > 0, "assigned port must be greater than 0"); tokio::spawn(async move { - if let Err(err) = server.await { + if let Err(err) = axum::serve(listener, app).await { tracing::error!(?err, "Liquorice API server failed"); panic!("Liquorice test server crashed: {}", err); } diff --git a/crates/e2e/src/api/zeroex.rs b/crates/e2e/src/api/zeroex.rs index d78cab693b..9ee37560de 100644 --- a/crates/e2e/src/api/zeroex.rs +++ b/crates/e2e/src/api/zeroex.rs @@ -43,14 +43,13 @@ impl ZeroExApi { .with_state(state); let addr = SocketAddr::from((Ipv4Addr::UNSPECIFIED, 0)); - let server = axum::Server::bind(&addr).serve(app.into_make_service()); - - let addr = server.local_addr(); + let listener = tokio::net::TcpListener::bind(addr).await.unwrap(); + let addr = listener.local_addr().unwrap(); let port = addr.port(); assert!(port > 0, "assigned port must be greater than 0"); tokio::spawn(async move { - if let Err(err) = server.await { + if let Err(err) = 
axum::serve(listener, app).await { tracing::error!(?err, "ZeroEx API server failed"); panic!("ZeroEx test server crashed: {}", err); } diff --git a/crates/e2e/src/setup/proxy.rs b/crates/e2e/src/setup/proxy.rs index 85474f8ae3..6c8ca9962f 100644 --- a/crates/e2e/src/setup/proxy.rs +++ b/crates/e2e/src/setup/proxy.rs @@ -18,7 +18,6 @@ use { http::Request, response::{IntoResponse, Response}, }, - hyper::body::to_bytes, std::{collections::VecDeque, net::SocketAddr, sync::Arc}, tokio::{sync::RwLock, task::JoinHandle}, url::Url, @@ -104,10 +103,8 @@ async fn serve(listen_addr: SocketAddr, backends: Vec, state: ProxyState) { let app = Router::new().fallback(proxy_handler); tracing::info!(?listen_addr, ?backends, "starting reverse proxy"); - axum::Server::bind(&listen_addr) - .serve(app.into_make_service()) - .await - .unwrap(); + let listener = tokio::net::TcpListener::bind(listen_addr).await.unwrap(); + axum::serve(listener, app).await.unwrap(); } async fn handle_request( @@ -118,7 +115,8 @@ async fn handle_request( let (parts, body) = req.into_parts(); // Convert body to bytes once for reuse across retries - let body_bytes = match to_bytes(body).await { + // SAFETY: usize::MAX is ok here because it's a test + let body_bytes = match axum::body::to_bytes(body, usize::MAX).await { Ok(bytes) => bytes, Err(err) => { return ( diff --git a/crates/e2e/src/setup/solver/mock.rs b/crates/e2e/src/setup/solver/mock.rs index dc29236ff4..e98bf214f9 100644 --- a/crates/e2e/src/setup/solver/mock.rs +++ b/crates/e2e/src/setup/solver/mock.rs @@ -51,8 +51,8 @@ impl Mock { } } -impl Default for Mock { - fn default() -> Self { +impl Mock { + pub async fn new() -> Self { let state = State { solution: Arc::new(Mutex::new(Arc::new(|| async { None }.boxed()))), auctions: Arc::new(Mutex::new(vec![])), @@ -62,17 +62,20 @@ impl Default for Mock { .route("/solve", axum::routing::post(solve)) .with_state(state.clone()); - let server = - axum::Server::bind(&"0.0.0.0:0".parse().unwrap()).serve(app.into_make_service()); - - let mock = Mock { - state, - url: format!("http://{}", server.local_addr()).parse().unwrap(), - }; + let listener = tokio::net::TcpListener::bind("0.0.0.0:0").await.unwrap(); + let local_addr = listener.local_addr().unwrap(); - tokio::task::spawn(server.with_graceful_shutdown(shutdown_signal())); + tokio::task::spawn(async move { + axum::serve(listener, app) + .with_graceful_shutdown(shutdown_signal()) + .await + .unwrap(); + }); - mock + Mock { + state, + url: format!("http://{}", local_addr).parse().unwrap(), + } } } diff --git a/crates/e2e/tests/e2e/cow_amm.rs b/crates/e2e/tests/e2e/cow_amm.rs index 77987a81bf..237aad19e6 100644 --- a/crates/e2e/tests/e2e/cow_amm.rs +++ b/crates/e2e/tests/e2e/cow_amm.rs @@ -161,7 +161,7 @@ async fn cow_amm_jit(web3: Web3) { // for the actual solver competition. That way we can handcraft a solution // for this test and don't have to implement complete support for CoW AMMs // in the baseline solver. - let mock_solver = Mock::default(); + let mock_solver = Mock::new().await; colocation::start_driver( onchain.contracts(), vec![ @@ -532,7 +532,7 @@ async fn cow_amm_driver_support(web3: Web3) { ); // spawn a mock solver so we can later assert things about the received auction - let mock_solver = Mock::default(); + let mock_solver = Mock::new().await; colocation::start_driver_with_config_override( onchain.contracts(), vec![ @@ -817,7 +817,7 @@ async fn cow_amm_opposite_direction(web3: Web3) { // Start system with the mocked solver. 
Baseline is still required for the // native price estimation. - let mock_solver = Mock::default(); + let mock_solver = Mock::new().await; colocation::start_driver( onchain.contracts(), vec![ diff --git a/crates/e2e/tests/e2e/jit_orders.rs b/crates/e2e/tests/e2e/jit_orders.rs index 146bb701b5..da51e0bb04 100644 --- a/crates/e2e/tests/e2e/jit_orders.rs +++ b/crates/e2e/tests/e2e/jit_orders.rs @@ -62,7 +62,7 @@ async fn single_limit_order_test(web3: Web3) { let services = Services::new(&onchain).await; - let mock_solver = Mock::default(); + let mock_solver = Mock::new().await; // Start system colocation::start_driver( diff --git a/crates/e2e/tests/e2e/liquidity_source_notification.rs b/crates/e2e/tests/e2e/liquidity_source_notification.rs index 5e1e465904..abb5723efa 100644 --- a/crates/e2e/tests/e2e/liquidity_source_notification.rs +++ b/crates/e2e/tests/e2e/liquidity_source_notification.rs @@ -167,7 +167,7 @@ async fn liquidity_source_notification(web3: Web3) { let liquorice_api = api::liquorice::server::LiquoriceApi::start().await; // CoW services setup - let liquorice_solver_api_mock = Mock::default(); + let liquorice_solver_api_mock = Mock::new().await; let services = Services::new(&onchain).await; colocation::start_driver_with_config_override( diff --git a/crates/e2e/tests/e2e/quoting.rs b/crates/e2e/tests/e2e/quoting.rs index c9839e8865..672ab5e097 100644 --- a/crates/e2e/tests/e2e/quoting.rs +++ b/crates/e2e/tests/e2e/quoting.rs @@ -276,7 +276,7 @@ async fn quote_timeout(web3: Web3) { tracing::info!("Starting services."); let services = Services::new(&onchain).await; - let mock_solver = Mock::default(); + let mock_solver = Mock::new().await; // Start system colocation::start_driver( diff --git a/crates/e2e/tests/e2e/solver_competition.rs b/crates/e2e/tests/e2e/solver_competition.rs index 0eb4951290..76f2b53956 100644 --- a/crates/e2e/tests/e2e/solver_competition.rs +++ b/crates/e2e/tests/e2e/solver_competition.rs @@ -367,8 +367,8 @@ async fn store_filtered_solutions(web3: Web3) { let services = Services::new(&onchain).await; - let good_solver = Mock::default(); - let bad_solver = Mock::default(); + let good_solver = Mock::new().await; + let bad_solver = Mock::new().await; // Start system let base_tokens = vec![*token_a.address(), *token_b.address(), *token_c.address()]; diff --git a/crates/observe/src/metrics.rs b/crates/observe/src/metrics.rs index bae2b1ec2e..9b427e6312 100644 --- a/crates/observe/src/metrics.rs +++ b/crates/observe/src/metrics.rs @@ -116,8 +116,10 @@ pub fn serve_metrics( tracing::info!(%address, "serving metrics"); tokio::spawn(async move { - axum::Server::bind(&address) - .serve(app.into_make_service()) + let listener = tokio::net::TcpListener::bind(address) + .await + .expect("failed to bind metrics server"); + axum::serve(listener, app) .await .expect("failed to serve metrics") }) diff --git a/crates/orderbook/Cargo.toml b/crates/orderbook/Cargo.toml index 936b89986d..0b688bdc8c 100644 --- a/crates/orderbook/Cargo.toml +++ b/crates/orderbook/Cargo.toml @@ -33,7 +33,6 @@ futures = { workspace = true } const-hex = { workspace = true } hex-literal = { workspace = true } humantime = { workspace = true } -hyper = { workspace = true } mimalloc = { workspace = true, optional = true } tikv-jemallocator = { workspace = true } model = { workspace = true } diff --git a/crates/orderbook/src/api.rs b/crates/orderbook/src/api.rs index 7123b5a0e0..30d0fed648 100644 --- a/crates/orderbook/src/api.rs +++ b/crates/orderbook/src/api.rs @@ -9,11 +9,10 @@ use { axum::{ Router, 
extract::DefaultBodyLimit, - http::{Request, StatusCode}, + http::{Request, StatusCode, header::USER_AGENT}, middleware::{self, Next}, response::{IntoResponse, Json, Response}, }, - hyper::header::USER_AGENT, observe::distributed_tracing::tracing_axum::{self, record_trace_id}, serde::{Deserialize, Serialize}, shared::price_estimation::{PriceEstimationError, native::NativePriceEstimating}, @@ -68,10 +67,7 @@ pub struct AppState { pub quote_timeout: Duration, } -async fn summarize_request( - req: Request, - next: Next, -) -> Response { +async fn summarize_request(req: Request, next: Next) -> Response { let method = req.method().to_string(); let uri = req.uri().to_string(); @@ -99,10 +95,7 @@ async fn summarize_request( } /// Middleware that automatically tracks metrics using Axum's MatchedPath -async fn with_matched_path_metric( - req: Request, - next: Next, -) -> Response { +async fn with_matched_path_metric(req: Request, next: Next) -> Response { let metrics = ApiMetrics::instance(observe::metrics::get_storage_registry()).unwrap(); // Extract matched path and HTTP method @@ -165,7 +158,7 @@ pub fn handle_all_routes( let routes = [ // V1 routes ( - "/api/v1/account/:owner/orders", + "/api/v1/account/{owner}/orders", axum::routing::get(get_user_orders::get_user_orders_handler), ), ( @@ -174,7 +167,7 @@ pub fn handle_all_routes( .layer(DefaultBodyLimit::max(app_data_size_limit)), ), ( - "/api/v1/app_data/:hash", + "/api/v1/app_data/{hash}", axum::routing::get(get_app_data::get_app_data_handler).merge( axum::routing::put(put_app_data::put_app_data_with_hash) .layer(DefaultBodyLimit::max(app_data_size_limit)), @@ -190,12 +183,12 @@ pub fn handle_all_routes( .merge(axum::routing::delete(cancel_orders::cancel_orders_handler)), ), ( - "/api/v1/orders/:uid", + "/api/v1/orders/{uid}", axum::routing::get(get_order_by_uid::get_order_by_uid_handler) .merge(axum::routing::delete(cancel_order::cancel_order_handler)), ), ( - "/api/v1/orders/:uid/status", + "/api/v1/orders/{uid}/status", axum::routing::get(get_order_status::get_status_handler), ), ( @@ -208,19 +201,19 @@ pub fn handle_all_routes( axum::routing::get(get_solver_competition::get_solver_competition_latest_handler), ), ( - "/api/v1/solver_competition/by_tx_hash/:tx_hash", + "/api/v1/solver_competition/by_tx_hash/{tx_hash}", axum::routing::get(get_solver_competition::get_solver_competition_by_hash_handler), ), ( - "/api/v1/solver_competition/:auction_id", + "/api/v1/solver_competition/{auction_id}", axum::routing::get(get_solver_competition::get_solver_competition_by_id_handler), ), ( - "/api/v1/token/:token/metadata", + "/api/v1/token/{token}/metadata", axum::routing::get(get_token_metadata::get_token_metadata_handler), ), ( - "/api/v1/token/:token/native_price", + "/api/v1/token/{token}/native_price", axum::routing::get(get_native_price::get_native_price_handler), ), ( @@ -228,11 +221,11 @@ pub fn handle_all_routes( axum::routing::get(get_trades::get_trades_handler), ), ( - "/api/v1/transactions/:hash/orders", + "/api/v1/transactions/{hash}/orders", axum::routing::get(get_orders_by_tx::get_orders_by_tx_handler), ), ( - "/api/v1/users/:user/total_surplus", + "/api/v1/users/{user}/total_surplus", axum::routing::get(get_total_surplus::get_total_surplus_handler), ), ( @@ -246,11 +239,11 @@ pub fn handle_all_routes( axum::routing::get(get_solver_competition_v2::get_solver_competition_latest_handler), ), ( - "/api/v2/solver_competition/by_tx_hash/:tx_hash", + "/api/v2/solver_competition/by_tx_hash/{tx_hash}", 
axum::routing::get(get_solver_competition_v2::get_solver_competition_by_hash_handler), ), ( - "/api/v2/solver_competition/:auction_id", + "/api/v2/solver_competition/{auction_id}", axum::routing::get(get_solver_competition_v2::get_solver_competition_by_id_handler), ), ( @@ -439,19 +432,12 @@ impl IntoResponse for LoadSolverCompetitionError { } #[cfg(test)] -pub async fn response_body(response: axum::http::Response) -> Vec -where - B: axum::body::HttpBody + Unpin, - B::Data: AsRef<[u8]>, - B::Error: Debug, -{ - let mut body = response.into_body(); - let mut result = Vec::new(); - while let Some(frame) = body.data().await { - let bytes = frame.unwrap(); - result.extend_from_slice(bytes.as_ref()); - } - result +pub async fn response_body(response: axum::http::Response) -> Vec { + // SAFETY: usize::MAX is ok here because it's a test + axum::body::to_bytes(response.into_body(), usize::MAX) + .await + .unwrap() + .to_vec() } #[cfg(test)] @@ -489,8 +475,6 @@ mod tests { #[tokio::test] async fn rich_errors_handle_serialization_errors() { - use axum::body::HttpBody; - struct AlwaysErrors; impl Serialize for AlwaysErrors { fn serialize(&self, _: S) -> Result @@ -502,12 +486,10 @@ mod tests { } let response = rich_error("foo", "bar", AlwaysErrors).into_response(); - let mut body = response.into_body(); - let mut bytes = Vec::new(); - while let Some(frame) = body.data().await { - let chunk = frame.unwrap(); - bytes.extend_from_slice(&chunk); - } + // SAFETY: usize::MAX is ok here because it's a test + let bytes = axum::body::to_bytes(response.into_body(), usize::MAX) + .await + .unwrap(); assert_eq!( serde_json::from_slice::(&bytes).unwrap(), diff --git a/crates/orderbook/src/api/cancel_orders.rs b/crates/orderbook/src/api/cancel_orders.rs index 4c6f8e47e5..a44ae215de 100644 --- a/crates/orderbook/src/api/cancel_orders.rs +++ b/crates/orderbook/src/api/cancel_orders.rs @@ -5,9 +5,9 @@ use { Json, body, extract::State, + http::StatusCode, response::{IntoResponse, Response}, }, - hyper::StatusCode, model::order::{ORDER_UID_LIMIT, SignedOrderCancellations}, std::sync::Arc, }; diff --git a/crates/orderbook/src/api/get_solver_competition.rs b/crates/orderbook/src/api/get_solver_competition.rs index 4335160139..7be9e6ef0d 100644 --- a/crates/orderbook/src/api/get_solver_competition.rs +++ b/crates/orderbook/src/api/get_solver_competition.rs @@ -68,8 +68,7 @@ pub async fn get_solver_competition_latest_handler( mod tests { use { crate::solver_competition::LoadSolverCompetitionError, - axum::response::IntoResponse, - hyper::StatusCode, + axum::{http::StatusCode, response::IntoResponse}, }; #[tokio::test] diff --git a/crates/orderbook/src/run.rs b/crates/orderbook/src/run.rs index 8ab767e4eb..950c157b46 100644 --- a/crates/orderbook/src/run.rs +++ b/crates/orderbook/src/run.rs @@ -473,12 +473,18 @@ fn serve_api( ); tracing::info!(%address, "serving order book"); - let server = axum::Server::bind(&address) - .serve(app.into_make_service()) - .with_graceful_shutdown(shutdown_receiver); - task::spawn(async move { - if let Err(err) = server.await { + let listener = match tokio::net::TcpListener::bind(address).await { + Ok(listener) => listener, + Err(err) => { + tracing::error!(?err, "failed to bind server"); + return; + } + }; + if let Err(err) = axum::serve(listener, app) + .with_graceful_shutdown(shutdown_receiver) + .await + { tracing::error!(?err, "server error"); } }) diff --git a/crates/solvers/Cargo.toml b/crates/solvers/Cargo.toml index 8467f88614..fba01b7051 100644 --- a/crates/solvers/Cargo.toml +++ 
b/crates/solvers/Cargo.toml @@ -23,7 +23,6 @@ const-hex = { workspace = true } derive_more = { workspace = true } ethrpc = { workspace = true } futures = { workspace = true } -hyper = { workspace = true } itertools = { workspace = true } mimalloc = { workspace = true, optional = true } num = { workspace = true } diff --git a/crates/solvers/src/api/mod.rs b/crates/solvers/src/api/mod.rs index 734365291c..dbb363cd5d 100644 --- a/crates/solvers/src/api/mod.rs +++ b/crates/solvers/src/api/mod.rs @@ -21,7 +21,7 @@ impl Api { self, bind: Option>, shutdown: impl Future + Send + 'static, - ) -> Result<(), hyper::Error> { + ) -> Result<(), std::io::Error> { let app = axum::Router::new() .layer(tower::ServiceBuilder::new().layer( tower_http::limit::RequestBodyLimitLayer::new(REQUEST_BODY_LIMIT), @@ -38,11 +38,13 @@ impl Api { // axum's default body limit needs to be disabled to not have the default limit on top of our custom limit .layer(axum::extract::DefaultBodyLimit::disable()); - let server = axum::Server::bind(&self.addr).serve(app.into_make_service()); + let listener = tokio::net::TcpListener::bind(self.addr).await?; if let Some(bind) = bind { - let _ = bind.send(server.local_addr()); + let _ = bind.send(listener.local_addr()?); } - server.with_graceful_shutdown(shutdown).await + axum::serve(listener, app) + .with_graceful_shutdown(shutdown) + .await } } From 65450c5919ac3d8d84656823a1fcf1d6f23a5db5 Mon Sep 17 00:00:00 2001 From: ilya Date: Wed, 18 Feb 2026 11:49:26 +0000 Subject: [PATCH 073/219] Orderbook native price estimators fallback (#4161) # Background The orderbook's native price estimator is currently configured to use a Forwarder estimator, which is basically the Autopilot's API. In case Autopilot is down, quote competition can't proceed without native prices, and no new orders can be placed during that time. # Description Adds an optional fallback native price estimator that kicks in when the primary estimator experiences sustained failures. This protects native price availability during primary estimator outages. The fallback estimator tracks consecutive `ProtocolInternal` errors from the primary. After a configurable threshold (3 errors), it switches to the fallback estimator and periodically probes the primary to detect recovery. # Changes - New `FallbackNativePriceEstimator`, which wraps a primary and fallback estimator with automatic failover logic: - Tracks consecutive `ProtocolInternal` errors on the primary - Switches to fallback after 3 consecutive errors - Probes primary every 60s while in fallback mode - Recovers to primary when a probe succeeds, otherwise, continue using the fallback - `Forwarder` error mapping: HTTP send failures now return `ProtocolInternal` instead of a generic error, so the fallback estimator can detect them - New CLI argument `--native-price-estimators-fallback` on the orderbook to optionally configure fallback estimators - New factory method `caching_native_price_estimator_from_inner` to allow injecting a pre-built inner estimator (with fallback wrapping) into the caching layer ## How to test New unit and e2e tests. 
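
## Example invocation

For illustration only: a hypothetical way to enable the fallback on the orderbook. The binary name, driver name, URL, port, and cache age below are taken from the accompanying e2e test wiring (not production values); they just show how the new `--native-price-estimators-fallback` flag composes with the existing price-estimation flags.

```sh
# Illustrative sketch: values mirror the e2e test setup, adjust for your deployment.
orderbook \
  --price-estimation-drivers="test_quoter|http://localhost:11088/test_solver" \
  --native-price-estimators-fallback="Driver|test_quoter|http://localhost:11088/test_solver" \
  --native-price-cache-max-age=2s
```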
--- .../e2e/tests/e2e/place_order_with_quote.rs | 177 +++++- crates/orderbook/src/arguments.rs | 10 + crates/orderbook/src/run.rs | 33 +- crates/shared/src/price_estimation/factory.rs | 11 + .../src/price_estimation/native/fallback.rs | 532 ++++++++++++++++++ .../src/price_estimation/native/forwarder.rs | 6 +- .../shared/src/price_estimation/native/mod.rs | 8 +- 7 files changed, 766 insertions(+), 11 deletions(-) create mode 100644 crates/shared/src/price_estimation/native/fallback.rs diff --git a/crates/e2e/tests/e2e/place_order_with_quote.rs b/crates/e2e/tests/e2e/place_order_with_quote.rs index 47d7808297..d5e4ea6d6a 100644 --- a/crates/e2e/tests/e2e/place_order_with_quote.rs +++ b/crates/e2e/tests/e2e/place_order_with_quote.rs @@ -1,7 +1,14 @@ use { ::alloy::primitives::U256, + autopilot::{ + config::{ + Configuration, + solver::{Account, Solver}, + }, + shutdown_controller::ShutdownController, + }, driver::domain::eth::NonZeroU256, - e2e::setup::*, + e2e::setup::{colocation, wait_for_condition, *}, ethrpc::alloy::{CallBuilderExt, EvmProviderExt}, model::{ order::{OrderCreation, OrderKind}, @@ -10,7 +17,8 @@ use { }, number::units::EthUnit, shared::web3::Web3, - std::ops::DerefMut, + std::{ops::DerefMut, str::FromStr}, + url::Url, }; #[tokio::test] @@ -25,6 +33,12 @@ async fn local_node_disabled_same_sell_and_buy_token_order_feature() { run_test(disabled_same_sell_and_buy_token_order_feature).await; } +#[tokio::test] +#[ignore] +async fn local_node_fallback_native_price_estimator() { + run_test(fallback_native_price_estimator).await; +} + async fn place_order_with_quote(web3: Web3) { let mut onchain = OnchainComponents::deploy(web3.clone()).await; @@ -162,3 +176,162 @@ async fn disabled_same_sell_and_buy_token_order_feature(web3: Web3) { matches!(services.submit_quote("e_request).await, Err((reqwest::StatusCode::BAD_REQUEST, response)) if response.contains("SameBuyAndSellToken")) ); } + +async fn fallback_native_price_estimator(web3: Web3) { + let mut onchain = OnchainComponents::deploy(web3.clone()).await; + + let [solver] = onchain.make_solvers(10u64.eth()).await; + let [trader] = onchain.make_accounts(10u64.eth()).await; + let [token] = onchain + .deploy_tokens_with_weth_uni_v2_pools(1_000u64.eth(), 1_000u64.eth()) + .await; + + onchain + .contracts() + .weth + .approve(onchain.contracts().allowance, 6u64.eth()) + .from(trader.address()) + .send_and_watch() + .await + .unwrap(); + onchain + .contracts() + .weth + .deposit() + .from(trader.address()) + .value(6u64.eth()) + .send_and_watch() + .await + .unwrap(); + + tracing::info!("Starting services."); + let services = Services::new(&onchain).await; + + colocation::start_driver( + onchain.contracts(), + vec![ + colocation::start_baseline_solver( + "test_solver".into(), + solver.clone(), + *onchain.contracts().weth.address(), + vec![], + 1, + true, + ) + .await, + ], + colocation::LiquidityProvider::UniswapV2, + false, + ); + + let (manual_shutdown, control) = ShutdownController::new_manual_shutdown(); + let autopilot_config_file = Configuration { + drivers: vec![Solver::new( + "test_solver".to_string(), + Url::from_str("http://localhost:11088/test_solver").unwrap(), + Account::Address(solver.address()), + )], + } + .to_temp_path(); + let autopilot_handle = services + .start_autopilot_with_shutdown_controller( + None, + vec![ + format!("--config={}", autopilot_config_file.path().display()), + "--price-estimation-drivers=test_quoter|http://localhost:11088/test_solver" + .to_string(), + 
"--gas-estimators=http://localhost:11088/gasprice".to_string(), + ], + control, + ) + .await; + + services + .start_api(vec![ + "--price-estimation-drivers=test_quoter|http://localhost:11088/test_solver".to_string(), + "--gas-estimators=http://localhost:11088/gasprice".to_string(), + "--native-price-estimators-fallback=Driver|test_quoter|http://localhost:11088/test_solver" + .to_string(), + "--native-price-cache-max-age=2s".to_string(), + ]) + .await; + + tracing::info!("Quoting with autopilot running"); + let quote_sell_amount = 1u64.eth(); + let quote_request = OrderQuoteRequest { + from: trader.address(), + sell_token: *onchain.contracts().weth.address(), + buy_token: *token.address(), + side: OrderQuoteSide::Sell { + sell_amount: SellAmount::BeforeFee { + value: NonZeroU256::try_from(quote_sell_amount).unwrap(), + }, + }, + ..Default::default() + }; + let quote_response = services.submit_quote("e_request).await.unwrap(); + tracing::debug!(?quote_response); + assert!(quote_response.id.is_some()); + + tracing::info!("Placing order with autopilot running"); + let order = OrderCreation { + quote_id: quote_response.id, + sell_token: *onchain.contracts().weth.address(), + sell_amount: quote_sell_amount, + buy_token: *token.address(), + buy_amount: quote_response.quote.buy_amount, + valid_to: model::time::now_in_epoch_seconds() + 300, + kind: OrderKind::Sell, + ..Default::default() + } + .sign( + EcdsaSigningScheme::Eip712, + &onchain.contracts().domain_separator, + &trader.signer, + ); + services.create_order(&order).await.unwrap(); + + tracing::info!("Shutting down autopilot"); + manual_shutdown.shutdown(); + wait_for_condition(TIMEOUT, || async { + onchain.mint_block().await; + autopilot_handle.is_finished() + }) + .await + .unwrap(); + + // Wait for native price cache to expire (max age = 2s) + tokio::time::sleep(std::time::Duration::from_secs(3)).await; + + // The FallbackNativePriceEstimator switches to fallback after 3 consecutive + // ProtocolInternal errors from the primary (forwarder → dead autopilot). + tracing::info!("Waiting for native price fallback to activate"); + wait_for_condition(TIMEOUT, || async { + services.get_native_price(token.address()).await.is_ok() + }) + .await + .unwrap(); + + tracing::info!("Quoting after autopilot shutdown (via fallback)"); + let quote_response = services.submit_quote("e_request).await.unwrap(); + tracing::debug!(?quote_response); + assert!(quote_response.id.is_some()); + + tracing::info!("Placing order after autopilot shutdown (via fallback)"); + let order = OrderCreation { + quote_id: quote_response.id, + sell_token: *onchain.contracts().weth.address(), + sell_amount: quote_sell_amount, + buy_token: *token.address(), + buy_amount: quote_response.quote.buy_amount, + valid_to: model::time::now_in_epoch_seconds() + 300, + kind: OrderKind::Sell, + ..Default::default() + } + .sign( + EcdsaSigningScheme::Eip712, + &onchain.contracts().domain_separator, + &trader.signer, + ); + services.create_order(&order).await.unwrap(); +} diff --git a/crates/orderbook/src/arguments.rs b/crates/orderbook/src/arguments.rs index 9f40f7a120..ce2308f6a3 100644 --- a/crates/orderbook/src/arguments.rs +++ b/crates/orderbook/src/arguments.rs @@ -87,6 +87,11 @@ pub struct Arguments { #[clap(long, env)] pub native_price_estimators: NativePriceEstimators, + /// Fallback native price estimators to use when all primary estimators + /// are down. 
+ #[clap(long, env)] + pub native_price_estimators_fallback: Option, + /// How many successful price estimates for each order will cause a fast /// or native price estimation to return its result early. /// The bigger the value the more the fast price estimation performs like @@ -173,6 +178,7 @@ impl std::fmt::Display for Arguments { banned_users_max_cache_size, eip1271_skip_creation_validation, native_price_estimators, + native_price_estimators_fallback, fast_price_estimation_results_required, max_limit_orders_per_user, ipfs_gateway, @@ -218,6 +224,10 @@ impl std::fmt::Display for Arguments { "eip1271_skip_creation_validation: {eip1271_skip_creation_validation}" )?; writeln!(f, "native_price_estimators: {native_price_estimators}")?; + writeln!( + f, + "native_price_estimators_fallback: {native_price_estimators_fallback:?}" + )?; writeln!( f, "fast_price_estimation_results_required: {fast_price_estimation_results_required}" diff --git a/crates/orderbook/src/run.rs b/crates/orderbook/src/run.rs index 950c157b46..3de44aa8e8 100644 --- a/crates/orderbook/src/run.rs +++ b/crates/orderbook/src/run.rs @@ -38,7 +38,7 @@ use { PriceEstimating, QuoteVerificationMode, factory::{self, PriceEstimatorFactory}, - native::NativePriceEstimating, + native::{FallbackNativePriceEstimator, NativePriceEstimating}, }, signature_validator, token_info::{CachedTokenInfoFetcher, TokenInfoFetcher}, @@ -238,14 +238,33 @@ pub async fn run(args: Arguments) { args.price_estimation.native_price_cache_max_age, prices, ); + let primary = price_estimator_factory + .native_price_estimator( + args.native_price_estimators.as_slice(), + args.fast_price_estimation_results_required, + &native_token, + ) + .await + .expect("failed to build primary native price estimator"); + + let inner: Box = + if let Some(ref fallback_config) = args.native_price_estimators_fallback { + let fallback = price_estimator_factory + .native_price_estimator( + fallback_config.as_slice(), + args.fast_price_estimation_results_required, + &native_token, + ) + .await + .expect("failed to build fallback native price estimator"); + Box::new(FallbackNativePriceEstimator::new(primary, fallback)) + } else { + primary + }; + let native_price_estimator: Arc = Arc::new( price_estimator_factory - .caching_native_price_estimator( - args.native_price_estimators.as_slice(), - args.fast_price_estimation_results_required, - &native_token, - cache, - ) + .caching_native_price_estimator_from_inner(inner, cache) .await, ); diff --git a/crates/shared/src/price_estimation/factory.rs b/crates/shared/src/price_estimation/factory.rs index f46a9c8767..be2f83e5d7 100644 --- a/crates/shared/src/price_estimation/factory.rs +++ b/crates/shared/src/price_estimation/factory.rs @@ -393,6 +393,17 @@ impl<'a> PriceEstimatorFactory<'a> { .native_price_estimator(native, results_required, weth) .await .expect("failed to build native price estimator"); + self.caching_native_price_estimator_from_inner(inner, cache) + .await + } + + /// Creates a [`CachingNativePriceEstimator`] from a pre-built inner + /// estimator. 
+ pub async fn caching_native_price_estimator_from_inner( + &mut self, + inner: Box, + cache: native_price_cache::Cache, + ) -> native_price_cache::CachingNativePriceEstimator { let approximation_tokens = self .build_approximation_tokens() .await diff --git a/crates/shared/src/price_estimation/native/fallback.rs b/crates/shared/src/price_estimation/native/fallback.rs new file mode 100644 index 0000000000..0e83aa7727 --- /dev/null +++ b/crates/shared/src/price_estimation/native/fallback.rs @@ -0,0 +1,532 @@ +//! A native price estimator wrapper that automatically switches to a fallback +//! estimator when the primary becomes unavailable. +//! +//! # State Machine +//! +//! The estimator operates as a two-state machine: +//! +//! ```text +//! 3 consecutive +//! ProtocolInternal errors +//! ┌─────────┐ ───────────────────────> ┌──────────┐ +//! │ Primary │ │ Fallback │ +//! └─────────┘ <─────────────────────── └──────────┘ +//! probe succeeds +//! (every PROBE_INTERVAL) +//! ``` +//! +//! **Primary state**: All requests go to the primary estimator. A counter +//! tracks consecutive [`PriceEstimationError::ProtocolInternal`] errors. Any +//! success resets the counter. Once the counter reaches +//! [`CONSECUTIVE_ERRORS_THRESHOLD`], the estimator switches to fallback and +//! the current request is retried against the fallback. +//! +//! **Fallback state**: All requests go to the fallback estimator. Every +//! [`PROBE_INTERVAL`], one request probes both the primary and fallback +//! concurrently. If the primary probe succeeds, the estimator switches back to +//! primary; otherwise it stays in fallback and resets the probe timer. +//! +//! Only `ProtocolInternal` errors (e.g. connection refused, timeouts) trigger +//! the switch. Domain errors like `NoLiquidity` do not affect the state. + +use { + super::{NativePriceEstimateResult, NativePriceEstimating}, + crate::price_estimation::PriceEstimationError, + alloy::primitives::Address, + futures::{FutureExt, future::BoxFuture}, + std::{ + sync::Mutex, + time::{Duration, Instant}, + }, +}; + +/// How often the estimator probes the primary while in fallback state. +const PROBE_INTERVAL: Duration = Duration::from_secs(60); + +/// Number of consecutive `ProtocolInternal` errors from the primary before +/// switching to fallback. +const CONSECUTIVE_ERRORS_THRESHOLD: u32 = 3; + +enum State { + Primary { + /// Counts consecutive protocol internal errors from the primary + /// estimator. + consecutive_errors: u32, + }, + Fallback { + /// Tracks when we last tried the primary. + last_probe: Instant, + }, +} + +/// What the estimator should do for the current request based on the current +/// state and probe timing. +enum Action { + /// Use the primary estimator. + Primary, + /// Use the fallback estimator directly (within probe interval). + Fallback, + /// Probe both primary and fallback concurrently (probe interval elapsed). + Probe, +} + +/// Wraps a primary and fallback [`NativePriceEstimating`] implementation, +/// automatically switching to the fallback when the primary experiences +/// repeated `ProtocolInternal` failures and periodically probing to recover. +pub struct FallbackNativePriceEstimator { + primary: Box, + fallback: Box, + state: Mutex, +} + +impl FallbackNativePriceEstimator { + pub fn new( + primary: Box, + fallback: Box, + ) -> Self { + Self { + primary, + fallback, + state: Mutex::new(State::Primary { + consecutive_errors: 0, + }), + } + } +} + +impl FallbackNativePriceEstimator { + /// Returns `true` if the fallback should be used. 
+ fn should_use_fallback(&self, result: &NativePriceEstimateResult) -> bool { + let Err(PriceEstimationError::ProtocolInternal(err)) = result else { + if let State::Primary { + consecutive_errors, .. + } = &mut *self.state.lock().unwrap() + { + *consecutive_errors = 0; + } + return false; + }; + + let (use_fallback, consecutive_errors) = { + let mut state = self.state.lock().unwrap(); + let State::Primary { + consecutive_errors, .. + } = &mut *state + else { + return false; + }; + *consecutive_errors += 1; + let count = *consecutive_errors; + if count >= CONSECUTIVE_ERRORS_THRESHOLD { + *state = State::Fallback { + last_probe: Instant::now(), + }; + (true, count) + } else { + (false, count) + } + }; + + if use_fallback { + tracing::info!( + ?err, + consecutive_errors, + "primary native price estimator down, switching to fallback" + ); + } else { + tracing::debug!( + ?err, + consecutive_errors, + "primary native price estimator error, not yet switching to fallback" + ); + } + use_fallback + } + + fn next_action(&self) -> Action { + let mut state = self.state.lock().unwrap(); + match &mut *state { + State::Primary { .. } => Action::Primary, + State::Fallback { last_probe } if last_probe.elapsed() >= PROBE_INTERVAL => { + // Update immediately to prevent concurrent requests from also + // probing (thundering herd). + *last_probe = Instant::now(); + Action::Probe + } + State::Fallback { .. } => Action::Fallback, + } + } + + async fn estimate_with_primary( + &self, + token: Address, + timeout: Duration, + ) -> NativePriceEstimateResult { + let result = self.primary.estimate_native_price(token, timeout).await; + if self.should_use_fallback(&result) { + self.fallback.estimate_native_price(token, timeout).await + } else { + result + } + } + + async fn estimate_with_probe( + &self, + token: Address, + timeout: Duration, + ) -> NativePriceEstimateResult { + let (primary_result, fallback_result) = futures::join!( + self.primary.estimate_native_price(token, timeout), + self.fallback.estimate_native_price(token, timeout), + ); + + if matches!( + &primary_result, + Err(PriceEstimationError::ProtocolInternal(_)) + ) { + { + let mut state = self.state.lock().unwrap(); + *state = State::Fallback { + last_probe: Instant::now(), + }; + } + tracing::debug!("primary still down after probe, continuing with fallback"); + fallback_result + } else { + { + let mut state = self.state.lock().unwrap(); + *state = State::Primary { + consecutive_errors: 0, + }; + } + tracing::info!("primary native price estimator recovered"); + primary_result + } + } +} + +impl NativePriceEstimating for FallbackNativePriceEstimator { + fn estimate_native_price( + &self, + token: Address, + timeout: Duration, + ) -> BoxFuture<'_, NativePriceEstimateResult> { + async move { + match self.next_action() { + Action::Primary => self.estimate_with_primary(token, timeout).await, + Action::Probe => self.estimate_with_probe(token, timeout).await, + Action::Fallback => self.fallback.estimate_native_price(token, timeout).await, + } + } + .boxed() + } +} + +#[cfg(test)] +mod tests { + use { + super::*, + crate::price_estimation::native::MockNativePriceEstimating, + futures::FutureExt, + }; + + const TOKEN: Address = Address::with_last_byte(1); + const TIMEOUT: Duration = Duration::from_secs(5); + + #[tokio::test] + async fn uses_primary_when_healthy() { + let mut primary = MockNativePriceEstimating::new(); + primary + .expect_estimate_native_price() + .returning(|_, _| async { Ok(1.0) }.boxed()); + + let mut fallback = 
MockNativePriceEstimating::new(); + fallback.expect_estimate_native_price().never(); + + let estimator = FallbackNativePriceEstimator::new(Box::new(primary), Box::new(fallback)); + + let result = estimator.estimate_native_price(TOKEN, TIMEOUT).await; + assert_eq!(result.unwrap(), 1.0); + } + + #[tokio::test] + async fn switches_to_fallback_on_protocol_internal() { + let mut primary = MockNativePriceEstimating::new(); + primary + .expect_estimate_native_price() + .times(3) + .returning(|_, _| { + async { + Err(PriceEstimationError::ProtocolInternal(anyhow::anyhow!( + "connection refused" + ))) + } + .boxed() + }); + + let mut fallback = MockNativePriceEstimating::new(); + fallback + .expect_estimate_native_price() + .times(1) + .returning(|_, _| async { Ok(2.0) }.boxed()); + + let estimator = FallbackNativePriceEstimator::new(Box::new(primary), Box::new(fallback)); + + // First two errors: stay in primary, return the error + for _ in 0..2 { + let result = estimator.estimate_native_price(TOKEN, TIMEOUT).await; + assert!(matches!( + result, + Err(PriceEstimationError::ProtocolInternal(_)) + )); + } + + // Third error: threshold reached, switch to fallback + let result = estimator.estimate_native_price(TOKEN, TIMEOUT).await; + assert_eq!(result.unwrap(), 2.0); + } + + #[tokio::test] + async fn stays_in_fallback_without_probing_before_interval() { + let mut primary = MockNativePriceEstimating::new(); + // Called 3 times (threshold) for the initial failures, NOT called again before + // probe interval + primary + .expect_estimate_native_price() + .times(3) + .returning(|_, _| { + async { + Err(PriceEstimationError::ProtocolInternal(anyhow::anyhow!( + "connection refused" + ))) + } + .boxed() + }); + + let mut fallback = MockNativePriceEstimating::new(); + // Called once when threshold is reached, once for the subsequent request + fallback + .expect_estimate_native_price() + .times(2) + .returning(|_, _| async { Ok(2.0) }.boxed()); + + let estimator = FallbackNativePriceEstimator::new(Box::new(primary), Box::new(fallback)); + + // First two calls: primary errors returned (below threshold) + for _ in 0..2 { + let _ = estimator.estimate_native_price(TOKEN, TIMEOUT).await; + } + + // Third call: threshold reached, triggers fallback + let _ = estimator.estimate_native_price(TOKEN, TIMEOUT).await; + + // Fourth call should use fallback directly (within probe interval) + let result = estimator.estimate_native_price(TOKEN, TIMEOUT).await; + assert_eq!(result.unwrap(), 2.0); + } + + #[tokio::test] + async fn probes_primary_after_interval_and_recovers() { + let mut primary = MockNativePriceEstimating::new(); + let mut call_count = 0u32; + primary + .expect_estimate_native_price() + .times(4) // 3 for threshold + 1 probe + .returning(move |_, _| { + call_count += 1; + if call_count <= 3 { + // First 3 calls: primary is down (reaching threshold) + async { + Err(PriceEstimationError::ProtocolInternal(anyhow::anyhow!( + "connection refused" + ))) + } + .boxed() + } else { + // Fourth call (probe): primary recovered + async { Ok(1.0) }.boxed() + } + }); + + let mut fallback = MockNativePriceEstimating::new(); + // Called once when threshold is reached + once during probe (concurrent) + fallback + .expect_estimate_native_price() + .times(2) + .returning(|_, _| async { Ok(2.0) }.boxed()); + + let estimator = FallbackNativePriceEstimator::new(Box::new(primary), Box::new(fallback)); + + // First two calls: primary errors returned (below threshold) + for _ in 0..2 { + let _ = estimator.estimate_native_price(TOKEN, 
TIMEOUT).await; + } + + // Third call: threshold reached, triggers fallback + let result = estimator.estimate_native_price(TOKEN, TIMEOUT).await; + assert_eq!(result.unwrap(), 2.0); + + // Force probe interval to expire + { + let mut state = estimator.state.lock().unwrap(); + *state = State::Fallback { + last_probe: Instant::now() - PROBE_INTERVAL - Duration::from_secs(1), + }; + } + + // This call should probe primary (which recovers) and return primary result + let result = estimator.estimate_native_price(TOKEN, TIMEOUT).await; + assert_eq!(result.unwrap(), 1.0); + } + + #[tokio::test] + async fn probes_primary_after_interval_stays_in_fallback_if_still_down() { + let mut primary = MockNativePriceEstimating::new(); + primary.expect_estimate_native_price().returning(|_, _| { + async { + Err(PriceEstimationError::ProtocolInternal(anyhow::anyhow!( + "connection refused" + ))) + } + .boxed() + }); + + let mut fallback = MockNativePriceEstimating::new(); + fallback + .expect_estimate_native_price() + .returning(|_, _| async { Ok(2.0) }.boxed()); + + let estimator = FallbackNativePriceEstimator::new(Box::new(primary), Box::new(fallback)); + + // First two calls: primary errors (below threshold) + for _ in 0..2 { + let _ = estimator.estimate_native_price(TOKEN, TIMEOUT).await; + } + + // Third call: threshold reached, triggers fallback + let _ = estimator.estimate_native_price(TOKEN, TIMEOUT).await; + + // Force probe interval to expire + { + let mut state = estimator.state.lock().unwrap(); + *state = State::Fallback { + last_probe: Instant::now() - PROBE_INTERVAL - Duration::from_secs(1), + }; + } + + // Probe fires, primary still down → use fallback result + let result = estimator.estimate_native_price(TOKEN, TIMEOUT).await; + assert_eq!(result.unwrap(), 2.0); + } + + #[tokio::test] + async fn does_not_switch_on_fewer_than_threshold_errors() { + let mut primary = MockNativePriceEstimating::new(); + let mut call_count = 0u32; + primary + .expect_estimate_native_price() + .times(3) + .returning(move |_, _| { + call_count += 1; + if call_count <= 2 { + async { + Err(PriceEstimationError::ProtocolInternal(anyhow::anyhow!( + "transient error" + ))) + } + .boxed() + } else { + // Third call: primary recovers before threshold + async { Ok(1.0) }.boxed() + } + }); + + let mut fallback = MockNativePriceEstimating::new(); + fallback.expect_estimate_native_price().never(); + + let estimator = FallbackNativePriceEstimator::new(Box::new(primary), Box::new(fallback)); + + // Two errors: below threshold, stay in primary + for _ in 0..2 { + let result = estimator.estimate_native_price(TOKEN, TIMEOUT).await; + assert!(matches!( + result, + Err(PriceEstimationError::ProtocolInternal(_)) + )); + } + + // Third call: primary succeeds, fallback never used + let result = estimator.estimate_native_price(TOKEN, TIMEOUT).await; + assert_eq!(result.unwrap(), 1.0); + } + + #[tokio::test] + async fn resets_counter_on_success() { + let mut primary = MockNativePriceEstimating::new(); + let mut call_count = 0u32; + primary + .expect_estimate_native_price() + .times(4) + .returning(move |_, _| { + call_count += 1; + match call_count { + // error, success, error, error — never reaches 3 consecutive + 1 | 3 | 4 => async { + Err(PriceEstimationError::ProtocolInternal(anyhow::anyhow!( + "transient error" + ))) + } + .boxed(), + 2 => async { Ok(1.0) }.boxed(), + _ => unreachable!(), + } + }); + + let mut fallback = MockNativePriceEstimating::new(); + fallback.expect_estimate_native_price().never(); + + let estimator = 
FallbackNativePriceEstimator::new(Box::new(primary), Box::new(fallback)); + + // Call 1: error (consecutive_errors = 1) + let result = estimator.estimate_native_price(TOKEN, TIMEOUT).await; + assert!(matches!( + result, + Err(PriceEstimationError::ProtocolInternal(_)) + )); + + // Call 2: success (consecutive_errors reset to 0) + let result = estimator.estimate_native_price(TOKEN, TIMEOUT).await; + assert_eq!(result.unwrap(), 1.0); + + // Call 3: error (consecutive_errors = 1) + let result = estimator.estimate_native_price(TOKEN, TIMEOUT).await; + assert!(matches!( + result, + Err(PriceEstimationError::ProtocolInternal(_)) + )); + + // Call 4: error (consecutive_errors = 2, still below threshold) + let result = estimator.estimate_native_price(TOKEN, TIMEOUT).await; + assert!(matches!( + result, + Err(PriceEstimationError::ProtocolInternal(_)) + )); + } + + #[tokio::test] + async fn does_not_switch_on_non_protocol_errors() { + let mut primary = MockNativePriceEstimating::new(); + primary + .expect_estimate_native_price() + .times(1) + .returning(|_, _| async { Err(PriceEstimationError::NoLiquidity) }.boxed()); + + let mut fallback = MockNativePriceEstimating::new(); + fallback.expect_estimate_native_price().never(); + + let estimator = FallbackNativePriceEstimator::new(Box::new(primary), Box::new(fallback)); + + let result = estimator.estimate_native_price(TOKEN, TIMEOUT).await; + assert!(matches!(result, Err(PriceEstimationError::NoLiquidity))); + } +} diff --git a/crates/shared/src/price_estimation/native/forwarder.rs b/crates/shared/src/price_estimation/native/forwarder.rs index 28089b9a1a..bfff0073a2 100644 --- a/crates/shared/src/price_estimation/native/forwarder.rs +++ b/crates/shared/src/price_estimation/native/forwarder.rs @@ -43,7 +43,11 @@ impl Forwarder { if let Some(id) = observe::distributed_tracing::request_id::from_current_span() { request = request.header("X-REQUEST-ID", id); } - let response = request.send().await.context("failed to send request")?; + let response = request.send().await.map_err(|err| { + PriceEstimationError::ProtocolInternal( + anyhow::Error::new(err).context("failed to send request"), + ) + })?; match response.status() { StatusCode::OK => { diff --git a/crates/shared/src/price_estimation/native/mod.rs b/crates/shared/src/price_estimation/native/mod.rs index 15f7bee858..b70e45ee8b 100644 --- a/crates/shared/src/price_estimation/native/mod.rs +++ b/crates/shared/src/price_estimation/native/mod.rs @@ -13,10 +13,16 @@ use { }; mod coingecko; +pub mod fallback; mod forwarder; mod oneinch; -pub use self::{coingecko::CoinGecko, forwarder::Forwarder, oneinch::OneInch}; +pub use self::{ + coingecko::CoinGecko, + fallback::FallbackNativePriceEstimator, + forwarder::Forwarder, + oneinch::OneInch, +}; pub type NativePrice = f64; pub type NativePriceEstimateResult = Result; From 1a29bbe65743f50b9e669db5eb7dc6fc0c5d1150 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Duarte?= Date: Wed, 18 Feb 2026 12:43:50 +0000 Subject: [PATCH 074/219] Fix limit layer in driver and bump limit to 20Mb (#4170) # Description The upgrade to axum 0.8 required an upper bound on loading requests completely into memory, so I reused the driver's limit. Turns out, the limit was not correctly applied and the new API forced it. 
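For context, axum's `Router::layer` only wraps routes that were already registered when it is called, so attaching the limit layer to a fresh `Router::new()` before any routes are added applies it to nothing. A minimal sketch of the broken and corrected ordering (the `/solve` route, handler, and 20 MB constant are illustrative only, assuming `axum` 0.8 and `tower-http` with the `limit` feature enabled):

```rust
use axum::{Router, extract::DefaultBodyLimit, routing::post};
use tower_http::limit::RequestBodyLimitLayer;

const REQUEST_BODY_LIMIT: usize = 20 * 1024 * 1024;

async fn solve(body: String) -> String {
    format!("received {} bytes", body.len())
}

// Broken: `layer` only applies to routes registered *before* it is called,
// so wrapping an empty router leaves the later `/solve` route unlimited.
fn broken() -> Router {
    Router::new()
        .layer(RequestBodyLimitLayer::new(REQUEST_BODY_LIMIT))
        .route("/solve", post(solve))
}

// Fixed: register routes first, disable axum's built-in 2 MB default, then
// apply the custom limit so it actually wraps `/solve`.
fn fixed() -> Router {
    Router::new()
        .route("/solve", post(solve))
        .layer(DefaultBodyLimit::disable())
        .layer(RequestBodyLimitLayer::new(REQUEST_BODY_LIMIT))
}
```

Registering the routes first and disabling axum's built-in 2 MB `DefaultBodyLimit` ensures the custom limit is the one that actually applies.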
This PR fixes the layer to work correctly and bumps the max request body size to 20MB # Changes - [ ] Fix the limit layer - [ ] Bump the default limit to 20MB ## How to test Staging --- crates/driver/src/infra/api/mod.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/crates/driver/src/infra/api/mod.rs b/crates/driver/src/infra/api/mod.rs index a6ac2cde60..07da4bfbf3 100644 --- a/crates/driver/src/infra/api/mod.rs +++ b/crates/driver/src/infra/api/mod.rs @@ -31,7 +31,7 @@ use { mod error; pub mod routes; -pub const REQUEST_BODY_LIMIT: usize = 10 * 1024 * 1024; +pub const REQUEST_BODY_LIMIT: usize = 20 * 1024 * 1024; pub struct Api { pub solvers: Vec, @@ -55,9 +55,7 @@ impl Api { app_data_retriever: Option, ) -> Result<(), std::io::Error> { // Add middleware. - let mut app = axum::Router::new().layer(tower::ServiceBuilder::new().layer( - tower_http::limit::RequestBodyLimitLayer::new(REQUEST_BODY_LIMIT), - )); + let mut app = axum::Router::new(); let balance_fetcher = account_balances::cached( self.eth.web3(), @@ -141,6 +139,9 @@ impl Api { app = app // axum's default body limit needs to be disabled to not have the default limit on top of our custom limit .layer(axum::extract::DefaultBodyLimit::disable()) + .layer(tower::ServiceBuilder::new().layer( + tower_http::limit::RequestBodyLimitLayer::new(REQUEST_BODY_LIMIT), + )) .layer( tower::ServiceBuilder::new() .layer(tower_http::trace::TraceLayer::new_for_http().make_span_with(make_span)) From 24a47092dcce10c015eea2b0e53a95ef02abc4ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Duarte?= Date: Wed, 18 Feb 2026 13:23:04 +0000 Subject: [PATCH 075/219] Remove request body limit in the driver (#4171) # Description 20Mb was not enough, this PR removes the limit. # Changes - [ ] Remove the limit layer ## How to test Staging --- crates/driver/src/domain/competition/pre_processing.rs | 9 ++------- crates/driver/src/infra/api/mod.rs | 8 ++------ 2 files changed, 4 insertions(+), 13 deletions(-) diff --git a/crates/driver/src/domain/competition/pre_processing.rs b/crates/driver/src/domain/competition/pre_processing.rs index a4d5105705..1b82c13246 100644 --- a/crates/driver/src/domain/competition/pre_processing.rs +++ b/crates/driver/src/domain/competition/pre_processing.rs @@ -7,12 +7,7 @@ use { eth, liquidity, }, - infra::{ - self, - api::{REQUEST_BODY_LIMIT, routes::solve::dto::SolveRequest}, - observe::metrics, - tokens, - }, + infra::{self, api::routes::solve::dto::SolveRequest, observe::metrics, tokens}, }, alloy::primitives::{Bytes, FixedBytes}, anyhow::{Context, Result}, @@ -570,7 +565,7 @@ async fn collect_request_body(request: Request) -> Result { observe::metrics::metrics().on_auction_overhead_start("driver", "stream_http_body"); let start = Instant::now(); - let body_bytes = axum::body::to_bytes(request.into_body(), REQUEST_BODY_LIMIT) + let body_bytes = axum::body::to_bytes(request.into_body(), usize::MAX) .await .context("failed to stream request body")?; diff --git a/crates/driver/src/infra/api/mod.rs b/crates/driver/src/infra/api/mod.rs index 07da4bfbf3..c594b472a0 100644 --- a/crates/driver/src/infra/api/mod.rs +++ b/crates/driver/src/infra/api/mod.rs @@ -31,8 +31,6 @@ use { mod error; pub mod routes; -pub const REQUEST_BODY_LIMIT: usize = 20 * 1024 * 1024; - pub struct Api { pub solvers: Vec, pub liquidity: liquidity::Fetcher, @@ -137,11 +135,9 @@ impl Api { } app = app - // axum's default body limit needs to be disabled to not have the default limit on top of our custom limit + // axum's default body 
limit is 2MB too low for solvers, 20MB is still too low + // so instead of constantly guessing and updating, we disable the limit altogether .layer(axum::extract::DefaultBodyLimit::disable()) - .layer(tower::ServiceBuilder::new().layer( - tower_http::limit::RequestBodyLimitLayer::new(REQUEST_BODY_LIMIT), - )) .layer( tower::ServiceBuilder::new() .layer(tower_http::trace::TraceLayer::new_for_http().make_span_with(make_span)) From 55de111bbfe07792531c01c85c82fb2b7c0a2e36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Duarte?= Date: Wed, 18 Feb 2026 13:33:14 +0000 Subject: [PATCH 076/219] [TRIVIAL] Bump alloy to 1.7.3 (#4168) # Description While looking into the dependencies I noticed our alloy version is a bit behind, this PR updates it. I've reviewed the changelog and there does not seem to be anything that will surprise us. It brings the bonus of some preparations being made for the glamsterdam network upgrade. # Changes - [ ] Alloy version bump to 1.7.3 ## How to test Compiler + Staging --- Cargo.lock | 118 ++++++++++++++++++++++++++++++----------------------- Cargo.toml | 4 +- 2 files changed, 68 insertions(+), 54 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f4717183d6..a7cbe2f5ff 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -47,9 +47,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cb837e538ce3eac04e357ef47b8acead0b14c83ec6bcafedd167e6a60c40876" +checksum = "4973038846323e4e69a433916522195dce2947770076c03078fc21c80ea0f1c4" dependencies = [ "alloy-consensus", "alloy-contract", @@ -84,9 +84,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12870ab65b131f609257436935047eec3cfabee8809732f6bf5a69fe2a18cf2e" +checksum = "b0c0dc44157867da82c469c13186015b86abef209bf0e41625e4b68bac61d728" dependencies = [ "alloy-eips", "alloy-primitives", @@ -111,9 +111,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47c66b14d2187de0c4efe4ef678aaa57a6a34cccdbea3a0773627fac9bd128f4" +checksum = "ba4cdb42df3871cd6b346d6a938ec2ba69a9a0f49d1f82714bc5c48349268434" dependencies = [ "alloy-consensus", "alloy-eips", @@ -125,9 +125,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9bf6afe8c25b63c98927c6f76d90cf8dc443cc4980a7d824151c84a6e568934" +checksum = "ca63b7125a981415898ffe2a2a696c83696c9c6bdb1671c8a912946bbd8e49e7" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -214,15 +214,28 @@ dependencies = [ "thiserror 2.0.17", ] +[[package]] +name = "alloy-eip7928" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3231de68d5d6e75332b7489cfcc7f4dfabeba94d990a10e4b923af0e6623540" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "borsh", + "serde", +] + [[package]] name = "alloy-eips" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f076d25ddfcd2f1cbcc234e072baf97567d1df0e3fccdc1f8af8cc8b18dc6299" +checksum = "b9f7ef09f21bd1e9cb8a686f168cb4a206646804567f0889eadb8dcc4c9288c8" dependencies = [ "alloy-eip2124", "alloy-eip2930", 
"alloy-eip7702", + "alloy-eip7928", "alloy-primitives", "alloy-rlp", "alloy-serde", @@ -251,9 +264,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "250dbd8496f04eabe997e6e4c5186a0630b8bc3dbe7552e1fd917d491ef811e9" +checksum = "ff42cd777eea61f370c0b10f2648a1c81e0b783066cd7269228aa993afd487f7" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -266,9 +279,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd45cdac957d1fa1d0c18f54f262350eb72f1adc38dd1f8b15f33f0747c6a60c" +checksum = "8cbca04f9b410fdc51aaaf88433cbac761213905a65fe832058bcf6690585762" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -292,9 +305,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fba5c43e055effb5bd33dbc74b1ab7fe0f367d8801a25af9e7c716b3ef5e440b" +checksum = "42d6d15e069a8b11f56bef2eccbad2a873c6dd4d4c81d04dda29710f5ea52f04" dependencies = [ "alloy-consensus", "alloy-eips", @@ -334,9 +347,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e87a90cacc27dffd91fa6440145934a782227d31b9876444c5924d3607084ea" +checksum = "d181c8cc7cf4805d7e589bf4074d56d55064fa1a979f005a45a62b047616d870" dependencies = [ "alloy-chains", "alloy-consensus", @@ -379,9 +392,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c24a102935aa9d5a8b8fc8c47f39a0823672c33f0b27b5806292cb80988e6345" +checksum = "e8bd82953194dec221aa4cbbbb0b1e2df46066fe9d0333ac25b43a311e122d13" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -423,9 +436,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57a65bb9060e43e9738bbd7c30d742ed962d609f2123a665bbdab7e6e0f13fd3" +checksum = "f2792758a93ae32a32e9047c843d536e1448044f78422d71bf7d7c05149e103f" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -448,9 +461,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98bfd40f4e36cb29015ec744bc764629edbe823ec6b95aceef2684090c142976" +checksum = "7bdcbf9dfd5eea8bfeb078b1d906da8cd3a39c4d4dbe7a628025648e323611f6" dependencies = [ "alloy-primitives", "alloy-rpc-types-anvil", @@ -464,9 +477,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ac7d0dbb62e807028554e34c2b5724a1f57132792684107c32009e84fcf4044" +checksum = "e0a3100b76987c1b1dc81f3abe592b7edc29e92b1242067a69d65e0030b35cf9" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -476,9 +489,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8faa6f22068857f58579271b15e042f4725ad35cdce2ed4778ba32ffd3102b92" +checksum = 
"dd720b63f82b457610f2eaaf1f32edf44efffe03ae25d537632e7d23e7929e1a" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -487,9 +500,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccb37a9eee8e7a19bb07b5cd55d33457884e44b212588b7429c5d318d2b90295" +checksum = "e1b21e1ad18ff1b31ff1030e046462ab8168cf8894e6778cd805c8bdfe2bd649" dependencies = [ "alloy-primitives", "derive_more 2.1.1", @@ -499,9 +512,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec734cce11f7fe889950b36b51589397528b26beb6f890834a2131ee9f174d7" +checksum = "9b2dc411f13092f237d2bf6918caf80977fc2f51485f9b90cb2a2f956912c8c9" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -520,9 +533,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe64cd4af2e68b2154ac02a7908249a448fbd3d1d05890786a5af93686083cc" +checksum = "1ad79f1e27e161943b5a4f99fe5534ef0849876214be411e0032c12f38e94daa" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -534,9 +547,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9504c0f00a72883e640abc4681a5691a57dec693bc28d4aa80257c8e1e9e6e1f" +checksum = "d459f902a2313737bc66d18ed094c25d2aeb268b74d98c26bbbda2aa44182ab0" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -546,9 +559,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27f076bfd74fccc63d50546e1765359736357a953de2eb778b7b6191571735e6" +checksum = "e2ce1e0dbf7720eee747700e300c99aac01b1a95bb93f493a01e78ee28bb1a37" dependencies = [ "alloy-primitives", "serde", @@ -557,9 +570,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d80748c209a68421ab6f737828ce6ede7543569a5cad099c1ec16fc1baa05620" +checksum = "2425c6f314522c78e8198979c8cbf6769362be4da381d4152ea8eefce383535d" dependencies = [ "alloy-dyn-abi", "alloy-primitives", @@ -574,9 +587,9 @@ dependencies = [ [[package]] name = "alloy-signer-aws" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2a0b80a0e21c1b8d7321d0a88bd115ad1182c293ca7e3dd0217c156f98b5b1e" +checksum = "e38b411077d7b17e464de7dfa599f5b94161cdffc25c2f28a90a3a345b6d6490" dependencies = [ "alloy-consensus", "alloy-network", @@ -593,9 +606,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17eb1eb39351b4bf20bb0710d8d3a91eb7918d3f3de2f3835f556842e33865cb" +checksum = "c3ecb71ee53d8d9c3fa7bac17542c8116ebc7a9726c91b1bf333ec3d04f5a789" dependencies = [ "alloy-consensus", "alloy-network", @@ -685,9 +698,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a0c1a0288cdff6ee2b2c2c98ab42889d221ca8a9ee4120ede59b5449e0dcb20" +checksum = 
"fa186e560d523d196580c48bf00f1bf62e63041f28ecf276acc22f8b27bb9f53" dependencies = [ "alloy-json-rpc", "auto_impl", @@ -708,12 +721,13 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36dfa207caf6b528b9466c714626f5b2dfd5e8d4595a74631d5670672dac102b" +checksum = "aa501ad58dd20acddbfebc65b52e60f05ebf97c52fa40d1b35e91f5e2da0ad0e" dependencies = [ "alloy-json-rpc", "alloy-transport", + "itertools 0.14.0", "reqwest", "serde_json", "tower 0.5.3", @@ -723,9 +737,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91620efb46f8d011e37f74fac53a643e830a7bb24982143094b887003cbfb6be" +checksum = "b9f00445db69d63298e2b00a0ea1d859f00e6424a3144ffc5eba9c31da995e16" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -756,9 +770,9 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.4.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb0d567f4830dea921868c7680004ae0c7f221b05e6477db6c077c7953698f56" +checksum = "6fa0c53e8c1e1ef4d01066b01c737fb62fc9397ab52c6e7bb5669f97d281b9bc" dependencies = [ "darling", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index 3099d89977..0669151eea 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,10 +3,10 @@ resolver = "3" members = ["crates/*"] [workspace.dependencies] -alloy = { version = "1.1.0", default-features = false } +alloy = { version = "1.7.3", default-features = false } # alloy-signer was added because eip712 on the regular alloy crate brings way too much stuff with it # as such, this is a workaround to force feature unification over alloy to the feature set we want -alloy-signer = { version = "1.1.0", default-features = false} +alloy-signer = { version = "1.7.3", default-features = false} anyhow = "1.0.100" async-trait = "0.1.80" axum = "0.8" From 4351d4c252d95f27edaadf8f96e5144527f4a464 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Duarte?= Date: Wed, 18 Feb 2026 13:33:24 +0000 Subject: [PATCH 077/219] [TRIVIAL] Bump opentelemetry->0.31 & tracing-opentelemetry->0.32.1 (#4169) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description One more for the upgrade pile. I'd like to think that this will slightly improve service health due to some of the points in the changelog — https://github.com/open-telemetry/opentelemetry-rust/blob/main/opentelemetry-sdk/CHANGELOG.md#0310 > * Fix: Restore true parallel exports in the async-native BatchSpanProcessor by honoring OTEL_BSP_MAX_CONCURRENT_EXPORTS (https://github.com/open-telemetry/opentelemetry-rust/pull/3028). A regression in https://github.com/open-telemetry/opentelemetry-rust/pull/2685 inadvertently awaited the export() future directly in opentelemetry-sdk/src/trace/span_processor_with_async_runtime.rs instead of spawning it on the runtime, forcing all exports to run sequentially. > * Fix: batch size accounting in BatchSpanProcessor when queue is full (https://github.com/open-telemetry/opentelemetry-rust/pull/3089). 
# Changes - [ ] Bump opentelemetry-* crates to 0.31 - [ ] Bump tracing-opentelemetry to 0.32.1 ## How to test Compile --- Cargo.lock | 76 +++++++++++++------ Cargo.toml | 8 +- .../src/distributed_tracing/tracing_axum.rs | 4 +- 3 files changed, 61 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a7cbe2f5ff..5d3ad257b7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4673,9 +4673,9 @@ dependencies = [ [[package]] name = "opentelemetry" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaf416e4cb72756655126f7dd7bb0af49c674f4c1b9903e80c009e0c37e552e6" +checksum = "b84bcd6ae87133e903af7ef497404dda70c60d0ea14895fc8a5e6722754fc2a0" dependencies = [ "futures-core", "futures-sink", @@ -4687,9 +4687,9 @@ dependencies = [ [[package]] name = "opentelemetry-http" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f6639e842a97dbea8886e3439710ae463120091e2e064518ba8e716e6ac36d" +checksum = "d7a6d09a73194e6b66df7c8f1b680f156d916a1a942abf2de06823dd02b7855d" dependencies = [ "async-trait", "bytes", @@ -4700,40 +4700,41 @@ dependencies = [ [[package]] name = "opentelemetry-otlp" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbee664a43e07615731afc539ca60c6d9f1a9425e25ca09c57bc36c87c55852b" +checksum = "7a2366db2dca4d2ad033cad11e6ee42844fd727007af5ad04a1730f4cb8163bf" dependencies = [ "http 1.4.0", "opentelemetry", "opentelemetry-http", "opentelemetry-proto", "opentelemetry_sdk", - "prost 0.13.5", + "prost 0.14.3", "reqwest", "thiserror 2.0.17", "tokio", - "tonic 0.13.1", + "tonic 0.14.4", "tracing", ] [[package]] name = "opentelemetry-proto" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e046fd7660710fe5a05e8748e70d9058dc15c94ba914e7c4faa7c728f0e8ddc" +checksum = "a7175df06de5eaee9909d4805a3d07e28bb752c34cab57fa9cff549da596b30f" dependencies = [ "opentelemetry", "opentelemetry_sdk", - "prost 0.13.5", - "tonic 0.13.1", + "prost 0.14.3", + "tonic 0.14.4", + "tonic-prost", ] [[package]] name = "opentelemetry_sdk" -version = "0.30.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11f644aa9e5e31d11896e024305d7e3c98a88884d9f8919dbf37a9991bc47a4b" +checksum = "e14ae4f5991976fd48df6d843de219ca6d31b01daaab2dad5af2badeded372bd" dependencies = [ "futures-channel", "futures-executor", @@ -4741,7 +4742,6 @@ dependencies = [ "opentelemetry", "percent-encoding", "rand 0.9.2", - "serde_json", "thiserror 2.0.17", ] @@ -5186,6 +5186,16 @@ dependencies = [ "prost-derive 0.13.5", ] +[[package]] +name = "prost" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2ea70524a2f82d518bce41317d0fae74151505651af45faf1ffbd6fd33f0568" +dependencies = [ + "bytes", + "prost-derive 0.14.3", +] + [[package]] name = "prost-derive" version = "0.12.6" @@ -5212,6 +5222,19 @@ dependencies = [ "syn 2.0.114", ] +[[package]] +name = "prost-derive" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" +dependencies = [ + "anyhow", + "itertools 0.14.0", + "proc-macro2", + "quote", + "syn 2.0.114", +] + [[package]] name = "prost-types" version = "0.12.6" @@ -7131,9 +7154,9 @@ dependencies = [ [[package]] name = "tonic" -version = "0.13.1" +version = 
"0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e581ba15a835f4d9ea06c55ab1bd4dce26fc53752c69a04aac00703bfb49ba9" +checksum = "7f32a6f80051a4111560201420c7885d0082ba9efe2ab61875c587bb6b18b9a0" dependencies = [ "async-trait", "base64 0.22.1", @@ -7146,7 +7169,7 @@ dependencies = [ "hyper-util", "percent-encoding", "pin-project", - "prost 0.13.5", + "sync_wrapper 1.0.2", "tokio", "tokio-stream", "tower 0.5.3", @@ -7155,6 +7178,17 @@ dependencies = [ "tracing", ] +[[package]] +name = "tonic-prost" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f86539c0089bfd09b1f8c0ab0239d80392af74c21bc9e0f15e1b4aca4c1647f" +dependencies = [ + "bytes", + "prost 0.14.3", + "tonic 0.14.4", +] + [[package]] name = "tower" version = "0.4.13" @@ -7276,14 +7310,12 @@ dependencies = [ [[package]] name = "tracing-opentelemetry" -version = "0.31.0" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddcf5959f39507d0d04d6413119c04f33b623f4f951ebcbdddddfad2d0623a9c" +checksum = "1ac28f2d093c6c477eaa76b23525478f38de514fa9aeb1285738d4b97a9552fc" dependencies = [ "js-sys", - "once_cell", "opentelemetry", - "opentelemetry_sdk", "smallvec", "tracing", "tracing-core", diff --git a/Cargo.toml b/Cargo.toml index 0669151eea..3d3b525730 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -75,9 +75,9 @@ multibase = "0.9" number = { path = "crates/number" } observe = { path = "crates/observe" } order-validation = { path = "crates/order-validation" } -opentelemetry = { version = "0.30", features = ["tracing"] } -opentelemetry-otlp = "0.30" -opentelemetry_sdk = "0.30" +opentelemetry = { version = "0.31", features = ["tracing"] } +opentelemetry-otlp = "0.31" +opentelemetry_sdk = "0.31" orderbook = { path = "crates/orderbook" } paste = "1.0" pin-project-lite = "0.2.14" @@ -96,7 +96,7 @@ time = "0.3.47" tiny-keccak = "2.0.2" tower = "0.5" tower-http = "0.6" -tracing-opentelemetry = "0.31" +tracing-opentelemetry = "0.32.1" tracing-serde = "0.2" vergen = "8" quote = "1.0.41" diff --git a/crates/observe/src/distributed_tracing/tracing_axum.rs b/crates/observe/src/distributed_tracing/tracing_axum.rs index d26fabcaec..8fbdf643c1 100644 --- a/crates/observe/src/distributed_tracing/tracing_axum.rs +++ b/crates/observe/src/distributed_tracing/tracing_axum.rs @@ -25,7 +25,9 @@ pub fn make_span(request: &Request) -> Span { let request_id = request_id(request.headers()); let span = info_span!("http_request", ?request_id, trace_id = field::Empty); - span.set_parent(parent_context); + if let Err(err) = span.set_parent(parent_context) { + tracing::error!(?err, "failed to set request parent span!"); + } { let _span = span.enter(); info!(uri = %request.uri(), method = %request.method(), "HTTP request"); From 4c2987a5aa63eab9cddab248ace9438fa884b82f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Duarte?= Date: Wed, 18 Feb 2026 13:51:45 +0000 Subject: [PATCH 078/219] Update old de/serialization errors (#4163) # Description The error handling that is being removed was added to avoid breaking any solver/partner code; the change was upped today (Feb 17th) and the teams with touch points to the affected parties were notified * https://nomevlabs.slack.com/archives/C036JAGRQ04/p1770811138347829 * https://nomevlabs.slack.com/archives/C0369B2UF6J/p1770810493856389 This PR "brings back" the newer and more accurate errors, relying on Axum's default extractor behavior. 
# Changes - [ ] Remove custom deserialization code to provide old error codes - [ ] Update E2E tests - [ ] Update orderbook/openapi.yml ## How to test E2E tests --- crates/e2e/tests/e2e/malformed_requests.rs | 43 +++++++++---------- crates/orderbook/openapi.yml | 18 ++++++++ crates/orderbook/src/api/get_app_data.rs | 8 +--- crates/orderbook/src/api/get_native_price.rs | 11 +---- crates/orderbook/src/api/get_order_by_uid.rs | 10 +---- crates/orderbook/src/api/get_order_status.rs | 10 +---- crates/orderbook/src/api/get_orders_by_tx.rs | 11 +---- .../src/api/get_solver_competition.rs | 19 ++------ .../src/api/get_solver_competition_v2.rs | 19 ++------ .../orderbook/src/api/get_token_metadata.rs | 11 +---- crates/orderbook/src/api/get_total_surplus.rs | 10 +---- crates/orderbook/src/api/get_user_orders.rs | 10 +---- crates/orderbook/src/api/post_order.rs | 13 ++---- crates/orderbook/src/api/post_quote.rs | 12 ++---- crates/orderbook/src/api/put_app_data.rs | 24 ++--------- 15 files changed, 73 insertions(+), 156 deletions(-) diff --git a/crates/e2e/tests/e2e/malformed_requests.rs b/crates/e2e/tests/e2e/malformed_requests.rs index 046f460c8b..622a0ea7b0 100644 --- a/crates/e2e/tests/e2e/malformed_requests.rs +++ b/crates/e2e/tests/e2e/malformed_requests.rs @@ -53,8 +53,8 @@ async fn http_validation(web3: Web3) { assert_eq!( response.status(), - StatusCode::NOT_FOUND, - "Expected 404 for invalid OrderUid ({description}): {uid}" + StatusCode::BAD_REQUEST, + "Expected 400 for invalid OrderUid ({description}): {uid}" ); } @@ -76,8 +76,8 @@ async fn http_validation(web3: Web3) { assert_eq!( response.status(), - StatusCode::NOT_FOUND, - "Expected 404 for invalid Address ({description}): {addr}" + StatusCode::BAD_REQUEST, + "Expected 400 for invalid Address ({description}): {addr}" ); } @@ -90,8 +90,8 @@ async fn http_validation(web3: Web3) { assert_eq!( response.status(), - StatusCode::NOT_FOUND, - "Expected 404 for invalid token Address ({description}): {addr}" + StatusCode::BAD_REQUEST, + "Expected 400 for invalid token Address ({description}): {addr}" ); } @@ -113,22 +113,19 @@ async fn http_validation(web3: Web3) { assert_eq!( response.status(), - StatusCode::NOT_FOUND, - "Expected 404 for invalid tx hash ({description}): {hash}" + StatusCode::BAD_REQUEST, + "Expected 400 for invalid tx hash ({description}): {hash}" ); } // Test malformed auction IDs - // Note: "-1" returns 404 because it doesn't match the u64 route pattern at all, - // while non-numeric strings return 400 as they match the path but fail - // deserialization for (id, description, expected_status) in [ - ("not-a-number", "non-numeric", StatusCode::NOT_FOUND), - ("-1", "negative number", StatusCode::NOT_FOUND), + ("not-a-number", "non-numeric", StatusCode::BAD_REQUEST), + ("-1", "negative number", StatusCode::BAD_REQUEST), ( "99999999999999999999999", "u64 overflow", - StatusCode::NOT_FOUND, + StatusCode::BAD_REQUEST, ), ] { let response = client @@ -218,8 +215,8 @@ async fn http_validation(web3: Web3) { assert_eq!( response.status(), - StatusCode::BAD_REQUEST, - "Missing required fields should return 400" + StatusCode::UNPROCESSABLE_ENTITY, + "Missing required fields should return 422" ); // Wrong field types @@ -241,8 +238,8 @@ async fn http_validation(web3: Web3) { assert_eq!( response.status(), - StatusCode::BAD_REQUEST, - "Wrong field types should return 400" + StatusCode::UNPROCESSABLE_ENTITY, + "Wrong field types should return 422" ); // Invalid enum value @@ -261,8 +258,8 @@ async fn http_validation(web3: Web3) { 
assert_eq!( response.status(), - StatusCode::BAD_REQUEST, - "Invalid enum value should return 400" + StatusCode::UNPROCESSABLE_ENTITY, + "Invalid enum value should return 422" ); // Test error response formats @@ -275,11 +272,13 @@ async fn http_validation(web3: Web3) { .await .unwrap(); - assert_eq!(response.status(), StatusCode::BAD_REQUEST); + assert_eq!(response.status(), StatusCode::UNPROCESSABLE_ENTITY); let body_text = response.text().await.unwrap(); assert!( - body_text.contains("deserialize error") || body_text.contains("missing field"), + body_text.contains("deserialize") + || body_text.contains("missing field") + || body_text.contains("Failed to deserialize"), "Deserialization error should contain helpful description. Got: {body_text}" ); diff --git a/crates/orderbook/openapi.yml b/crates/orderbook/openapi.yml index b47831b118..1d6a9eefc3 100644 --- a/crates/orderbook/openapi.yml +++ b/crates/orderbook/openapi.yml @@ -146,6 +146,8 @@ paths: application/json: schema: $ref: "#/components/schemas/Order" + "400": + description: Invalid order UID. "404": description: Order was not found. delete: @@ -206,6 +208,8 @@ paths: application/json: schema: $ref: "#/components/schemas/CompetitionOrderStatus" + "400": + description: Invalid order UID. "/api/v1/transactions/{txHash}/orders": get: operationId: getOrdersByTxHash @@ -225,6 +229,8 @@ paths: type: array items: $ref: "#/components/schemas/Order" + "400": + description: Invalid transaction hash. /api/v1/trades: get: operationId: getTrades @@ -473,6 +479,8 @@ paths: application/json: schema: $ref: "#/components/schemas/SolverCompetitionResponse" + "400": + description: Invalid auction ID. "404": description: No competition information available for this auction id. "/api/v1/solver_competition/by_tx_hash/{tx_hash}": @@ -496,6 +504,8 @@ paths: application/json: schema: $ref: "#/components/schemas/SolverCompetitionResponse" + "400": + description: Invalid transaction hash. "404": description: No competition information available for this `tx_hash`. /api/v1/solver_competition/latest: @@ -533,6 +543,8 @@ paths: application/json: schema: $ref: "#/components/schemas/SolverCompetitionResponse" + "400": + description: Invalid auction ID. "404": description: No competition information available for this auction id. "/api/v2/solver_competition/by_tx_hash/{tx_hash}": @@ -555,6 +567,8 @@ paths: application/json: schema: $ref: "#/components/schemas/SolverCompetitionResponse" + "400": + description: Invalid transaction hash. "404": description: No competition information available for this `tx_hash`. /api/v2/solver_competition/latest: @@ -601,6 +615,8 @@ paths: application/json: schema: $ref: "#/components/schemas/AppDataObject" + "400": + description: Invalid app data hash. "404": description: No full `appData` stored for this hash. put: @@ -697,6 +713,8 @@ paths: application/json: schema: $ref: "#/components/schemas/TotalSurplus" + "400": + description: Invalid address. 
components: schemas: TransactionHash: diff --git a/crates/orderbook/src/api/get_app_data.rs b/crates/orderbook/src/api/get_app_data.rs index 68ff733611..6b4f9f3a96 100644 --- a/crates/orderbook/src/api/get_app_data.rs +++ b/crates/orderbook/src/api/get_app_data.rs @@ -6,17 +6,13 @@ use { http::StatusCode, response::{IntoResponse, Json, Response}, }, - std::{str::FromStr, sync::Arc}, + std::sync::Arc, }; pub async fn get_app_data_handler( State(state): State<Arc<AppState>>, - Path(contract_app_data): Path<String>, + Path(contract_app_data): Path<AppDataHash>, ) -> Response { - let Ok(contract_app_data) = AppDataHash::from_str(&contract_app_data) else { - return StatusCode::NOT_FOUND.into_response(); - }; - let result = state .database_read .get_full_app_data(&contract_app_data) diff --git a/crates/orderbook/src/api/get_native_price.rs b/crates/orderbook/src/api/get_native_price.rs index 563b73bad5..52c490aeb4 100644 --- a/crates/orderbook/src/api/get_native_price.rs +++ b/crates/orderbook/src/api/get_native_price.rs @@ -3,23 +3,16 @@ use { alloy::primitives::Address, axum::{ extract::{Path, State}, - http::StatusCode, response::{IntoResponse, Json, Response}, }, model::quote::NativeTokenPrice, - std::{str::FromStr, sync::Arc}, + std::sync::Arc, }; pub async fn get_native_price_handler( State(state): State<Arc<AppState>>, - Path(token): Path<String>, + Path(token): Path<Address>
, ) -> Response { - // TODO: remove after all downstream callers have been notified of the status - // code changes - let Ok(token) = Address::from_str(&token) else { - return StatusCode::NOT_FOUND.into_response(); - }; - state .native_price_estimator .estimate_native_price(token, state.quote_timeout) diff --git a/crates/orderbook/src/api/get_order_by_uid.rs b/crates/orderbook/src/api/get_order_by_uid.rs index 4e0b9e6e71..7cc11721e3 100644 --- a/crates/orderbook/src/api/get_order_by_uid.rs +++ b/crates/orderbook/src/api/get_order_by_uid.rs @@ -7,19 +7,13 @@ use { response::{IntoResponse, Json, Response}, }, model::order::{Order, OrderUid}, - std::{str::FromStr, sync::Arc}, + std::sync::Arc, }; pub async fn get_order_by_uid_handler( State(state): State<Arc<AppState>>, - Path(uid): Path<String>, + Path(uid): Path<OrderUid>, ) -> Response { - // TODO: remove after all downstream callers have been notified of the status - // code changes - let Ok(uid) = OrderUid::from_str(&uid) else { - return StatusCode::NOT_FOUND.into_response(); - }; - let result = state.orderbook.get_order(&uid).await; get_order_by_uid_response(result) } diff --git a/crates/orderbook/src/api/get_order_status.rs b/crates/orderbook/src/api/get_order_status.rs index e90e9537a3..40d92c9292 100644 --- a/crates/orderbook/src/api/get_order_status.rs +++ b/crates/orderbook/src/api/get_order_status.rs @@ -6,19 +6,13 @@ use { response::{IntoResponse, Json, Response}, }, model::order::OrderUid, - std::{str::FromStr, sync::Arc}, + std::sync::Arc, }; pub async fn get_status_handler( State(state): State<Arc<AppState>>, - Path(uid): Path<String>, + Path(uid): Path<OrderUid>, ) -> Response { - // TODO: remove after all downstream callers have been notified of the status - // code changes - let Ok(uid) = OrderUid::from_str(&uid) else { - return StatusCode::NOT_FOUND.into_response(); - }; - let status = state.orderbook.get_order_status(&uid).await; match status { Ok(status) => Json(status).into_response(), diff --git a/crates/orderbook/src/api/get_orders_by_tx.rs b/crates/orderbook/src/api/get_orders_by_tx.rs index a06acee9db..a50c2dd43d 100644 --- a/crates/orderbook/src/api/get_orders_by_tx.rs +++ b/crates/orderbook/src/api/get_orders_by_tx.rs @@ -3,22 +3,15 @@ use { alloy::primitives::B256, axum::{ extract::{Path, State}, - http::StatusCode, response::{IntoResponse, Json, Response}, }, - std::{str::FromStr, sync::Arc}, + std::sync::Arc, }; pub async fn get_orders_by_tx_handler( State(state): State<Arc<AppState>>, - Path(hash): Path<String>, + Path(hash): Path<B256>, ) -> Response { - // TODO: remove after all downstream callers have been notified of the status - // code changes - let Ok(hash) = B256::from_str(&hash) else { - return StatusCode::NOT_FOUND.into_response(); - }; - let result = state.orderbook.get_orders_for_tx(&hash).await; match result { Ok(response) => Json(response).into_response(), diff --git a/crates/orderbook/src/api/get_solver_competition.rs b/crates/orderbook/src/api/get_solver_competition.rs index 7be9e6ef0d..b94574b0bf 100644 --- a/crates/orderbook/src/api/get_solver_competition.rs +++ b/crates/orderbook/src/api/get_solver_competition.rs @@ -6,23 +6,16 @@ use { alloy::primitives::B256, axum::{ extract::{Path, State}, - http::StatusCode, response::{IntoResponse, Json, Response}, }, model::{AuctionId, solver_competition::SolverCompetitionAPI}, - std::{str::FromStr, sync::Arc}, + std::sync::Arc, }; pub async fn get_solver_competition_by_id_handler( State(state): State<Arc<AppState>>, - Path(auction_id): Path<String>, + Path(auction_id): Path<u64>, ) -> Response { - // TODO: remove after all downstream callers have been notified of the
status - // code changes - let Ok(auction_id) = auction_id.parse::() else { - return StatusCode::NOT_FOUND.into_response(); - }; - // We use u64 to ensure that negative numbers are returned as BAD_REQUEST // however, there's a gap between u64::MAX and i64::MAX, numbers beyond i64::MAX // will be marked as NOT_FOUND as they're positive (and as such, valid) but @@ -41,14 +34,8 @@ pub async fn get_solver_competition_by_id_handler( pub async fn get_solver_competition_by_hash_handler( State(state): State>, - Path(tx_hash): Path, + Path(tx_hash): Path, ) -> Response { - // TODO: remove after all downstream callers have been notified of the status - // code changes - let Ok(tx_hash) = B256::from_str(&tx_hash) else { - return StatusCode::NOT_FOUND.into_response(); - }; - let handler: &dyn SolverCompetitionStoring = &state.database_read; handler .load_competition(Identifier::Transaction(tx_hash)) diff --git a/crates/orderbook/src/api/get_solver_competition_v2.rs b/crates/orderbook/src/api/get_solver_competition_v2.rs index 1e332c0446..71e7c48354 100644 --- a/crates/orderbook/src/api/get_solver_competition_v2.rs +++ b/crates/orderbook/src/api/get_solver_competition_v2.rs @@ -3,23 +3,16 @@ use { alloy::primitives::B256, axum::{ extract::{Path, State}, - http::StatusCode, response::{IntoResponse, Json, Response}, }, model::{AuctionId, solver_competition_v2::Response as CompetitionResponse}, - std::{str::FromStr, sync::Arc}, + std::sync::Arc, }; pub async fn get_solver_competition_by_id_handler( State(state): State>, - Path(auction_id): Path, + Path(auction_id): Path, ) -> Response { - // TODO: remove after all downstream callers have been notified of the status - // code changes - let Ok(auction_id) = auction_id.parse::() else { - return StatusCode::NOT_FOUND.into_response(); - }; - state .database_read .load_competition_by_id(auction_id) @@ -30,14 +23,8 @@ pub async fn get_solver_competition_by_id_handler( pub async fn get_solver_competition_by_hash_handler( State(state): State>, - Path(tx_hash): Path, + Path(tx_hash): Path, ) -> Response { - // TODO: remove after all downstream callers have been notified of the status - // code changes - let Ok(tx_hash) = B256::from_str(&tx_hash) else { - return StatusCode::NOT_FOUND.into_response(); - }; - state .database_read .load_competition_by_tx_hash(tx_hash) diff --git a/crates/orderbook/src/api/get_token_metadata.rs b/crates/orderbook/src/api/get_token_metadata.rs index 04794b61d9..06782b4dd4 100644 --- a/crates/orderbook/src/api/get_token_metadata.rs +++ b/crates/orderbook/src/api/get_token_metadata.rs @@ -3,22 +3,15 @@ use { alloy::primitives::Address, axum::{ extract::{Path, State}, - http::StatusCode, response::{IntoResponse, Json, Response}, }, - std::{str::FromStr, sync::Arc}, + std::sync::Arc, }; pub async fn get_token_metadata_handler( State(state): State>, - Path(token): Path, + Path(token): Path
<Address>, ) -> Response { - // TODO: remove after all downstream callers have been notified of the status - // code changes - let Ok(token) = Address::from_str(&token) else { - return StatusCode::NOT_FOUND.into_response(); - }; - let result = state.database_read.token_metadata(&token).await; match result { Ok(metadata) => Json(metadata).into_response(), diff --git a/crates/orderbook/src/api/get_total_surplus.rs b/crates/orderbook/src/api/get_total_surplus.rs index 143b67b9e8..04d5434f2f 100644 --- a/crates/orderbook/src/api/get_total_surplus.rs +++ b/crates/orderbook/src/api/get_total_surplus.rs @@ -7,19 +7,13 @@ use { response::{IntoResponse, Json, Response}, }, serde_json::json, - std::{str::FromStr, sync::Arc}, + std::sync::Arc, }; pub async fn get_total_surplus_handler( State(state): State<Arc<AppState>>, - Path(user): Path<String>, + Path(user): Path<Address>
, ) -> Response { - // TODO: remove after all downstream callers have been notified of the status - // code changes - let Ok(user) = Address::from_str(&user) else { - return StatusCode::NOT_FOUND.into_response(); - }; - let surplus = state.database_read.total_surplus(&user).await; match surplus { Ok(surplus) => ( diff --git a/crates/orderbook/src/api/get_user_orders.rs b/crates/orderbook/src/api/get_user_orders.rs index 07daef1352..8330a72313 100644 --- a/crates/orderbook/src/api/get_user_orders.rs +++ b/crates/orderbook/src/api/get_user_orders.rs @@ -7,7 +7,7 @@ use { response::{IntoResponse, Json, Response}, }, serde::Deserialize, - std::{str::FromStr, sync::Arc}, + std::sync::Arc, }; #[derive(Clone, Copy, Debug, Deserialize)] @@ -18,15 +18,9 @@ pub(crate) struct QueryParams { pub async fn get_user_orders_handler( State(state): State>, - Path(owner): Path, + Path(owner): Path
, Query(query): Query, ) -> Response { - // TODO: remove after all downstream callers have been notified of the status - // code changes - let Ok(owner) = Address::from_str(&owner) else { - return StatusCode::NOT_FOUND.into_response(); - }; - const DEFAULT_OFFSET: u64 = 0; const DEFAULT_LIMIT: u64 = 10; const MIN_LIMIT: u64 = 1; diff --git a/crates/orderbook/src/api/post_order.rs b/crates/orderbook/src/api/post_order.rs index 8e2bfea75e..c67270b84a 100644 --- a/crates/orderbook/src/api/post_order.rs +++ b/crates/orderbook/src/api/post_order.rs @@ -5,7 +5,6 @@ use { }, axum::{ Json, - body, extract::State, http::StatusCode, response::{IntoResponse, Response}, @@ -23,14 +22,10 @@ use { std::sync::Arc, }; -pub async fn post_order_handler(State(state): State>, body: body::Bytes) -> Response { - // TODO: remove after all downstream callers have been notified of the status - // code changes - let order = match serde_json::from_slice::(&body) { - Ok(order) => order, - Err(err) => return (StatusCode::BAD_REQUEST, err.to_string()).into_response(), - }; - +pub async fn post_order_handler( + State(state): State>, + Json(order): Json, +) -> Response { state .orderbook .add_order(order.clone()) diff --git a/crates/orderbook/src/api/post_quote.rs b/crates/orderbook/src/api/post_quote.rs index 90febb2575..4981e6cc64 100644 --- a/crates/orderbook/src/api/post_quote.rs +++ b/crates/orderbook/src/api/post_quote.rs @@ -6,7 +6,6 @@ use { }, axum::{ Json, - body, extract::State, response::{IntoResponse, Response}, }, @@ -16,13 +15,10 @@ use { std::sync::Arc, }; -pub async fn post_quote_handler(State(state): State>, body: body::Bytes) -> Response { - // TODO: remove after all downstream callers have been notified of the status - // code changes - let Ok(request) = serde_json::from_slice::(&body) else { - return StatusCode::BAD_REQUEST.into_response(); - }; - +pub async fn post_quote_handler( + State(state): State>, + Json(request): Json, +) -> Response { state .quotes .calculate_quote(&request) diff --git a/crates/orderbook/src/api/put_app_data.rs b/crates/orderbook/src/api/put_app_data.rs index 835628fa44..a1b5be2876 100644 --- a/crates/orderbook/src/api/put_app_data.rs +++ b/crates/orderbook/src/api/put_app_data.rs @@ -2,24 +2,17 @@ use { crate::api::{AppState, internal_error_reply}, app_data::{AppDataDocument, AppDataHash}, axum::{ - body::{self}, extract::{Path, State}, http::StatusCode, response::{IntoResponse, Json, Response}, }, - std::{str::FromStr, sync::Arc}, + std::sync::Arc, }; pub async fn put_app_data_without_hash( State(state): State>, - body: body::Bytes, + Json(document): Json, ) -> Response { - // TODO: remove after all downstream callers have been notified of the status - // code changes - let Ok(document) = serde_json::from_slice::(&body) else { - return StatusCode::BAD_REQUEST.into_response(); - }; - state .app_data .register(None, document.full_app_data.as_bytes()) @@ -29,18 +22,9 @@ pub async fn put_app_data_without_hash( pub async fn put_app_data_with_hash( State(state): State>, - Path(hash): Path, - body: body::Bytes, + Path(hash): Path, + Json(document): Json, ) -> Response { - // TODO: remove after all downstream callers have been notified of the status - // code changes - let Ok(hash) = AppDataHash::from_str(&hash) else { - return StatusCode::NOT_FOUND.into_response(); - }; - let Ok(document) = serde_json::from_slice::(&body) else { - return StatusCode::BAD_REQUEST.into_response(); - }; - state .app_data .register(Some(hash), document.full_app_data.as_bytes()) From 
a61a42f9a6771d7fc4ce5cefa152f35d04aeb86f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Duarte?= Date: Wed, 18 Feb 2026 15:13:32 +0000 Subject: [PATCH 079/219] Fix tracing spans on solvers (#4173) # Description https://github.com/cowprotocol/services/pull/4169 changed the `set_parent` call from silently failing to returning an error, which I proceeded to log as an error; this is very spammy because it happens on every HTTP request. Digging further, the issue is that the baseline solver does not set the "main parent span" which we would like to have, so this PR also addresses that (still needs to be enabled in infra). # Changes - [ ] Fix the log level - [ ] Add & use tracing arguments in solvers ## How to test Run a test in staging. Tested in base, deployment around 14:52. --- crates/observe/src/distributed_tracing/tracing_axum.rs | 2 +- crates/solvers/src/infra/cli.rs | 4 ++++ crates/solvers/src/run.rs | 3 ++- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/crates/observe/src/distributed_tracing/tracing_axum.rs b/crates/observe/src/distributed_tracing/tracing_axum.rs index 8fbdf643c1..a2f1a1a755 100644 --- a/crates/observe/src/distributed_tracing/tracing_axum.rs +++ b/crates/observe/src/distributed_tracing/tracing_axum.rs @@ -26,7 +26,7 @@ pub fn make_span(request: &Request) -> Span { let span = info_span!("http_request", ?request_id, trace_id = field::Empty); if let Err(err) = span.set_parent(parent_context) { - tracing::error!(?err, "failed to set request parent span!"); + tracing::debug!(?err, "failed to set request parent span"); } { let _span = span.enter(); diff --git a/crates/solvers/src/infra/cli.rs b/crates/solvers/src/infra/cli.rs index 601690a51e..49a13a1fbe 100644 --- a/crates/solvers/src/infra/cli.rs +++ b/crates/solvers/src/infra/cli.rs @@ -2,6 +2,7 @@ use { clap::{Parser, Subcommand}, + shared::arguments::TracingArguments, std::{net::SocketAddr, path::PathBuf}, }; @@ -21,6 +22,9 @@ pub struct Args { #[clap(long, env, default_value = "false")] pub use_json_logs: bool, + #[clap(flatten)] + pub tracing: TracingArguments, + /// The socket address to bind to. 
#[arg(long, env, default_value = "127.0.0.1:7872")] pub addr: SocketAddr, diff --git a/crates/solvers/src/run.rs b/crates/solvers/src/run.rs index 41cd1b71c3..ad87ccfe60 100644 --- a/crates/solvers/src/run.rs +++ b/crates/solvers/src/run.rs @@ -6,6 +6,7 @@ use { infra::{cli, config}, }, clap::Parser, + shared::arguments::tracing_config, std::net::SocketAddr, tokio::sync::oneshot, }; @@ -29,7 +30,7 @@ async fn run_with(args: cli::Args, bind: Option>) { &args.log, tracing::Level::ERROR.into(), args.use_json_logs, - None, + tracing_config(&args.tracing, "solvers".into()), ); observe::tracing::initialize_reentrant(&obs_config); #[cfg(unix)] From 0f5da5818b28f53cba00ce59018425a173e81a1c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Duarte?= Date: Wed, 18 Feb 2026 17:29:08 +0000 Subject: [PATCH 080/219] Upgrade reqwest to 0.13 (#4172) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit # Description Upgrades reqwest to 0.13 — this carries the change from native-tls to rustls by default, I tested this out and nothing broke # Changes - [ ] Upgrade reqwest to 0.13 ## How to test Tested on base staging, didn't see any issues --- Cargo.lock | 341 +++++++++++++++++++++++++++++++++++---- Cargo.toml | 2 +- crates/driver/Cargo.toml | 2 +- crates/e2e/Cargo.toml | 2 +- crates/shared/Cargo.toml | 2 +- 5 files changed, 312 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5d3ad257b7..4ee0250813 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -380,7 +380,7 @@ dependencies = [ "lru 0.16.3", "parking_lot", "pin-project", - "reqwest", + "reqwest 0.12.28", "serde", "serde_json", "thiserror 2.0.17", @@ -448,7 +448,7 @@ dependencies = [ "alloy-transport-ws", "futures", "pin-project", - "reqwest", + "reqwest 0.12.28", "serde", "serde_json", "tokio", @@ -728,7 +728,7 @@ dependencies = [ "alloy-json-rpc", "alloy-transport", "itertools 0.14.0", - "reqwest", + "reqwest 0.12.28", "serde_json", "tower 0.5.3", "tracing", @@ -1206,7 +1206,7 @@ dependencies = [ "prometheus", "prometheus-metric-storage", "rand 0.8.5", - "reqwest", + "reqwest 0.13.2", "rust_decimal", "s3", "serde", @@ -2060,6 +2060,12 @@ dependencies = [ "shlex", ] +[[package]] +name = "cesu8" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c" + [[package]] name = "cfg-if" version = "1.0.4" @@ -2200,6 +2206,16 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" +[[package]] +name = "combine" +version = "4.6.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" +dependencies = [ + "bytes", + "memchr", +] + [[package]] name = "compression-codecs" version = "0.4.36" @@ -2320,7 +2336,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "reqwest", + "reqwest 0.13.2", "serde", "serde_json", "syn 2.0.114", @@ -2789,7 +2805,7 @@ dependencies = [ "prometheus", "prometheus-metric-storage", "rand 0.8.5", - "reqwest", + "reqwest 0.13.2", "s3", "serde", "serde-ext", @@ -2849,7 +2865,7 @@ dependencies = [ "observe", "orderbook", "refunder", - "reqwest", + "reqwest 0.13.2", "rstest", "serde_json", "shared", @@ -2992,7 +3008,7 @@ dependencies = [ "prometheus", "prometheus-metric-storage", "rand 0.8.5", - "reqwest", + "reqwest 0.13.2", "scopeguard", "serde", "serde_json", @@ -3297,8 
+3313,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" dependencies = [ "cfg-if", + "js-sys", "libc", "wasi", + "wasm-bindgen", ] [[package]] @@ -3308,9 +3326,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" dependencies = [ "cfg-if", + "js-sys", "libc", "r-efi", "wasip2", + "wasm-bindgen", ] [[package]] @@ -3999,6 +4019,28 @@ dependencies = [ "tracing", ] +[[package]] +name = "jni" +version = "0.21.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97" +dependencies = [ + "cesu8", + "cfg-if", + "combine", + "jni-sys", + "log", + "thiserror 1.0.69", + "walkdir", + "windows-sys 0.45.0", +] + +[[package]] +name = "jni-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" + [[package]] name = "jobserver" version = "0.1.34" @@ -4011,9 +4053,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.83" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" +checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" dependencies = [ "once_cell", "wasm-bindgen", @@ -4172,6 +4214,12 @@ dependencies = [ "hashbrown 0.16.1", ] +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + [[package]] name = "macro-string" version = "0.1.4" @@ -4695,7 +4743,7 @@ dependencies = [ "bytes", "http 1.4.0", "opentelemetry", - "reqwest", + "reqwest 0.12.28", ] [[package]] @@ -4710,7 +4758,7 @@ dependencies = [ "opentelemetry-proto", "opentelemetry_sdk", "prost 0.14.3", - "reqwest", + "reqwest 0.12.28", "thiserror 2.0.17", "tokio", "tonic 0.14.4", @@ -4789,7 +4837,7 @@ dependencies = [ "order-validation", "prometheus", "prometheus-metric-storage", - "reqwest", + "reqwest 0.13.2", "serde", "serde_json", "serde_with", @@ -5272,6 +5320,62 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +[[package]] +name = "quinn" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" +dependencies = [ + "bytes", + "cfg_aliases", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls 0.23.36", + "socket2 0.6.1", + "thiserror 2.0.17", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-proto" +version = "0.11.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" +dependencies = [ + "aws-lc-rs", + "bytes", + "getrandom 0.3.4", + "lru-slab", + "rand 0.9.2", + "ring", + "rustc-hash", + "rustls 0.23.36", + "rustls-pki-types", + "slab", + "thiserror 2.0.17", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" +dependencies 
= [ + "cfg_aliases", + "libc", + "once_cell", + "socket2 0.6.1", + "tracing", + "windows-sys 0.60.2", +] + [[package]] name = "quote" version = "1.0.43" @@ -5384,7 +5488,7 @@ dependencies = [ "observe", "prometheus", "prometheus-metric-storage", - "reqwest", + "reqwest 0.13.2", "thiserror 1.0.69", "tokio", "tracing", @@ -5504,6 +5608,44 @@ name = "reqwest" version = "0.12.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" +dependencies = [ + "base64 0.22.1", + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "http 1.4.0", + "http-body 1.0.1", + "http-body-util", + "hyper 1.8.1", + "hyper-tls", + "hyper-util", + "js-sys", + "log", + "native-tls", + "percent-encoding", + "pin-project-lite", + "rustls-pki-types", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper 1.0.2", + "tokio", + "tokio-native-tls", + "tower 0.5.3", + "tower-http", + "tower-service", + "url", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "reqwest" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab3f43e3283ab1488b624b44b0e988d0acea0b3214e694730a055cb6b2efa801" dependencies = [ "base64 0.22.1", "bytes", @@ -5519,21 +5661,22 @@ dependencies = [ "http-body-util", "hyper 1.8.1", "hyper-rustls 0.27.7", - "hyper-tls", "hyper-util", "js-sys", "log", "mime", - "native-tls", "percent-encoding", "pin-project-lite", + "quinn", + "rustls 0.23.36", "rustls-pki-types", + "rustls-platform-verifier", "serde", "serde_json", "serde_urlencoded", "sync_wrapper 1.0.2", "tokio", - "tokio-native-tls", + "tokio-rustls 0.26.4", "tokio-util", "tower 0.5.3", "tower-http", @@ -5777,9 +5920,37 @@ version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "21e6f2ab2928ca4291b86736a8bd920a277a399bba1589409d72154ff87c1282" dependencies = [ + "web-time", "zeroize", ] +[[package]] +name = "rustls-platform-verifier" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d99feebc72bae7ab76ba994bb5e121b8d83d910ca40b36e0921f53becc41784" +dependencies = [ + "core-foundation 0.10.1", + "core-foundation-sys", + "jni", + "log", + "once_cell", + "rustls 0.23.36", + "rustls-native-certs", + "rustls-platform-verifier-android", + "rustls-webpki 0.103.8", + "security-framework 3.5.1", + "security-framework-sys", + "webpki-root-certs", + "windows-sys 0.61.2", +] + +[[package]] +name = "rustls-platform-verifier-android" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" + [[package]] name = "rustls-webpki" version = "0.101.7" @@ -5840,6 +6011,15 @@ dependencies = [ "tokio", ] +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + [[package]] name = "schannel" version = "0.1.28" @@ -6209,7 +6389,7 @@ dependencies = [ "rand 0.8.5", "rate-limit", "regex", - "reqwest", + "reqwest 0.13.2", "rust_decimal", "serde", "serde_json", @@ -6345,7 +6525,7 @@ dependencies = [ "observe", "prometheus", "prometheus-metric-storage", - "reqwest", + "reqwest 0.13.2", "serde", "serde_json", "serde_with", @@ -7555,6 +7735,16 @@ dependencies = [ "libc", ] +[[package]] +name = "walkdir" +version = "2.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + [[package]] name = "want" version = "0.3.1" @@ -7587,9 +7777,9 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" +checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" dependencies = [ "cfg-if", "once_cell", @@ -7600,11 +7790,12 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.56" +version = "0.4.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c" +checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" dependencies = [ "cfg-if", + "futures-util", "js-sys", "once_cell", "wasm-bindgen", @@ -7613,9 +7804,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" +checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -7623,9 +7814,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" +checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" dependencies = [ "bumpalo", "proc-macro2", @@ -7636,18 +7827,18 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.106" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" +checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" dependencies = [ "unicode-ident", ] [[package]] name = "wasm-streams" -version = "0.4.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" +checksum = "9d1ec4f6517c9e11ae630e200b2b65d193279042e28edd4a2cda233e46670bbb" dependencies = [ "futures-util", "js-sys", @@ -7672,9 +7863,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.83" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" +checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" dependencies = [ "js-sys", "wasm-bindgen", @@ -7690,6 +7881,15 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "webpki-root-certs" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "804f18a4ac2676ffb4e8b5b5fa9ae38af06df08162314f96a68d2a363e21a8ca" +dependencies = [ + "rustls-pki-types", +] + [[package]] name = "webpki-roots" version = "0.26.11" @@ -7718,6 +7918,15 @@ dependencies = [ "wasite", ] +[[package]] +name = "winapi-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys 0.61.2", +] + [[package]] name = "windows-core" version = "0.62.2" @@ -7788,6 +7997,15 @@ dependencies = [ "windows-link", ] +[[package]] +name = "windows-sys" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +dependencies = [ + "windows-targets 0.42.2", +] + [[package]] name = "windows-sys" version = "0.48.0" @@ -7824,6 +8042,21 @@ dependencies = [ "windows-link", ] +[[package]] +name = "windows-targets" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" +dependencies = [ + "windows_aarch64_gnullvm 0.42.2", + "windows_aarch64_msvc 0.42.2", + "windows_i686_gnu 0.42.2", + "windows_i686_msvc 0.42.2", + "windows_x86_64_gnu 0.42.2", + "windows_x86_64_gnullvm 0.42.2", + "windows_x86_64_msvc 0.42.2", +] + [[package]] name = "windows-targets" version = "0.48.5" @@ -7872,6 +8105,12 @@ dependencies = [ "windows_x86_64_msvc 0.53.1", ] +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" + [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" @@ -7890,6 +8129,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" +[[package]] +name = "windows_aarch64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" + [[package]] name = "windows_aarch64_msvc" version = "0.48.5" @@ -7908,6 +8153,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" +[[package]] +name = "windows_i686_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" + [[package]] name = "windows_i686_gnu" version = "0.48.5" @@ -7938,6 +8189,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" +[[package]] +name = "windows_i686_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" + [[package]] name = "windows_i686_msvc" version = "0.48.5" @@ -7956,6 +8213,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" +[[package]] +name = "windows_x86_64_gnu" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" + [[package]] name = "windows_x86_64_gnu" version = "0.48.5" @@ -7974,6 +8237,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" + [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" @@ -7992,6 +8261,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" +[[package]] +name = "windows_x86_64_msvc" +version = "0.42.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" + [[package]] name = "windows_x86_64_msvc" version = "0.48.5" diff --git a/Cargo.toml b/Cargo.toml index 3d3b525730..3b65190132 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,7 +37,7 @@ prometheus = "0.13.4" prometheus-metric-storage = "0.5.0" rand = "0.8.5" regex = "1.10.4" -reqwest = "0.12" +reqwest = "0.13" rstest = "0.26" ruint = { version = "1.17.2", default-features = false } serde = { version = "1.0.203", features = ["derive"] } diff --git a/crates/driver/Cargo.toml b/crates/driver/Cargo.toml index e174bb026f..a3ceb59a24 100644 --- a/crates/driver/Cargo.toml +++ b/crates/driver/Cargo.toml @@ -41,7 +41,7 @@ number = { workspace = true } prometheus = { workspace = true } prometheus-metric-storage = { workspace = true } rand = { workspace = true } -reqwest = { workspace = true } +reqwest = { workspace = true, features = ["query"] } s3 = { workspace = true } serde = { workspace = true, features = ["derive"] } serde-ext = { workspace = true } diff --git a/crates/e2e/Cargo.toml b/crates/e2e/Cargo.toml index eb024a69ea..2541d0eb0c 100644 --- a/crates/e2e/Cargo.toml +++ b/crates/e2e/Cargo.toml @@ -31,7 +31,7 @@ model = { workspace = true, features = ["e2e"] } number = { workspace = true } observe = { workspace = true } orderbook = { workspace = true, features = ["e2e"] } -reqwest = { workspace = true, features = ["blocking"] } +reqwest = { workspace = true, features = ["blocking", "query"] } serde_json = { workspace = true } shared = { workspace = true } solver = { workspace = true } diff --git a/crates/shared/Cargo.toml b/crates/shared/Cargo.toml index b156517aec..3ae6756896 100644 --- a/crates/shared/Cargo.toml +++ b/crates/shared/Cargo.toml @@ -41,7 +41,7 @@ prometheus = { workspace = true } prometheus-metric-storage = { workspace = true } rand = { workspace = true } rate-limit = { workspace = true } -reqwest = { workspace = true, features = ["cookies", "gzip", "json"] } +reqwest = { workspace = true, features = ["cookies", "gzip", "json", "query"] } rust_decimal = { workspace = true, features = ["maths"] } serde = { workspace = true } serde_json = { workspace = true } From 0743608fd386f8c1f02293514d05a2c5f69a6913 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Duarte?= Date: Wed, 18 Feb 2026 17:46:31 +0000 Subject: [PATCH 081/219] [TRIVIAL] Remove direct dependency on derivative crate (#4174) # Description I was reading through the issues when I saw the cargo audit one, even though we still depend on derivative transitively (thus we can't remove it from the audit list), getting Claude to remove our direct dependencies was less than 5 minutes. 
# Changes - [ ] Replace derivative proc-macro usage with manual trait implementations: ## How to test Compilation + existing tests --- Cargo.lock | 2 - Cargo.toml | 1 - crates/shared/Cargo.toml | 1 - crates/shared/src/zeroex_api.rs | 41 +++++++++++++--- crates/solver/Cargo.toml | 1 - crates/solver/src/liquidity/mod.rs | 75 +++++++++++++++++++++--------- 6 files changed, 88 insertions(+), 33 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4ee0250813..875e41ae8e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6368,7 +6368,6 @@ dependencies = [ "contracts", "dashmap", "database", - "derivative", "derive_more 1.0.0", "ethrpc", "futures", @@ -6480,7 +6479,6 @@ dependencies = [ "async-trait", "const-hex", "contracts", - "derivative", "ethrpc", "futures", "hex-literal", diff --git a/Cargo.toml b/Cargo.toml index 3b65190132..d3e4b4df54 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,7 +15,6 @@ cached = { version = "0.49.3", default-features = false } chrono = { version = "0.4.38", default-features = false } clap = { version = "4.5.6", features = ["derive", "env"] } dashmap = "6.1.0" -derivative = "2.2.0" derive_more = { version = "1.0.0", features = ["full"] } mimalloc = "0.1.43" tikv-jemallocator = { version = "0.6", features = ["unprefixed_malloc_on_supported_platforms", "profiling"] } diff --git a/crates/shared/Cargo.toml b/crates/shared/Cargo.toml index 3ae6756896..9cc14c52b7 100644 --- a/crates/shared/Cargo.toml +++ b/crates/shared/Cargo.toml @@ -24,7 +24,6 @@ contracts = { workspace = true } dashmap = { workspace = true } database = { workspace = true } derive_more = { workspace = true } -derivative = { workspace = true } ethrpc = { workspace = true } futures = { workspace = true } observe = { workspace = true } diff --git a/crates/shared/src/zeroex_api.rs b/crates/shared/src/zeroex_api.rs index 32e3e8fa4d..9c3359cc02 100644 --- a/crates/shared/src/zeroex_api.rs +++ b/crates/shared/src/zeroex_api.rs @@ -8,7 +8,6 @@ use { alloy::primitives::{Address, B256, U256, address}, anyhow::{Context, Result}, chrono::{DateTime, NaiveDateTime, TimeZone, Utc}, - derivative::Derivative, ethrpc::block_stream::{BlockInfo, CurrentBlockWatcher}, number::serialization::HexOrDecimalU256, observe::tracing::tracing_headers, @@ -82,11 +81,9 @@ impl Default for OrdersQuery { } #[serde_as] -#[derive(Debug, Derivative, Clone, Deserialize, Serialize, Eq, PartialEq)] -#[derivative(Default)] +#[derive(Debug, Clone, Deserialize, Serialize, Eq, PartialEq)] #[serde(rename_all = "camelCase")] pub struct OrderMetadata { - #[derivative(Default(value = "DateTime::::MIN_UTC"))] pub created_at: DateTime, #[serde(with = "bytes_hex")] pub order_hash: Vec, @@ -94,6 +91,16 @@ pub struct OrderMetadata { pub remaining_fillable_taker_amount: u128, } +impl Default for OrderMetadata { + fn default() -> Self { + Self { + created_at: DateTime::::MIN_UTC, + order_hash: Default::default(), + remaining_fillable_taker_amount: Default::default(), + } + } +} + #[derive(Debug, Clone, Deserialize, Serialize, Eq, PartialEq, Default)] #[serde(rename_all = "camelCase")] pub struct ZeroExSignature { @@ -104,15 +111,13 @@ pub struct ZeroExSignature { } #[serde_as] -#[derive(Debug, Derivative, Clone, Deserialize, Serialize, Eq, PartialEq)] -#[derivative(Default)] +#[derive(Debug, Clone, Deserialize, Serialize, Eq, PartialEq)] #[serde(rename_all = "camelCase")] pub struct Order { /// The ID of the Ethereum chain where the `verifying_contract` is located. pub chain_id: u64, /// Timestamp in seconds of when the order expires. 
Expired orders cannot be /// filled. - #[derivative(Default(value = "NaiveDateTime::MAX.and_utc().timestamp() as u64"))] #[serde_as(as = "DisplayFromStr")] pub expiry: u64, /// The address of the entity that will receive any fees stipulated by the @@ -156,6 +161,28 @@ pub struct Order { pub verifying_contract: Address, } +impl Default for Order { + fn default() -> Self { + Self { + chain_id: Default::default(), + expiry: NaiveDateTime::MAX.and_utc().timestamp() as u64, + fee_recipient: Default::default(), + maker: Default::default(), + maker_amount: Default::default(), + maker_token: Default::default(), + pool: Default::default(), + salt: Default::default(), + sender: Default::default(), + signature: Default::default(), + taker: Default::default(), + taker_amount: Default::default(), + taker_token: Default::default(), + taker_token_fee_amount: Default::default(), + verifying_contract: Default::default(), + } + } +} + #[derive(Debug, Default, Clone, Deserialize, Serialize, Eq, PartialEq)] pub struct OrderRecord(Arc); diff --git a/crates/solver/Cargo.toml b/crates/solver/Cargo.toml index 71efe6cdcd..38c48ae415 100644 --- a/crates/solver/Cargo.toml +++ b/crates/solver/Cargo.toml @@ -34,7 +34,6 @@ tokio = { workspace = true, features = ["macros", "rt-multi-thread", "time"] } tracing = { workspace = true } [dev-dependencies] -derivative = { workspace = true } maplit = { workspace = true } tokio = { workspace = true, features = ["test-util"] } testlib = { workspace = true } diff --git a/crates/solver/src/liquidity/mod.rs b/crates/solver/src/liquidity/mod.rs index cd4c789f49..5a118cd54b 100644 --- a/crates/solver/src/liquidity/mod.rs +++ b/crates/solver/src/liquidity/mod.rs @@ -4,8 +4,6 @@ pub mod uniswap_v2; pub mod uniswap_v3; pub mod zeroex; -#[cfg(test)] -use derivative::Derivative; use { crate::settlement::SettlementEncoder, alloy::primitives::{Address, U256}, @@ -85,8 +83,7 @@ pub enum Exchange { /// solvers. User orders (market + limit) containing OrderUid are the orders /// from the orderbook. #[derive(Debug, Clone)] -#[cfg_attr(test, derive(Derivative))] -#[cfg_attr(test, derivative(PartialEq))] +#[cfg_attr(test, derive(PartialEq))] pub enum LimitOrderId { Market(OrderUid), Limit(OrderUid), @@ -102,8 +99,7 @@ pub enum LimitOrderId { /// (1) and (2) are gathered when the auction is cut and they are sent to /// searchers (3) are received from searchers as part of the solution. #[derive(Debug, Clone)] -#[cfg_attr(test, derive(Derivative))] -#[cfg_attr(test, derivative(PartialEq))] +#[cfg_attr(test, derive(PartialEq))] pub enum LiquidityOrderId { /// TODO: Split into different variants once we have a DTO of order model /// for `driver` in driver solver colocation TODO: The only reason why @@ -130,8 +126,6 @@ impl From for LimitOrderId { /// Basic limit sell and buy orders #[derive(Clone)] -#[cfg_attr(test, derive(Derivative))] -#[cfg_attr(test, derivative(PartialEq))] pub struct LimitOrder { // Opaque Identifier for debugging purposes pub id: LimitOrderId, @@ -144,11 +138,25 @@ pub struct LimitOrder { pub partially_fillable: bool, /// Takes partiall fill into account. 
pub user_fee: U256, - #[cfg_attr(test, derivative(PartialEq = "ignore"))] pub settlement_handling: Arc>, pub exchange: Exchange, } +#[cfg(test)] +impl PartialEq for LimitOrder { + fn eq(&self, other: &Self) -> bool { + self.id == other.id + && self.sell_token == other.sell_token + && self.buy_token == other.buy_token + && self.sell_amount == other.sell_amount + && self.buy_amount == other.buy_amount + && self.kind == other.kind + && self.partially_fillable == other.partially_fillable + && self.user_fee == other.user_fee + && self.exchange == other.exchange + } +} + impl LimitOrder { /// Returns the full execution amount for the specified limit order. pub fn full_execution_amount(&self) -> U256 { @@ -239,17 +247,24 @@ impl Default for LimitOrder { /// 2 sided constant product automated market maker with equal reserve value and /// a trading fee (e.g. Uniswap, Sushiswap) #[derive(Clone)] -#[cfg_attr(test, derive(Derivative))] -#[cfg_attr(test, derivative(PartialEq))] pub struct ConstantProductOrder { pub address: Address, pub tokens: TokenPair, pub reserves: (u128, u128), pub fee: Ratio, - #[cfg_attr(test, derivative(PartialEq = "ignore"))] pub settlement_handling: Arc>, } +#[cfg(test)] +impl PartialEq for ConstantProductOrder { + fn eq(&self, other: &Self) -> bool { + self.address == other.address + && self.tokens == other.tokens + && self.reserves == other.reserves + && self.fee == other.fee + } +} + impl ConstantProductOrder { /// Creates a new constant product order from a Uniswap V2 pool and a /// settlement handler implementation. @@ -280,17 +295,24 @@ impl From for ConstantProductOrder { /// 2 sided weighted product automated market maker with weighted reserves and a /// trading fee (e.g. BalancerV2) #[derive(Clone)] -#[cfg_attr(test, derive(Derivative))] -#[cfg_attr(test, derivative(PartialEq))] pub struct WeightedProductOrder { pub address: Address, pub reserves: BTreeMap, pub fee: Bfp, pub version: WeightedPoolVersion, - #[cfg_attr(test, derivative(PartialEq = "ignore"))] pub settlement_handling: Arc>, } +#[cfg(test)] +impl PartialEq for WeightedProductOrder { + fn eq(&self, other: &Self) -> bool { + self.address == other.address + && self.reserves == other.reserves + && self.fee == other.fee + && self.version == other.version + } +} + impl std::fmt::Debug for WeightedProductOrder { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "Weighted Product AMM {:?}", self.reserves.keys()) @@ -298,17 +320,24 @@ impl std::fmt::Debug for WeightedProductOrder { } #[derive(Clone)] -#[cfg_attr(test, derive(Derivative))] -#[cfg_attr(test, derivative(PartialEq))] pub struct StablePoolOrder { pub address: Address, pub reserves: BTreeMap, pub fee: Bfp, pub amplification_parameter: AmplificationParameter, - #[cfg_attr(test, derivative(PartialEq = "ignore"))] pub settlement_handling: Arc>, } +#[cfg(test)] +impl PartialEq for StablePoolOrder { + fn eq(&self, other: &Self) -> bool { + self.address == other.address + && self.reserves == other.reserves + && self.fee == other.fee + && self.amplification_parameter == other.amplification_parameter + } +} + impl std::fmt::Debug for StablePoolOrder { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "Stable Pool AMM {:?}", self.reserves.keys()) @@ -348,15 +377,19 @@ impl Settleable for StablePoolOrder { /// Concentrated type of liquidity with ticks (e.g. 
UniswapV3) #[derive(Clone)] -#[cfg_attr(test, derive(Derivative))] -#[cfg_attr(test, derivative(PartialEq))] pub struct ConcentratedLiquidity { pub tokens: TokenPair, pub pool: PoolInfo, - #[cfg_attr(test, derivative(PartialEq = "ignore"))] pub settlement_handling: Arc>, } +#[cfg(test)] +impl PartialEq for ConcentratedLiquidity { + fn eq(&self, other: &Self) -> bool { + self.tokens == other.tokens && self.pool == other.pool + } +} + impl std::fmt::Debug for ConcentratedLiquidity { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "Concentrated liquidity {:?}", self.pool) From ffb235b40489e2c23f7f4ae93a7d9d156322da19 Mon Sep 17 00:00:00 2001 From: "Jan [Yann]" <4518474+fafk@users.noreply.github.com> Date: Wed, 18 Feb 2026 19:23:36 +0100 Subject: [PATCH 082/219] Update Claude's victoria logs instructions (#4177) # Description An update of Claude's instructions after we migrated the logging system. Also puts the querying behind a wrapper script so you can click "allow reading logs" once instead of having to approve every single query. Update your .env.claude from known sources. --- CLAUDE.md | 32 +++++++-------- docs/COW_ORDER_DEBUG_SKILL.md | 71 ++++++++++---------------------- scripts/vlogs | 76 +++++++++++++++++++++++++++++++++++ 3 files changed, 113 insertions(+), 66 deletions(-) create mode 100755 scripts/vlogs diff --git a/CLAUDE.md b/CLAUDE.md index 0ed24cd9cb..a8b983984f 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -183,26 +183,24 @@ Use `$ETH_MAINNET_RPC` from `.env.claude` for mainnet. Use `cast` or whatever to ## Grafana Logs Access -Query logs via the Grafana API (credentials in `.env.claude`): +Use the `scripts/vlogs` wrapper to query Victoria Logs: ```bash -source .env.claude && curl -s -H "Authorization: Bearer $GRAFANA_API_TOKEN" \ - "$GRAFANA_URL/api/ds/query" \ - -X POST -H "Content-Type: application/json" \ - -d '{ - "queries": [{ - "refId": "A", - "datasource": {"type": "victoriametrics-logs-datasource", "uid": "'"$VICTORIA_LOGS_DATASOURCE_UID"'"}, - "expr": "", - "queryType": "instant" - }], - "from": "now-1h", - "to": "now" - }' +# Basic usage +scripts/vlogs "" [--from