Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 0 additions & 2 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion ipa-multipoint/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ required-features = ["std"]

[dependencies]
banderwagon = { path = "../banderwagon", default-features = false }
hashbrown = { version = "0.16", features = ["serde"] }
hashbrown = "0.16"
hex = { version = "0.4.3", default-features = false, features = ["alloc"] }
itertools = { version = "0.10.1", default-features = false, features = ["use_alloc"] }
rayon = { version = "*", optional = true }
Expand Down
2 changes: 1 addition & 1 deletion salt/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ salt-macros = { path = "../salt-macros", default-features = false }
auto_impl = { version = "1", default-features = false }
blake3 = { version = "1.8", default-features = false }
derive_more = { version = "2", default-features = false, features = ["full"] }
hashbrown = { version = "0.16", features = ["serde"] }
hashbrown = "0.16"
hex = { version = "0.4", default-features = false, features = ["alloc"] }
rayon = { version = "*", optional = true }
rustc-hash = { version = "2.0", default-features = false }
Expand Down
151 changes: 150 additions & 1 deletion salt/src/proof/prover.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,11 +24,16 @@ use ipa_multipoint::{

use salt_macros::prelude::*;
use salt_macros::{chunks, iter, num_threads, sort_unstable};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde::{
de::{Error as _, MapAccess, Visitor},
ser::SerializeMap,
Deserialize, Deserializer, Serialize, Serializer,
};
use spin::Lazy;
use std::collections::{BTreeMap, BTreeSet};
use std::{format, string::ToString, vec::Vec};

use core::fmt;
use hashbrown::HashMap;
use rustc_hash::FxBuildHasher;
type FxHashMap<K, V> = HashMap<K, V, FxBuildHasher>;
Expand Down Expand Up @@ -109,9 +114,57 @@ pub struct SaltProof {

/// the level of the buckets trie
/// used to let verifier determine the bucket trie level
#[serde(with = "fx_hashmap_serde")]
pub levels: FxHashMap<BucketId, u8>,
}

// Hand-written (de)serialization for `levels` so the salt crate doesn't need
// to enable `hashbrown/serde` (which transitively pulls in serde_core 1.0.221+
// and breaks downstream alloy-tx-macros 1.0.23). Entries are emitted in
// ascending key order to keep proof bytes deterministic across provers.
mod fx_hashmap_serde {
use super::*;

pub fn serialize<S: Serializer>(
map: &FxHashMap<BucketId, u8>,
s: S,
) -> Result<S::Ok, S::Error> {
let mut entries: Vec<(&BucketId, &u8)> = map.iter().collect();
entries.sort_unstable_by_key(|(k, _)| **k);
let mut m = s.serialize_map(Some(entries.len()))?;
for (k, v) in entries {
m.serialize_entry(k, v)?;
}
m.end()
}

pub fn deserialize<'de, D: Deserializer<'de>>(
d: D,
) -> Result<FxHashMap<BucketId, u8>, D::Error> {
struct LevelsVisitor;

impl<'de> Visitor<'de> for LevelsVisitor {
type Value = FxHashMap<BucketId, u8>;

fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("a map of BucketId to u8")
}

fn visit_map<A: MapAccess<'de>>(self, mut access: A) -> Result<Self::Value, A::Error> {
let mut map: FxHashMap<BucketId, u8> = FxHashMap::default();
while let Some((k, v)) = access.next_entry::<BucketId, u8>()? {
if map.insert(k, v).is_some() {
return Err(A::Error::custom("duplicate BucketId in levels"));
}
}
Ok(map)
}
}

d.deserialize_map(LevelsVisitor)
}
}

/// Converts a bucket slot entry into a field element for IPA polynomial commitments.
///
/// This function maps key-value pairs (or empty slots) to field elements that serve
Expand Down Expand Up @@ -1213,6 +1266,102 @@ mod tests {
assert_eq!(queries[1].point, Fr::from(5u64));
}

/// Coverage for the hand-rolled `fx_hashmap_serde` (de)serializer. That
/// module exists so the crate can drop the `hashbrown/serde` feature
/// (which transitively pulls in `serde_core >= 1.0.221` and breaks
/// downstream `alloy-tx-macros 1.0.23`); its sort-on-serialize is what
/// keeps proof bytes identical across provers even though `FxHashMap`
/// iterates in a non-deterministic order.
mod fx_hashmap_serde_tests {
    use super::*;

    #[derive(Debug, PartialEq, Serialize, Deserialize)]
    struct LevelsWrapper(#[serde(with = "fx_hashmap_serde")] FxHashMap<BucketId, u8>);

    /// Builds a `LevelsWrapper` from an iterator of `(BucketId, u8)` pairs.
    fn levels_wrapper<I: IntoIterator<Item = (BucketId, u8)>>(entries: I) -> LevelsWrapper {
        let mut map: FxHashMap<BucketId, u8> = HashMap::with_hasher(FxBuildHasher);
        map.extend(entries);
        LevelsWrapper(map)
    }

    /// Encoding then decoding through bincode yields an equivalent map.
    #[test]
    fn round_trip_preserves_entries() {
        let input =
            levels_wrapper([(0u32, 0u8), (42, 3), (1_000_000, 7), (BucketId::MAX, 255)]);

        let encoded =
            bincode::serde::encode_to_vec(&input, bincode::config::legacy()).unwrap();
        let (output, _): (LevelsWrapper, _) =
            bincode::serde::decode_from_slice(&encoded, bincode::config::legacy()).unwrap();

        assert_eq!(input, output);
    }

    /// An empty map survives the round trip and decodes back to empty.
    #[test]
    fn round_trip_empty() {
        let empty = levels_wrapper([]);
        let encoded =
            bincode::serde::encode_to_vec(&empty, bincode::config::legacy()).unwrap();
        let (decoded, _): (LevelsWrapper, _) =
            bincode::serde::decode_from_slice(&encoded, bincode::config::legacy()).unwrap();
        assert!(decoded.0.is_empty());
        assert_eq!(empty, decoded);
    }

    /// Insertion order must not leak into the encoded bytes: without the
    /// ascending-key sort in `fx_hashmap_serde::serialize`, `FxHashMap`
    /// iteration order would flow into proof bytes and silently diverge
    /// across provers.
    #[test]
    fn serialization_is_order_independent() {
        let entries: [(BucketId, u8); 6] = [
            (3, 1),
            (1, 2),
            (4, 3),
            (1_000_000, 4),
            (5, 5),
            (BucketId::MAX, 6),
        ];

        // Inserting in the opposite order perturbs the map's internal layout.
        let mut flipped = entries;
        flipped.reverse();

        let forward = levels_wrapper(entries);
        let backward = levels_wrapper(flipped);

        // Sanity: the two maps are equal as maps...
        assert_eq!(forward, backward);

        let forward_bytes =
            bincode::serde::encode_to_vec(&forward, bincode::config::legacy()).unwrap();
        let backward_bytes =
            bincode::serde::encode_to_vec(&backward, bincode::config::legacy()).unwrap();

        // ...and encode to identical bytes regardless of insertion order.
        assert_eq!(forward_bytes, backward_bytes);
    }

    /// A payload carrying the same BucketId twice must fail to decode.
    #[test]
    fn rejects_duplicate_bucket_id() {
        // Hand-crafted bincode (legacy config) bytes: a u64 entry count of 2
        // followed by two entries that both use BucketId 7.
        let mut payload = Vec::new();
        payload.extend_from_slice(&2u64.to_le_bytes());
        for level in [1u8, 2u8] {
            payload.extend_from_slice(&7u32.to_le_bytes());
            payload.push(level);
        }

        let decoded: Result<(LevelsWrapper, _), _> =
            bincode::serde::decode_from_slice(&payload, bincode::config::legacy());
        assert!(decoded.is_err(), "duplicate BucketId must be rejected");
    }
}

/// Tests create_leaf_node_queries with missing key-value data.
#[test]
fn test_create_leaf_node_queries_missing_data() {
Expand Down
68 changes: 63 additions & 5 deletions salt/src/state/state.rs
Original file line number Diff line number Diff line change
Expand Up @@ -902,16 +902,38 @@ impl<'a, S: StateReader> PlainStateProvider<'a, S> {
plain_key: &[u8],
hint: Option<BucketId>,
) -> Result<Option<Vec<u8>>, S::Error> {
Ok(self
.plain_value_and_slot(plain_key, hint)?
.map(|(_, salt_val)| salt_val.value().to_vec()))
}

/// Retrieves a plain value together with the slot it occupies.
///
/// Same lookup as [`Self::plain_value`], but exposes the underlying
/// [`SlotId`] and full [`SaltValue`] for callers that need them
/// (e.g. to read the original encoded record or perform follow-up
/// slot-addressed operations) without paying for the `Vec<u8>` copy.
///
/// # Arguments
/// * `plain_key` - The plain key to look up
/// * `hint` - Optional bucket_id hint; if provided only that bucket is searched
///
/// # Returns
/// * `Ok(Some((slot_id, salt_value)))` - Slot and encoded value when the key exists
/// * `Ok(None)` - If the key does not exist
/// * `Err(error)` - On underlying storage errors
pub fn plain_value_and_slot(
&self,
plain_key: &[u8],
hint: Option<BucketId>,
) -> Result<Option<(SlotId, SaltValue)>, S::Error> {
// Use the hint if provided, otherwise compute the bucket_id
let bucket_id = hint.unwrap_or_else(|| hasher::bucket_id(plain_key));
let meta = self.store.metadata(bucket_id)?;

match shi_search(bucket_id, meta.nonce, meta.capacity, plain_key, |key| {
shi_search(bucket_id, meta.nonce, meta.capacity, plain_key, |key| {
self.store.value(key)
})? {
Some((_, salt_val)) => Ok(Some(salt_val.value().to_vec())),
None => Ok(None),
}
})
}
}

Expand Down Expand Up @@ -1494,6 +1516,42 @@ mod tests {
assert_eq!(provider.plain_value(b"missing", None).unwrap(), None);
}

/// Exercises `plain_value_and_slot`: a successful hit, slot verification
/// against the store, a wrong-bucket hint, and a missing key.
#[test]
fn test_plain_state_provider_plain_value_and_slot() {
    let store = MemStore::new();
    let kvs = create_same_bucket_test_data(1);
    let updates = EphemeralSaltState::new(&store)
        .update_fin(kvs.iter().map(|(k, v)| (k, v)))
        .unwrap();
    store.update_state(updates);

    let provider = PlainStateProvider::new(&store);
    let (key, expected) = &kvs[0];
    let bucket = hasher::bucket_id(key);

    // A present key yields its slot plus a SaltValue carrying the value bytes.
    let (slot, found) = provider.plain_value_and_slot(key, None).unwrap().unwrap();
    assert_eq!(found.value(), expected.as_deref().unwrap());

    // The reported slot must address the record actually persisted there.
    assert_eq!(store.value((bucket, slot).into()).unwrap().unwrap(), found);

    // A hint pointing at the wrong bucket makes the search miss entirely.
    assert!(provider
        .plain_value_and_slot(key, Some(bucket + 1))
        .unwrap()
        .is_none());

    // An absent key comes back as a clean None.
    assert!(provider
        .plain_value_and_slot(b"missing", None)
        .unwrap()
        .is_none());
}

/// Tests cache hit and miss behavior in the value() method.
///
/// Validates that the caching logic correctly handles:
Expand Down
Loading