diff --git a/Cargo.toml b/Cargo.toml
index cc89e52..1507312 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -5,7 +5,7 @@ resolver = "2"
 [workspace.package]
 version = "0.16.0-rc.4"
 edition = "2024"
-rust-version = "1.88"
+rust-version = "1.92"
 authors = ["init4"]
 license = "MIT OR Apache-2.0"
 homepage = "https://github.com/init4tech/signet-sdk"
@@ -77,12 +77,15 @@ alloy-contract = { version = "1.4.0", features = ["pubsub"] }
 reth = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.1" }
 reth-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.1" }
 reth-chainspec = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.1" }
+reth-codecs = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.1" }
 reth-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.1" }
+reth-db-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.1" }
 reth-db-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.1" }
 reth-eth-wire-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.1" }
 reth-evm-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.1" }
 reth-exex = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.1" }
 reth-exex-test-utils = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.1" }
+reth-libmdbx = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.1" }
 reth-network-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.1" }
 reth-network-peers = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.1" }
 reth-node-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.9.1" }
diff --git a/crates/storage/Cargo.toml b/crates/storage/Cargo.toml
new file mode 100644
index 0000000..8b25ecd
--- /dev/null
+++ b/crates/storage/Cargo.toml
@@ -0,0 +1,24 @@
+[package]
+name = "signet-storage"
+version.workspace = true
+edition.workspace = true
+rust-version.workspace = true
+authors.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
+
+[dependencies]
+alloy.workspace = true
+auto_impl = "1.3.0"
+bytes = "1.11.0"
+reth.workspace = true
+reth-db = { workspace = true, features = ["test-utils"] }
+reth-db-api.workspace = true
+reth-libmdbx.workspace = true
+thiserror.workspace = true
+trevm.workspace = true
+
+[dev-dependencies]
+serial_test = "3.3.1"
+tempfile.workspace = true
diff --git a/crates/storage/README.md b/crates/storage/README.md
new file mode 100644
index 0000000..a7d6488
--- /dev/null
+++ b/crates/storage/README.md
@@ -0,0 +1,17 @@
+# Signet Storage
+
+High-level API for Signet's storage layer.
+
+This library contains the following:
+
+- Traits for serializing and deserializing Signet data structures as DB
+  keys/values.
+- Traits for hot and cold storage operations.
+- Relevant KV table definitions.
+
+## Significant Traits
+
+- `HotKv` - Encapsulates logic for reading and writing to hot storage.
+- `ColdKv` - Encapsulates logic for reading and writing to cold storage.
+- `KeySer` - Provides methods for serializing a type as a DB key.
+- `ValSer` - Provides methods for serializing a type as a DB value.
diff --git a/crates/storage/src/cold/mod.rs b/crates/storage/src/cold/mod.rs
new file mode 100644
index 0000000..9b9d9be
--- /dev/null
+++ b/crates/storage/src/cold/mod.rs
@@ -0,0 +1 @@
+//! Placeholder module for cold storage implementation.
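Before the implementation diffs below, a minimal sketch of the intended `HotKv` write-then-read flow, mirroring the tests added later in this diff (`writer()`, `queue_create`/`queue_put`, `raw_commit()`, then `reader()`/`get()`). The `signet_storage::*` module paths and the choice of the in-memory `MemKv` backend are assumptions inferred from the new crate's file layout, not confirmed re-exports.

```rust
// Sketch only: module paths are guessed from the file layout in this PR
// (crates/storage/src/hot/impls/mem.rs, crates/storage/src/tables/...).
use signet_storage::hot::impls::mem::MemKv;
use signet_storage::hot::model::*; // HotKv, HotKvRead, HotKvWrite, ...
use signet_storage::tables::hot::PlainAccountState;

use alloy::primitives::{Address, U256};
use reth::primitives::Account;

fn put_and_get_account() -> Result<(), Box<dyn std::error::Error>> {
    // In-memory backend; the mdbx-backed `DatabaseEnv` implements the same traits.
    let kv = MemKv::new();

    let address = Address::repeat_byte(0x01);
    let account = Account { nonce: 1, balance: U256::from(100u64), bytecode_hash: None };

    // Writes are queued on an exclusive read-write transaction and applied on commit.
    let mut writer = kv.writer()?;
    writer.queue_create::<PlainAccountState>()?;
    writer.queue_put::<PlainAccountState>(&address, &account)?;
    writer.raw_commit()?;

    // Reads go through a (possibly concurrent) read-only transaction.
    let reader = kv.reader()?;
    assert_eq!(reader.get::<PlainAccountState>(&address)?, Some(account));
    Ok(())
}
```

The same trait calls are what the MDBX-backed tests below exercise against `DatabaseEnv`.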
diff --git a/crates/storage/src/hot/impls/mdbx.rs b/crates/storage/src/hot/impls/mdbx.rs
new file mode 100644
index 0000000..ecdfbf1
--- /dev/null
+++ b/crates/storage/src/hot/impls/mdbx.rs
@@ -0,0 +1,1356 @@
+use crate::{
+    hot::model::{
+        DualKeyValue, DualKeyedTraverse, DualTableTraverse, HotKv, HotKvError, HotKvRead,
+        HotKvReadError, HotKvWrite, KvTraverse, KvTraverseMut, RawDualKeyValue, RawKeyValue,
+        RawValue,
+    },
+    ser::{DeserError, KeySer, MAX_KEY_SIZE, ValSer},
+    tables::{DualKeyed, MAX_FIXED_VAL_SIZE},
+};
+use bytes::{BufMut, BytesMut};
+use reth_db::{
+    Database, DatabaseEnv,
+    mdbx::{RW, TransactionKind, WriteFlags, tx::Tx},
+};
+use reth_db_api::DatabaseError;
+use reth_libmdbx::{Cursor, RO};
+use std::borrow::Cow;
+
+/// Error type for reth-libmdbx based hot storage.
+#[derive(Debug, thiserror::Error)]
+pub enum MdbxError {
+    /// Inner error
+    #[error(transparent)]
+    Mdbx(#[from] reth_libmdbx::Error),
+
+    /// Reth error.
+    #[error(transparent)]
+    Reth(#[from] DatabaseError),
+
+    /// Deser.
+    #[error(transparent)]
+    Deser(#[from] DeserError),
+}
+
+impl HotKvReadError for MdbxError {
+    fn into_hot_kv_error(self) -> HotKvError {
+        match self {
+            MdbxError::Mdbx(e) => HotKvError::from_err(e),
+            MdbxError::Deser(e) => HotKvError::Deser(e),
+            MdbxError::Reth(e) => HotKvError::from_err(e),
+        }
+    }
+}
+
+impl From<DeserError> for DatabaseError {
+    fn from(value: DeserError) -> Self {
+        DatabaseError::Other(value.to_string())
+    }
+}
+
+impl HotKv for DatabaseEnv {
+    type RoTx = Tx<RO>;
+    type RwTx = Tx<RW>;
+
+    fn reader(&self) -> Result<Self::RoTx, HotKvError> {
+        self.tx().map_err(HotKvError::from_err)
+    }
+
+    fn writer(&self) -> Result<Self::RwTx, HotKvError> {
+        self.tx_mut().map_err(HotKvError::from_err)
+    }
+}
+
+impl<K> HotKvRead for Tx<K>
+where
+    K: TransactionKind,
+{
+    type Error = MdbxError;
+
+    type Traverse<'a> = Cursor<K>;
+
+    fn raw_traverse<'a>(&'a self, table: &str) -> Result<Self::Traverse<'a>, Self::Error> {
+        let db = self.inner.open_db(Some(table))?;
+        let cursor = self.inner.cursor(&db)?;
+
+        Ok(cursor)
+    }
+
+    fn raw_get<'a>(
+        &'a self,
+        table: &str,
+        key: &[u8],
+    ) -> Result<Option<RawValue<'a>>, Self::Error> {
+        let dbi = self.inner.open_db(Some(table)).map(|db| db.dbi())?;
+
+        self.inner.get(dbi, key.as_ref()).map_err(MdbxError::Mdbx)
+    }
+
+    fn raw_get_dual<'a>(
+        &'a self,
+        _table: &str,
+        _key1: &[u8],
+        _key2: &[u8],
+    ) -> Result<Option<RawValue<'a>>, Self::Error> {
+        unimplemented!("Not implemented: raw_get_dual. Use get_dual instead.");
+    }
+
+    fn get_dual<T: DualKeyed>(
+        &self,
+        key1: &T::Key,
+        key2: &T::Key2,
+    ) -> Result<Option<T::Value>, Self::Error> {
+        let db = self.inner.open_db(Some(T::NAME))?;
+        let mut cursor = self.inner.cursor(&db)?;
+
+        DualTableTraverse::<T>::exact_dual(&mut cursor, key1, key2)
+    }
+}
+
+impl HotKvWrite for Tx<RW> {
+    type TraverseMut<'a> = Cursor<RW>;
+
+    fn raw_traverse_mut<'a>(
+        &'a mut self,
+        table: &str,
+    ) -> Result<Self::TraverseMut<'a>, Self::Error> {
+        let db = self.inner.open_db(Some(table))?;
+        let cursor = self.inner.cursor(&db)?;
+
+        Ok(cursor)
+    }
+
+    fn queue_raw_put(&mut self, table: &str, key: &[u8], value: &[u8]) -> Result<(), Self::Error> {
+        let dbi = self.inner.open_db(Some(table)).map(|db| db.dbi())?;
+
+        self.inner.put(dbi, key, value, WriteFlags::UPSERT).map(|_| ()).map_err(MdbxError::Mdbx)
+    }
+
+    fn queue_raw_put_dual(
+        &mut self,
+        _table: &str,
+        _key1: &[u8],
+        _key2: &[u8],
+        _value: &[u8],
+    ) -> Result<(), Self::Error> {
+        unimplemented!("Not implemented: queue_raw_put_dual. Use queue_put_dual instead.");
+    }
+
+    // Specialized put for dual-keyed tables.
+ fn queue_put_dual( + &mut self, + key1: &T::Key, + key2: &T::Key2, + value: &T::Value, + ) -> Result<(), Self::Error> { + let k2_size = ::SIZE; + let mut scratch = [0u8; MAX_KEY_SIZE]; + + // This will be the total length of key2 + value, reserved in mdbx + let encoded_len = k2_size + value.encoded_size(); + + // Prepend the value with k2. + let mut buf = BytesMut::with_capacity(encoded_len); + let encoded_k2 = key2.encode_key(&mut scratch); + buf.put_slice(encoded_k2); + value.encode_value_to(&mut buf); + + let encoded_k1 = key1.encode_key(&mut scratch); + // NB: DUPSORT and RESERVE are incompatible :( + let db = self.inner.open_db(Some(T::NAME))?; + self.inner.put(db.dbi(), encoded_k1, &buf, Default::default())?; + + Ok(()) + } + + fn queue_raw_delete(&mut self, table: &str, key: &[u8]) -> Result<(), Self::Error> { + let dbi = self.inner.open_db(Some(table)).map(|db| db.dbi())?; + + self.inner.del(dbi, key, None).map(|_| ()).map_err(MdbxError::Mdbx) + } + + fn queue_raw_clear(&mut self, table: &str) -> Result<(), Self::Error> { + // Future: port more of reth's db env with dbi caching to avoid + // repeated open_db calls + let dbi = self.inner.open_db(Some(table)).map(|db| db.dbi())?; + self.inner.clear_db(dbi).map(|_| ()).map_err(MdbxError::Mdbx) + } + + fn queue_raw_create( + &mut self, + table: &str, + dual_key: bool, + fixed_val: bool, + ) -> Result<(), Self::Error> { + let mut flags = Default::default(); + + if dual_key { + flags |= reth_libmdbx::DatabaseFlags::DUP_SORT; + if fixed_val { + flags |= reth_libmdbx::DatabaseFlags::DUP_FIXED; + } + } + + self.inner.create_db(Some(table), flags).map(|_| ()).map_err(MdbxError::Mdbx) + } + + fn raw_commit(self) -> Result<(), Self::Error> { + // when committing, mdbx returns true on failure + let res = self.inner.commit()?; + + if res.0 { Err(reth_libmdbx::Error::Other(1).into()) } else { Ok(()) } + } +} + +impl KvTraverse for Cursor +where + K: TransactionKind, +{ + fn first<'a>(&'a mut self) -> Result>, MdbxError> { + Cursor::first(self).map_err(MdbxError::Mdbx) + } + + fn last<'a>(&'a mut self) -> Result>, MdbxError> { + Cursor::last(self).map_err(MdbxError::Mdbx) + } + + fn exact<'a>(&'a mut self, key: &[u8]) -> Result>, MdbxError> { + Cursor::set(self, key).map_err(MdbxError::Mdbx) + } + + fn lower_bound<'a>(&'a mut self, key: &[u8]) -> Result>, MdbxError> { + Cursor::set_range(self, key).map_err(MdbxError::Mdbx) + } + + fn read_next<'a>(&'a mut self) -> Result>, MdbxError> { + Cursor::next(self).map_err(MdbxError::Mdbx) + } + + fn read_prev<'a>(&'a mut self) -> Result>, MdbxError> { + Cursor::prev(self).map_err(MdbxError::Mdbx) + } +} + +impl KvTraverseMut for Cursor { + fn delete_current(&mut self) -> Result<(), MdbxError> { + Cursor::del(self, Default::default()).map_err(MdbxError::Mdbx) + } +} + +impl DualKeyedTraverse for Cursor +where + K: TransactionKind, +{ + fn exact_dual<'a>( + &'a mut self, + _key1: &[u8], + _key2: &[u8], + ) -> Result>, MdbxError> { + unimplemented!("Use DualTableTraverse for exact_dual"); + } + + fn next_dual_above<'a>( + &'a mut self, + _key1: &[u8], + _key2: &[u8], + ) -> Result>, MdbxError> { + unimplemented!("Use DualTableTraverse for next_dual_above"); + } + + fn next_k1<'a>(&'a mut self) -> Result>, MdbxError> { + unimplemented!("Use DualTableTraverse for next_k1"); + } + + fn next_k2<'a>(&'a mut self) -> Result>, MdbxError> { + unimplemented!("Use DualTableTraverse for next_k2"); + } +} + +impl DualTableTraverse for Cursor +where + T: DualKeyed, + K: TransactionKind, +{ + fn next_dual_above( + &mut 
self, + key1: &T::Key, + key2: &T::Key2, + ) -> Result>, MdbxError> { + Ok(get_both_range_helper::(self, key1, key2)? + .map(T::decode_prepended_value) + .transpose()? + .map(|(k2, v)| (key1.clone(), k2, v))) + } + + fn next_k1(&mut self) -> Result>, MdbxError> { + let Some((k, v)) = self.next_nodup::, Cow<'_, [u8]>>()? else { + return Ok(None); + }; + + let k1 = T::Key::decode_key(&k)?; + let (k2, v) = T::decode_prepended_value(v)?; + + Ok(Some((k1, k2, v))) + } + + fn next_k2(&mut self) -> Result>, MdbxError> { + let Some((k, v)) = self.next_dup::, Cow<'_, [u8]>>()? else { + return Ok(None); + }; + + let k = T::Key::decode_key(&k)?; + let (k2, v) = T::decode_prepended_value(v)?; + + Ok(Some((k, k2, v))) + } +} + +/// Helper to handle dup fixed value tables +fn dup_fixed_helper( + cursor: &mut Cursor, + key1: &T::Key, + key2: &T::Key2, + f: impl FnOnce(&mut Cursor, &[u8], &[u8]) -> Result, +) -> Result +where + T: DualKeyed, + K: TransactionKind, +{ + let mut key1_buf = [0u8; MAX_KEY_SIZE]; + let mut key2_buf = [0u8; MAX_KEY_SIZE]; + let key1_bytes = key1.encode_key(&mut key1_buf); + let key2_bytes = key2.encode_key(&mut key2_buf); + + // K2 slice must be EXACTLY the size of the fixed value size, if the + // table has one. This is a bit ugly, and results in an extra + // allocation for fixed-size values. This could be avoided using + // max value size. + if T::IS_FIXED_VAL { + let mut buf = [0u8; MAX_KEY_SIZE + MAX_FIXED_VAL_SIZE]; + buf[..::SIZE].copy_from_slice(key2_bytes); + + let kvs: usize = ::SIZE + T::FIXED_VAL_SIZE.unwrap(); + + f(cursor, key1_bytes, &buf[..kvs]) + } else { + f(cursor, key1_bytes, key2_bytes) + } +} + +// Helper to call get_both_range with dup fixed handling +fn get_both_range_helper<'a, T, K>( + cursor: &'a mut Cursor, + key1: &T::Key, + key2: &T::Key2, +) -> Result>, MdbxError> +where + T: DualKeyed, + K: TransactionKind, +{ + dup_fixed_helper::>>( + cursor, + key1, + key2, + |cursor, key1_bytes, key2_bytes| { + cursor.get_both_range(key1_bytes, key2_bytes).map_err(MdbxError::Mdbx) + }, + ) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + hot::model::{HotDbWrite, HotKv, HotKvRead, HotKvWrite, TableTraverse, TableTraverseMut}, + tables::{SingleKey, Table, hot}, + }; + use alloy::primitives::{Address, B256, BlockNumber, Bytes, U256}; + use reth::primitives::{Account, Bytecode, Header, SealedHeader}; + use reth_db::DatabaseEnv; + use serial_test::serial; + + // Test table definitions for traversal tests + #[derive(Debug)] + struct TestTable; + + impl Table for TestTable { + const NAME: &'static str = "mdbx_test_table"; + type Key = u64; + type Value = Bytes; + } + + impl SingleKey for TestTable {} + + /// Create a temporary MDBX database for testing that will be automatically cleaned up + fn run_test(f: F) { + let db = reth_db::test_utils::create_test_rw_db(); + + // Create tables from the `crate::tables::hot` module + let mut writer = db.db().writer().unwrap(); + + writer.queue_create::().unwrap(); + writer.queue_create::().unwrap(); + writer.queue_create::().unwrap(); + writer.queue_create::().unwrap(); + writer.queue_create::().unwrap(); + writer.queue_create::().unwrap(); + writer.queue_create::().unwrap(); + writer.queue_create::().unwrap(); + writer.queue_create::().unwrap(); + writer.queue_create::().unwrap(); + + writer.commit().expect("Failed to commit table creation"); + + f(db.db()); + } + + /// Create test data + fn create_test_account() -> (Address, Account) { + let address = Address::from_slice(&[0x1; 20]); + let account = Account { + 
nonce: 42, + balance: U256::from(1000u64), + bytecode_hash: Some(B256::from_slice(&[0x2; 32])), + }; + (address, account) + } + + fn create_test_bytecode() -> (B256, Bytecode) { + let hash = B256::from_slice(&[0x2; 32]); + let code = reth::primitives::Bytecode::new_raw(vec![0x60, 0x80, 0x60, 0x40].into()); + (hash, code) + } + + fn create_test_header() -> (BlockNumber, Header) { + let block_number = 12345; + let header = Header { + number: block_number, + gas_limit: 8000000, + gas_used: 100000, + timestamp: 1640995200, + parent_hash: B256::from_slice(&[0x3; 32]), + state_root: B256::from_slice(&[0x4; 32]), + ..Default::default() + }; + (block_number, header) + } + + #[test] + #[serial] + fn test_hotkv_basic_operations() { + run_test(test_hotkv_basic_operations_inner); + } + + fn test_hotkv_basic_operations_inner(db: &DatabaseEnv) { + let (address, account) = create_test_account(); + let (hash, bytecode) = create_test_bytecode(); + + // Test HotKv::writer() and basic write operations + { + let mut writer: Tx = db.writer().unwrap(); + + // Create tables first + writer.queue_create::().unwrap(); + + // Write account data + writer.queue_put::(&address, &account).unwrap(); + writer.queue_put::(&hash, &bytecode).unwrap(); + + // Commit the transaction + writer.raw_commit().unwrap(); + } + + // Test HotKv::reader() and basic read operations + { + let reader: Tx = db.reader().unwrap(); + + // Read account data + let read_account: Option = + reader.get::(&address).unwrap(); + assert_eq!(read_account, Some(account)); + + // Read bytecode + let read_bytecode: Option = reader.get::(&hash).unwrap(); + assert_eq!(read_bytecode, Some(bytecode)); + + // Test non-existent data + let nonexistent_addr = Address::from_slice(&[0xff; 20]); + let nonexistent_account: Option = + reader.get::(&nonexistent_addr).unwrap(); + assert_eq!(nonexistent_account, None); + } + } + + #[test] + #[serial] + fn test_raw_operations() { + run_test(test_raw_operations_inner) + } + + fn test_raw_operations_inner(db: &DatabaseEnv) { + let table_name = "test_table"; + let key = b"test_key"; + let value = b"test_value"; + + // Test raw write operations + { + let mut writer: Tx = db.writer().unwrap(); + + // Create table + writer.queue_raw_create(table_name, false, false).unwrap(); + + // Put raw data + writer.queue_raw_put(table_name, key, value).unwrap(); + + writer.raw_commit().unwrap(); + } + + // Test raw read operations + { + let reader: Tx = db.reader().unwrap(); + + let read_value = reader.raw_get(table_name, key).unwrap(); + assert_eq!(read_value.as_deref(), Some(value.as_slice())); + + // Test non-existent key + let nonexistent = reader.raw_get(table_name, b"nonexistent").unwrap(); + assert_eq!(nonexistent, None); + } + + // Test raw delete + { + let mut writer: Tx = db.writer().unwrap(); + + writer.queue_raw_delete(table_name, key).unwrap(); + writer.raw_commit().unwrap(); + } + + // Verify deletion + { + let reader: Tx = db.reader().unwrap(); + let deleted_value = reader.raw_get(table_name, key).unwrap(); + assert_eq!(deleted_value, None); + } + } + + #[test] + #[serial] + fn test_dual_keyed_operations() { + run_test(test_dual_keyed_operations_inner) + } + + fn test_dual_keyed_operations_inner(db: &DatabaseEnv) { + let address = Address::from_slice(&[0x1; 20]); + let storage_key = B256::from_slice(&[0x5; 32]); + let storage_value = U256::from(999u64); + + // Test dual-keyed table operations + { + let mut writer: Tx = db.writer().unwrap(); + + // Put storage data using dual keys + writer + .queue_put_dual::(&address, 
&storage_key, &storage_value) + .unwrap(); + + writer.raw_commit().unwrap(); + } + + // Test reading dual-keyed data + { + let reader: Tx = db.reader().unwrap(); + + // Read storage using dual key lookup + let read_value = + reader.get_dual::(&address, &storage_key).unwrap().unwrap(); + + assert_eq!(read_value, storage_value); + } + } + + #[test] + #[serial] + fn test_table_management() { + run_test(test_table_management_inner) + } + + fn test_table_management_inner(db: &DatabaseEnv) { + // Add some data + let (block_number, header) = create_test_header(); + { + let mut writer: Tx = db.writer().unwrap(); + writer.queue_put::(&block_number, &header).unwrap(); + writer.raw_commit().unwrap(); + } + + // Verify data exists + { + let reader: Tx = db.reader().unwrap(); + let read_header: Option
= reader.get::(&block_number).unwrap(); + assert_eq!(read_header, Some(header.clone())); + } + + // Clear the table + { + let mut writer: Tx = db.writer().unwrap(); + writer.queue_clear::().unwrap(); + writer.raw_commit().unwrap(); + } + + // Verify table is empty + { + let reader: Tx = db.reader().unwrap(); + let read_header: Option
= reader.get::(&block_number).unwrap(); + assert_eq!(read_header, None); + } + } + + #[test] + fn test_batch_operations() { + run_test(test_batch_operations_inner) + } + + fn test_batch_operations_inner(db: &DatabaseEnv) { + // Create test data + let accounts: Vec<(Address, Account)> = (0..10) + .map(|i| { + let mut addr_bytes = [0u8; 20]; + addr_bytes[19] = i; + let address = Address::from_slice(&addr_bytes); + let account = Account { + nonce: i.into(), + balance: U256::from((i as u64) * 100), + bytecode_hash: None, + }; + (address, account) + }) + .collect(); + + // Test batch writes + { + let mut writer: Tx = db.writer().unwrap(); + + // Write multiple accounts + for (address, account) in &accounts { + writer.queue_put::(address, account).unwrap(); + } + + writer.raw_commit().unwrap(); + } + + // Test batch reads + { + let reader: Tx = db.reader().unwrap(); + + for (address, expected_account) in &accounts { + let read_account: Option = + reader.get::(address).unwrap(); + assert_eq!(read_account.as_ref(), Some(expected_account)); + } + } + + // Test batch get_many + { + let reader: Tx = db.reader().unwrap(); + let addresses: Vec
= accounts.iter().map(|(addr, _)| *addr).collect(); + let read_accounts: Vec<(_, Option)> = + reader.get_many::(addresses.iter()).unwrap(); + + for (i, (_, expected_account)) in accounts.iter().enumerate() { + assert_eq!(read_accounts[i].1.as_ref(), Some(expected_account)); + } + } + } + + #[test] + fn test_transaction_isolation() { + run_test(test_transaction_isolation_inner) + } + + fn test_transaction_isolation_inner(db: &DatabaseEnv) { + let (address, account) = create_test_account(); + + // Setup initial data + { + let mut writer: Tx = db.writer().unwrap(); + writer.queue_put::(&address, &account).unwrap(); + writer.raw_commit().unwrap(); + } + + // Start a reader transaction + let reader: Tx = db.reader().unwrap(); + + // Modify data in a writer transaction + { + let mut writer: Tx = db.writer().unwrap(); + let modified_account = + Account { nonce: 999, balance: U256::from(9999u64), bytecode_hash: None }; + writer.queue_put::(&address, &modified_account).unwrap(); + writer.raw_commit().unwrap(); + } + + // Reader should still see original data (snapshot isolation) + { + let read_account: Option = + reader.get::(&address).unwrap(); + assert_eq!(read_account, Some(account)); + } + + // New reader should see modified data + { + let new_reader: Tx = db.reader().unwrap(); + let read_account: Option = + new_reader.get::(&address).unwrap(); + assert_eq!(read_account.unwrap().nonce, 999); + } + } + + #[test] + fn test_multiple_readers() { + run_test(test_multiple_readers_inner) + } + + fn test_multiple_readers_inner(db: &DatabaseEnv) { + let (address, account) = create_test_account(); + + // Setup data + { + let mut writer: Tx = db.writer().unwrap(); + writer.queue_put::(&address, &account).unwrap(); + writer.raw_commit().unwrap(); + } + + // Create multiple readers + let reader1: Tx = db.reader().unwrap(); + let reader2: Tx = db.reader().unwrap(); + let reader3: Tx = db.reader().unwrap(); + + // All readers should see the same data + let account1: Option = reader1.get::(&address).unwrap(); + let account2: Option = reader2.get::(&address).unwrap(); + let account3: Option = reader3.get::(&address).unwrap(); + + assert_eq!(account1, Some(account)); + assert_eq!(account2, Some(account)); + assert_eq!(account3, Some(account)); + } + + #[test] + fn test_error_handling() { + run_test(test_error_handling_inner) + } + + fn test_error_handling_inner(db: &DatabaseEnv) { + // Test reading from non-existent table + { + let reader: Tx = db.reader().unwrap(); + let result = reader.raw_get("nonexistent_table", b"key"); + + // Should handle gracefully (may return None or error depending on MDBX behavior) + match result { + Ok(None) => {} // This is fine + Err(_) => {} // This is also acceptable for non-existent table + Ok(Some(_)) => panic!("Should not return data for non-existent table"), + } + } + + // Test writing to a table without creating it first + { + let mut writer: Tx = db.writer().unwrap(); + let (address, account) = create_test_account(); + + // This should handle the case where table doesn't exist + let result = writer.queue_put::(&address, &account); + match result { + Ok(_) => { + // If it succeeds, commit should work + writer.raw_commit().unwrap(); + } + Err(_) => { + // If it fails, that's expected behavior + } + } + } + } + + #[test] + fn test_serialization_roundtrip() { + run_test(test_serialization_roundtrip_inner) + } + + fn test_serialization_roundtrip_inner(db: &DatabaseEnv) { + // Test various data types + let (block_number, header) = create_test_header(); + let header = 
SealedHeader::new_unhashed(header); + + { + let mut writer: Tx = db.writer().unwrap(); + + // Write different types + writer.put_header(&header).unwrap(); + + writer.raw_commit().unwrap(); + } + + { + let reader: Tx = db.reader().unwrap(); + + // Read and verify + let read_header: Option
= reader.get::(&block_number).unwrap(); + assert_eq!(read_header.as_ref(), Some(header.header())); + + let read_hash: Option = reader.get::(&header.hash()).unwrap(); + assert_eq!(read_hash, Some(header.number)); + } + } + + #[test] + fn test_large_data() { + run_test(test_large_data_inner) + } + + fn test_large_data_inner(db: &DatabaseEnv) { + // Create a large bytecode + let hash = B256::from_slice(&[0x8; 32]); + let large_code_vec: Vec = (0..10000).map(|i| (i % 256) as u8).collect(); + let large_bytecode = Bytecode::new_raw(large_code_vec.clone().into()); + + { + let mut writer: Tx = db.writer().unwrap(); + writer.queue_create::().unwrap(); + writer.queue_put::(&hash, &large_bytecode).unwrap(); + writer.raw_commit().unwrap(); + } + + { + let reader: Tx = db.reader().unwrap(); + let read_bytecode: Option = reader.get::(&hash).unwrap(); + assert_eq!(read_bytecode, Some(large_bytecode)); + } + } + + // ======================================================================== + // Cursor Traversal Tests + // ======================================================================== + + #[test] + fn test_table_traverse_basic_navigation() { + run_test(test_table_traverse_basic_navigation_inner) + } + + fn test_table_traverse_basic_navigation_inner(db: &DatabaseEnv) { + // Setup test data with multiple entries + let test_data: Vec<(u64, Bytes)> = vec![ + (1, Bytes::from_static(b"value_001")), + (2, Bytes::from_static(b"value_002")), + (3, Bytes::from_static(b"value_003")), + (10, Bytes::from_static(b"value_010")), + (20, Bytes::from_static(b"value_020")), + ]; + + // Insert test data + { + let mut writer: Tx = db.writer().unwrap(); + for (key, value) in &test_data { + writer.queue_put::(key, value).unwrap(); + } + writer.raw_commit().unwrap(); + } + + // Test cursor traversal + { + let tx: Tx = db.reader().unwrap(); + let db_handle = tx.inner.open_db(Some(TestTable::NAME)).unwrap(); + let mut cursor = tx.inner.cursor(&db_handle).unwrap(); + + // Test first() + let first_result = TableTraverse::::first(&mut cursor).unwrap(); + assert!(first_result.is_some()); + let (key, value) = first_result.unwrap(); + assert_eq!(key, test_data[0].0); + assert_eq!(value, test_data[0].1); + + // Test last() + let last_result = TableTraverse::::last(&mut cursor).unwrap(); + assert!(last_result.is_some()); + let (key, value) = last_result.unwrap(); + assert_eq!(key, test_data.last().unwrap().0); + assert_eq!(value, test_data.last().unwrap().1); + + // Test exact lookup + let exact_result = TableTraverse::::exact(&mut cursor, &2u64).unwrap(); + assert!(exact_result.is_some()); + assert_eq!(exact_result.unwrap(), test_data[1].1); + + // Test exact lookup for non-existent key + let missing_result = + TableTraverse::::exact(&mut cursor, &999u64).unwrap(); + assert!(missing_result.is_none()); + + // Test next_above (range lookup) + let range_result = + TableTraverse::::lower_bound(&mut cursor, &5u64).unwrap(); + assert!(range_result.is_some()); + let (key, value) = range_result.unwrap(); + assert_eq!(key, test_data[3].0); // key 10 + assert_eq!(value, test_data[3].1); + } + } + + #[test] + fn test_table_traverse_sequential_navigation() { + run_test(test_table_traverse_sequential_navigation_inner) + } + + fn test_table_traverse_sequential_navigation_inner(db: &DatabaseEnv) { + // Setup sequential test data + let test_data: Vec<(u64, Bytes)> = (1..=10) + .map(|i| { + let s = format!("value_{:03}", i); + let s = s.as_bytes(); + let value = Bytes::copy_from_slice(s); + (i, value) + }) + .collect(); + + // Insert test data + 
{ + let mut writer: Tx = db.writer().unwrap(); + for (key, value) in &test_data { + writer.queue_put::(key, value).unwrap(); + } + writer.raw_commit().unwrap(); + } + + // Test sequential navigation + { + let tx: Tx = db.reader().unwrap(); + let db_handle = tx.inner.open_db(Some(TestTable::NAME)).unwrap(); + let mut cursor = tx.inner.cursor(&db_handle).unwrap(); + + // Start from first and traverse forward + let mut current_idx = 0; + let first_result = TableTraverse::::first(&mut cursor).unwrap(); + assert!(first_result.is_some()); + + let (key, value) = first_result.unwrap(); + assert_eq!(key, test_data[current_idx].0); + assert_eq!(value, test_data[current_idx].1); + + // Navigate forward through all entries + while current_idx < test_data.len() - 1 { + let next_result = TableTraverse::::read_next(&mut cursor).unwrap(); + assert!(next_result.is_some()); + + current_idx += 1; + let (key, value) = next_result.unwrap(); + assert_eq!(key, test_data[current_idx].0); + assert_eq!(value, test_data[current_idx].1); + } + + // Next should return None at the end + let beyond_end = TableTraverse::::read_next(&mut cursor).unwrap(); + assert!(beyond_end.is_none()); + + // Navigate backward + while current_idx > 0 { + let prev_result = TableTraverse::::read_prev(&mut cursor).unwrap(); + assert!(prev_result.is_some()); + + current_idx -= 1; + let (key, value) = prev_result.unwrap(); + assert_eq!(key, test_data[current_idx].0); + assert_eq!(value, test_data[current_idx].1); + } + + // Previous should return None at the beginning + let before_start = TableTraverse::::read_prev(&mut cursor).unwrap(); + assert!(before_start.is_none()); + } + } + + #[test] + fn test_table_traverse_mut_delete() { + run_test(test_table_traverse_mut_delete_inner) + } + + fn test_table_traverse_mut_delete_inner(db: &DatabaseEnv) { + let test_data: Vec<(u64, Bytes)> = vec![ + (1, Bytes::from_static(b"delete_value_1")), + (2, Bytes::from_static(b"delete_value_2")), + (3, Bytes::from_static(b"delete_value_3")), + ]; + + // Insert test data + { + let mut writer: Tx = db.writer().unwrap(); + for (key, value) in &test_data { + writer.queue_put::(key, value).unwrap(); + } + writer.raw_commit().unwrap(); + } + // Test cursor deletion + { + let tx: Tx = db.writer().unwrap(); + let db_handle = tx.inner.open_db(Some(TestTable::NAME)).unwrap(); + let mut cursor = tx.inner.cursor(&db_handle).unwrap(); + + // Navigate to middle entry + let first = TableTraverse::::first(&mut cursor).unwrap().unwrap(); + assert_eq!(first.0, test_data[0].0); + + let next = TableTraverse::::read_next(&mut cursor).unwrap().unwrap(); + assert_eq!(next.0, test_data[1].0); + + // Delete current entry (key 2) + TableTraverseMut::::delete_current(&mut cursor).unwrap(); + + tx.raw_commit().unwrap(); + } + + // Verify deletion + { + let tx: Tx = db.reader().unwrap(); + let db_handle = tx.inner.open_db(Some(TestTable::NAME)).unwrap(); + let mut cursor = tx.inner.cursor(&db_handle).unwrap(); + + // Should only have first and third entries + let first = TableTraverse::::first(&mut cursor).unwrap().unwrap(); + assert_eq!(first.0, test_data[0].0); + + let second = TableTraverse::::read_next(&mut cursor).unwrap().unwrap(); + assert_eq!(second.0, test_data[2].0); + + // Should be no more entries + let none = TableTraverse::::read_next(&mut cursor).unwrap(); + assert!(none.is_none()); + + // Verify deleted key is gone + let missing = + TableTraverse::::exact(&mut cursor, &test_data[1].0).unwrap(); + assert!(missing.is_none()); + } + } + + #[test] + fn 
test_table_traverse_accounts() { + run_test(test_table_traverse_accounts_inner) + } + + fn test_table_traverse_accounts_inner(db: &DatabaseEnv) { + // Setup test accounts + let test_accounts: Vec<(Address, Account)> = (0..5) + .map(|i| { + let mut addr_bytes = [0u8; 20]; + addr_bytes[19] = i; + let address = Address::from_slice(&addr_bytes); + let account = Account { + nonce: (i as u64) * 10, + balance: U256::from((i as u64) * 1000), + bytecode_hash: if i % 2 == 0 { Some(B256::from_slice(&[i; 32])) } else { None }, + }; + (address, account) + }) + .collect(); + + // Insert test data + { + let mut writer: Tx = db.writer().unwrap(); + + for (address, account) in &test_accounts { + writer.queue_put::(address, account).unwrap(); + } + + writer.raw_commit().unwrap(); + } + + // Test typed table traversal + { + let tx: Tx = db.reader().unwrap(); + let db_handle = tx.inner.open_db(Some(hot::PlainAccountState::NAME)).unwrap(); + let mut cursor = tx.inner.cursor(&db_handle).unwrap(); + + // Test first with type-safe operations + let first_raw = TableTraverse::::first(&mut cursor).unwrap(); + assert!(first_raw.is_some()); + let (first_key, first_account) = first_raw.unwrap(); + assert_eq!(first_key, test_accounts[0].0); + assert_eq!(first_account, test_accounts[0].1); + + // Test last + let last_raw = TableTraverse::::last(&mut cursor).unwrap(); + assert!(last_raw.is_some()); + let (last_key, last_account) = last_raw.unwrap(); + assert_eq!(last_key, test_accounts.last().unwrap().0); + assert_eq!(last_account, test_accounts.last().unwrap().1); + + // Test exact lookup + let target_address = &test_accounts[2].0; + let exact_account = + TableTraverse::::exact(&mut cursor, target_address) + .unwrap(); + assert!(exact_account.is_some()); + assert_eq!(exact_account.unwrap(), test_accounts[2].1); + + // Test range lookup + let mut partial_addr = [0u8; 20]; + partial_addr[19] = 3; // Between entries 2 and 3 + let range_addr = Address::from_slice(&partial_addr); + + let range_result = + TableTraverse::::lower_bound(&mut cursor, &range_addr) + .unwrap(); + assert!(range_result.is_some()); + let (found_addr, found_account) = range_result.unwrap(); + assert_eq!(found_addr, test_accounts[3].0); + assert_eq!(found_account, test_accounts[3].1); + } + } + + #[test] + fn test_dual_table_traverse() { + run_test(test_dual_table_traverse_inner) + } + + fn test_dual_table_traverse_inner(db: &DatabaseEnv) { + let one_addr = Address::repeat_byte(0x01); + let two_addr = Address::repeat_byte(0x02); + + let one_slot = B256::with_last_byte(0x01); + let two_slot = B256::with_last_byte(0x06); + let three_slot = B256::with_last_byte(0x09); + + let one_value = U256::from(0x100); + let two_value = U256::from(0x200); + let three_value = U256::from(0x300); + let four_value = U256::from(0x400); + let five_value = U256::from(0x500); + + // Setup test storage data + let test_storage: Vec<(Address, B256, U256)> = vec![ + (one_addr, one_slot, one_value), + (one_addr, two_slot, two_value), + (one_addr, three_slot, three_value), + (two_addr, one_slot, four_value), + (two_addr, two_slot, five_value), + ]; + + // Insert test data + { + let mut writer: Tx = db.writer().unwrap(); + + for (address, storage_key, value) in &test_storage { + writer + .queue_put_dual::(address, storage_key, value) + .unwrap(); + } + + writer.raw_commit().unwrap(); + } + + // Test dual-keyed traversal + { + let tx: Tx = db.reader().unwrap(); + let db_handle = tx.inner.open_db(Some(hot::PlainStorageState::NAME)).unwrap(); + let mut cursor = 
tx.inner.cursor(&db_handle).unwrap(); + + // Test exact dual lookup + let address = &test_storage[1].0; + let storage_key = &test_storage[1].1; + let expected_value = &test_storage[1].2; + + let exact_result = DualTableTraverse::::exact_dual( + &mut cursor, + address, + storage_key, + ) + .unwrap() + .unwrap(); + assert_eq!(exact_result, *expected_value); + + // Test range lookup for dual keys + let search_key = B256::with_last_byte(0x02); + let range_result = DualTableTraverse::::next_dual_above( + &mut cursor, + &test_storage[0].0, // Address 0x01 + &search_key, + ) + .unwrap() + .unwrap(); + + let (found_addr, found_key, found_value) = range_result; + assert_eq!(found_addr, test_storage[1].0); // Same address + assert_eq!(found_key, test_storage[1].1); // Next storage key (0x02) + assert_eq!(found_value, test_storage[1].2); // Corresponding value + + // Test next_k1 (move to next primary key) + // First position cursor at first entry of first address + DualTableTraverse::::exact_dual( + &mut cursor, + &test_storage[0].0, + &test_storage[0].1, + ) + .unwrap(); + + // Move to next primary key (different address) + let next_k1_result = + DualTableTraverse::::next_k1(&mut cursor).unwrap(); + assert!(next_k1_result.is_some()); + let (next_addr, next_storage_key, next_value) = next_k1_result.unwrap(); + assert_eq!(next_addr, test_storage[3].0); // Address 0x02 + assert_eq!(next_storage_key, test_storage[3].1); // First storage key for new address + assert_eq!(next_value, test_storage[3].2); + } + } + + #[test] + fn test_dual_table_traverse_empty_results() { + run_test(test_dual_table_traverse_empty_results_inner) + } + + fn test_dual_table_traverse_empty_results_inner(db: &DatabaseEnv) { + // Setup minimal test data + let address = Address::from_slice(&[0x01; 20]); + let storage_key = B256::from_slice(&[0x01; 32]); + let value = U256::from(100); + + { + let mut writer: Tx = db.writer().unwrap(); + writer + .queue_put_dual::(&address, &storage_key, &value) + .unwrap(); + writer.raw_commit().unwrap(); + } + + { + let tx: Tx = db.reader().unwrap(); + let db_handle = tx.inner.open_db(Some(hot::PlainStorageState::NAME)).unwrap(); + let mut cursor = tx.inner.cursor(&db_handle).unwrap(); + + // Test exact lookup for non-existent dual key + let missing_addr = Address::from_slice(&[0xFF; 20]); + let missing_key = B256::from_slice(&[0xFF; 32]); + + let exact_missing = DualTableTraverse::::exact_dual( + &mut cursor, + &missing_addr, + &missing_key, + ) + .unwrap(); + assert!(exact_missing.is_none()); + + // Test range lookup beyond all data + let beyond_key = B256::from_slice(&[0xFF; 32]); + let range_missing = DualTableTraverse::::next_dual_above( + &mut cursor, + &address, + &beyond_key, + ) + .unwrap(); + assert!(range_missing.is_none()); + + // Position at the only entry, then try next_k1 + DualTableTraverse::::exact_dual( + &mut cursor, + &address, + &storage_key, + ) + .unwrap(); + + let next_k1_missing = + DualTableTraverse::::next_k1(&mut cursor).unwrap(); + assert!(next_k1_missing.is_none()); + } + } + + #[test] + fn test_table_traverse_empty_table() { + run_test(test_table_traverse_empty_table_inner) + } + + fn test_table_traverse_empty_table_inner(db: &DatabaseEnv) { + // TestTable is already created but empty + { + let tx: Tx = db.reader().unwrap(); + let db_handle = tx.inner.open_db(Some(TestTable::NAME)).unwrap(); + let mut cursor = tx.inner.cursor(&db_handle).unwrap(); + + // All operations should return None on empty table + assert!(TableTraverse::::first(&mut 
cursor).unwrap().is_none()); + assert!(TableTraverse::::last(&mut cursor).unwrap().is_none()); + assert!(TableTraverse::::exact(&mut cursor, &42u64).unwrap().is_none()); + assert!( + TableTraverse::::lower_bound(&mut cursor, &42u64).unwrap().is_none() + ); + assert!(TableTraverse::::read_next(&mut cursor).unwrap().is_none()); + assert!(TableTraverse::::read_prev(&mut cursor).unwrap().is_none()); + } + } + + #[test] + fn test_table_traverse_state_management() { + run_test(test_table_traverse_state_management_inner) + } + + fn test_table_traverse_state_management_inner(db: &DatabaseEnv) { + let test_data: Vec<(u64, Bytes)> = vec![ + (1, Bytes::from_static(b"state_value_1")), + (2, Bytes::from_static(b"state_value_2")), + (3, Bytes::from_static(b"state_value_3")), + ]; + + { + let mut writer: Tx = db.writer().unwrap(); + for (key, value) in &test_data { + writer.queue_put::(key, value).unwrap(); + } + writer.raw_commit().unwrap(); + } + + { + let tx: Tx = db.reader().unwrap(); + let db_handle = tx.inner.open_db(Some(TestTable::NAME)).unwrap(); + let mut cursor = tx.inner.cursor(&db_handle).unwrap(); + + // Test that cursor operations maintain state correctly + + // Start at first + let first = TableTraverse::::first(&mut cursor).unwrap().unwrap(); + assert_eq!(first.0, test_data[0].0); + + // Move to second via next + let second = TableTraverse::::read_next(&mut cursor).unwrap().unwrap(); + assert_eq!(second.0, test_data[1].0); + + // Jump to last + let last = TableTraverse::::last(&mut cursor).unwrap().unwrap(); + assert_eq!(last.0, test_data[2].0); + + // Move back via prev + let back_to_second = + TableTraverse::::read_prev(&mut cursor).unwrap().unwrap(); + assert_eq!(back_to_second.0, test_data[1].0); + + // Use exact to jump to specific position + let exact_first = + TableTraverse::::exact(&mut cursor, &test_data[0].0).unwrap(); + assert!(exact_first.is_some()); + assert_eq!(exact_first.unwrap(), test_data[0].1); + + // Verify cursor is now positioned at first entry + let next_from_first = + TableTraverse::::read_next(&mut cursor).unwrap().unwrap(); + assert_eq!(next_from_first.0, test_data[1].0); + + // Use range lookup - look for key >= 1, should find key 1 + let range_lookup = + TableTraverse::::lower_bound(&mut cursor, &1u64).unwrap().unwrap(); + assert_eq!(range_lookup.0, test_data[0].0); // Should find key 1 + + // Verify we can continue navigation from range position + let next_after_range = + TableTraverse::::read_next(&mut cursor).unwrap().unwrap(); + assert_eq!(next_after_range.0, test_data[1].0); + } + } +} diff --git a/crates/storage/src/hot/impls/mem.rs b/crates/storage/src/hot/impls/mem.rs new file mode 100644 index 0000000..ed43cd2 --- /dev/null +++ b/crates/storage/src/hot/impls/mem.rs @@ -0,0 +1,1869 @@ +use crate::{ + hot::model::{ + DualKeyValue, DualKeyedTraverse, DualTableTraverse, HotKv, HotKvError, HotKvRead, + HotKvReadError, HotKvWrite, KvTraverse, KvTraverseMut, RawDualKeyValue, RawKeyValue, + RawValue, + }, + ser::{DeserError, KeySer, MAX_KEY_SIZE}, + tables::DualKeyed, +}; +use bytes::Bytes; +use std::{ + borrow::Cow, + collections::BTreeMap, + sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard}, +}; + +// Type aliases for store structure +type MemStoreKey = [u8; MAX_KEY_SIZE * 2]; +type StoreTable = BTreeMap; +type Store = BTreeMap; + +// Type aliases for queued operations +type TableOp = BTreeMap; +type OpStore = BTreeMap; + +/// A simple in-memory key-value store using [`BTreeMap`]s. +/// +/// The store is backed by an [`RwLock`]. 
As a result, this implementation
+/// supports multiple concurrent read transactions, but write
+/// transactions are exclusive, and cannot overlap with other read or write
+/// transactions.
+///
+/// This implementation is primarily intended for testing and
+/// development purposes.
+#[derive(Clone)]
+pub struct MemKv {
+    map: Arc<RwLock<Store>>,
+}
+
+impl core::fmt::Debug for MemKv {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        f.debug_struct("MemKv").finish()
+    }
+}
+
+impl MemKv {
+    /// Create a new empty in-memory KV store.
+    pub fn new() -> Self {
+        Self { map: Arc::new(RwLock::new(BTreeMap::new())) }
+    }
+
+    #[track_caller]
+    fn key(k: &[u8]) -> MemStoreKey {
+        assert!(k.len() <= MAX_KEY_SIZE * 2, "Key length exceeds MAX_KEY_SIZE");
+        let mut buf = [0u8; MAX_KEY_SIZE * 2];
+        buf[..k.len()].copy_from_slice(k);
+        buf
+    }
+
+    #[track_caller]
+    fn dual_key(k1: &[u8], k2: &[u8]) -> MemStoreKey {
+        assert!(
+            k1.len() + k2.len() <= MAX_KEY_SIZE * 2,
+            "Combined key length exceeds MAX_KEY_SIZE"
+        );
+        let mut buf = [0u8; MAX_KEY_SIZE * 2];
+        buf[..MAX_KEY_SIZE.min(k1.len())].copy_from_slice(k1);
+        buf[MAX_KEY_SIZE..MAX_KEY_SIZE + k2.len()].copy_from_slice(k2);
+        buf
+    }
+
+    /// SAFETY:
+    /// Caller must ensure that `key` lives long enough.
+    #[track_caller]
+    fn split_dual_key<'a>(key: &[u8]) -> (Cow<'a, [u8]>, Cow<'a, [u8]>) {
+        assert_eq!(key.len(), MAX_KEY_SIZE * 2, "Key length does not match expected dual key size");
+        let k1 = &key[..MAX_KEY_SIZE];
+        let k2 = &key[MAX_KEY_SIZE..];
+
+        unsafe { std::mem::transmute((Cow::Borrowed(k1), Cow::Borrowed(k2))) }
+    }
+}
+
+impl Default for MemKv {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+/// Read-only transaction for MemKv.
+pub struct MemKvRoTx {
+    guard: RwLockReadGuard<'static, Store>,
+
+    // Keep the store alive while the transaction exists
+    _store: Arc<RwLock<Store>>,
+}
+
+impl core::fmt::Debug for MemKvRoTx {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        f.debug_struct("MemKvRoTx").finish()
+    }
+}
+
+// SAFETY: MemKvRoTx holds a read guard which ensures the data remains valid
+unsafe impl Send for MemKvRoTx {}
+unsafe impl Sync for MemKvRoTx {}
+
+/// Read-write transaction for MemKv.
+pub struct MemKvRwTx {
+    guard: RwLockWriteGuard<'static, Store>,
+    queued_ops: OpStore,
+
+    // Keep the store alive while the transaction exists
+    _store: Arc<RwLock<Store>>,
+}
+
+impl MemKvRwTx {
+    fn commit_inner(&mut self) {
+        let ops = std::mem::take(&mut self.queued_ops);
+
+        for (table, table_op) in ops.into_iter() {
+            table_op.apply(&table, &mut self.guard);
+        }
+    }
+
+    /// Downgrade the transaction to a read-only transaction without
+    /// committing, discarding queued changes.
+    pub fn downgrade(self) -> MemKvRoTx {
+        let guard = RwLockWriteGuard::downgrade(self.guard);
+
+        MemKvRoTx { guard, _store: self._store }
+    }
+
+    /// Commit the transaction and downgrade to a read-only transaction.
+ pub fn commit_downgrade(mut self) -> MemKvRoTx { + self.commit_inner(); + + let guard = RwLockWriteGuard::downgrade(self.guard); + + MemKvRoTx { guard, _store: self._store } + } +} + +impl core::fmt::Debug for MemKvRwTx { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("MemKvRwTx").finish() + } +} + +/// Queued key-value operation +#[derive(Debug, Clone)] +enum QueuedKvOp { + Delete, + Put { value: Bytes }, +} + +impl QueuedKvOp { + /// Apply the op to a table + fn apply(self, table: &mut StoreTable, key: MemStoreKey) { + match self { + QueuedKvOp::Put { value } => { + table.insert(key, value); + } + QueuedKvOp::Delete => { + table.remove(&key); + } + } + } +} + +/// Queued table operation +#[derive(Debug)] +enum QueuedTableOp { + Modify { ops: TableOp }, + Clear { new_table: TableOp }, +} + +impl Default for QueuedTableOp { + fn default() -> Self { + QueuedTableOp::Modify { ops: TableOp::new() } + } +} + +impl QueuedTableOp { + const fn is_clear(&self) -> bool { + matches!(self, QueuedTableOp::Clear { .. }) + } + + fn get(&self, key: &MemStoreKey) -> Option<&QueuedKvOp> { + match self { + QueuedTableOp::Modify { ops } => ops.get(key), + QueuedTableOp::Clear { new_table } => new_table.get(key), + } + } + + fn put(&mut self, key: MemStoreKey, op: QueuedKvOp) { + match self { + QueuedTableOp::Modify { ops } | QueuedTableOp::Clear { new_table: ops } => { + ops.insert(key, op); + } + } + } + + fn delete(&mut self, key: MemStoreKey) { + match self { + QueuedTableOp::Modify { ops } | QueuedTableOp::Clear { new_table: ops } => { + ops.insert(key, QueuedKvOp::Delete); + } + } + } + + /// Get mutable reference to the inner ops if applicable + fn apply(self, key: &str, store: &mut Store) { + match self { + QueuedTableOp::Modify { ops } => { + let table = store.entry(key.to_owned()).or_default(); + for (key, op) in ops { + op.apply(table, key); + } + } + QueuedTableOp::Clear { new_table } => { + let mut table = StoreTable::new(); + for (k, op) in new_table { + op.apply(&mut table, k); + } + + // replace the table entirely + store.insert(key.to_owned(), table); + } + } + } +} + +// SAFETY: MemKvRwTx holds a write guard which ensures exclusive access +unsafe impl Send for MemKvRwTx {} + +/// Error type for MemKv operations +#[derive(Debug, thiserror::Error)] +pub enum MemKvError { + /// Hot KV error + #[error(transparent)] + HotKv(#[from] HotKvError), + + /// Serialization error + #[error(transparent)] + Deser(#[from] DeserError), +} + +impl trevm::revm::database::DBErrorMarker for MemKvError {} + +impl HotKvReadError for MemKvError { + fn into_hot_kv_error(self) -> HotKvError { + match self { + MemKvError::HotKv(e) => e, + MemKvError::Deser(e) => HotKvError::Deser(e), + } + } +} + +/// Memory cursor for traversing a BTreeMap +pub struct MemKvCursor<'a> { + table: &'a StoreTable, + current_key: Option, +} + +impl core::fmt::Debug for MemKvCursor<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("MemKvCursor").finish() + } +} + +impl<'a> MemKvCursor<'a> { + /// Create a new cursor for the given table + pub const fn new(table: &'a StoreTable) -> Self { + Self { table, current_key: None } + } + + /// Get the current key the cursor is positioned at + pub fn current_key(&self) -> MemStoreKey { + self.current_key.unwrap_or([0u8; MAX_KEY_SIZE * 2]) + } + + /// Set the current key the cursor is positioned at + pub const fn set_current_key(&mut self, key: MemStoreKey) { + self.current_key = Some(key); + } + + /// Clear the 
current key the cursor is positioned at + pub const fn clear_current_key(&mut self) { + self.current_key = None; + } + + /// Get the current k1 the cursor is positioned at + fn current_k1(&self) -> [u8; MAX_KEY_SIZE] { + self.current_key + .map(|key| key[..MAX_KEY_SIZE].try_into().unwrap()) + .unwrap_or([0u8; MAX_KEY_SIZE]) + } +} + +impl<'a> KvTraverse for MemKvCursor<'a> { + fn first<'b>(&'b mut self) -> Result>, MemKvError> { + let Some((key, value)) = self.table.first_key_value() else { + self.clear_current_key(); + return Ok(None); + }; + self.current_key = Some(*key); + Ok(Some((Cow::Borrowed(key), Cow::Borrowed(value.as_ref())))) + } + + fn last<'b>(&'b mut self) -> Result>, MemKvError> { + let Some((key, value)) = self.table.last_key_value() else { + self.clear_current_key(); + return Ok(None); + }; + self.current_key = Some(*key); + Ok(Some((Cow::Borrowed(key), Cow::Borrowed(value.as_ref())))) + } + + fn exact<'b>(&'b mut self, key: &[u8]) -> Result>, MemKvError> { + let search_key = MemKv::key(key); + self.set_current_key(search_key); + if let Some(value) = self.table.get(&search_key) { + Ok(Some(Cow::Borrowed(value.as_ref()))) + } else { + Ok(None) + } + } + + fn lower_bound<'b>(&'b mut self, key: &[u8]) -> Result>, MemKvError> { + let search_key = MemKv::key(key); + + // Use range to find the first key >= search_key + if let Some((found_key, value)) = self.table.range(search_key..).next() { + self.set_current_key(*found_key); + Ok(Some((Cow::Borrowed(found_key), Cow::Borrowed(value.as_ref())))) + } else { + self.current_key = self.table.last_key_value().map(|(k, _)| *k); + Ok(None) + } + } + + fn read_next<'b>(&'b mut self) -> Result>, MemKvError> { + use core::ops::Bound; + let current = self.current_key(); + // Use Excluded bound to find strictly greater than current key + let Some((found_key, value)) = + self.table.range((Bound::Excluded(current), Bound::Unbounded)).next() + else { + return Ok(None); + }; + self.set_current_key(*found_key); + Ok(Some((Cow::Borrowed(found_key), Cow::Borrowed(value.as_ref())))) + } + + fn read_prev<'b>(&'b mut self) -> Result>, MemKvError> { + let current = self.current_key(); + let Some((k, v)) = self.table.range(..current).next_back() else { + self.clear_current_key(); + return Ok(None); + }; + self.set_current_key(*k); + Ok(Some((Cow::Borrowed(k), Cow::Borrowed(v.as_ref())))) + } +} + +// Implement DualKeyedTraverse (basic implementation - delegates to raw methods) +impl<'a> DualKeyedTraverse for MemKvCursor<'a> { + fn exact_dual<'b>( + &'b mut self, + key1: &[u8], + key2: &[u8], + ) -> Result>, MemKvError> { + let combined_key = MemKv::dual_key(key1, key2); + KvTraverse::exact(self, &combined_key) + } + + fn next_dual_above<'b>( + &'b mut self, + key1: &[u8], + key2: &[u8], + ) -> Result>, MemKvError> { + let combined_key = MemKv::dual_key(key1, key2); + let Some((found_key, value)) = KvTraverse::lower_bound(self, &combined_key)? 
else { + return Ok(None); + }; + let (k1, k2) = MemKv::split_dual_key(found_key.as_ref()); + Ok(Some((k1, k2, value))) + } + + fn next_k1<'b>(&'b mut self) -> Result>, MemKvError> { + // scan forward until finding a new k1 + let last_k1 = self.current_k1(); + + DualKeyedTraverse::next_dual_above(self, &last_k1, &[0xffu8; MAX_KEY_SIZE]) + } + + fn next_k2<'b>(&'b mut self) -> Result>, MemKvError> { + let current_key = self.current_key(); + let (current_k1, current_k2) = MemKv::split_dual_key(¤t_key); + + // scan forward until finding a new k2 for the same k1 + DualKeyedTraverse::next_dual_above(self, ¤t_k1, ¤t_k2) + } +} + +// Implement DualTableTraverse for typed dual-keyed table access +impl<'a, T> DualTableTraverse for MemKvCursor<'a> +where + T: DualKeyed, +{ + fn next_dual_above( + &mut self, + key1: &T::Key, + key2: &T::Key2, + ) -> Result>, MemKvError> { + let mut key1_buf = [0u8; MAX_KEY_SIZE]; + let mut key2_buf = [0u8; MAX_KEY_SIZE]; + let key1_bytes = key1.encode_key(&mut key1_buf); + let key2_bytes = key2.encode_key(&mut key2_buf); + + DualKeyedTraverse::next_dual_above(self, key1_bytes, key2_bytes)? + .map(T::decode_kkv_tuple) + .transpose() + .map_err(Into::into) + } + + fn next_k1(&mut self) -> Result>, MemKvError> { + DualKeyedTraverse::next_k1(self)?.map(T::decode_kkv_tuple).transpose().map_err(Into::into) + } + + fn next_k2(&mut self) -> Result>, MemKvError> { + DualKeyedTraverse::next_k2(self)?.map(T::decode_kkv_tuple).transpose().map_err(Into::into) + } +} + +/// Memory cursor for read-write operations +pub struct MemKvCursorMut<'a> { + table: &'a StoreTable, + queued_ops: &'a mut TableOp, + is_cleared: bool, + current_key: Option, +} + +impl core::fmt::Debug for MemKvCursorMut<'_> { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("MemKvCursorMut").field("is_cleared", &self.is_cleared).finish() + } +} + +impl<'a> MemKvCursorMut<'a> { + /// Create a new mutable cursor for the given table and queued operations + const fn new(table: &'a StoreTable, queued_ops: &'a mut TableOp, is_cleared: bool) -> Self { + Self { table, queued_ops, is_cleared, current_key: None } + } + + /// Get the current key the cursor is positioned at + pub fn current_key(&self) -> MemStoreKey { + self.current_key.unwrap_or([0u8; MAX_KEY_SIZE * 2]) + } + + /// Set the current key the cursor is positioned at + pub const fn set_current_key(&mut self, key: MemStoreKey) { + self.current_key = Some(key); + } + + /// Clear the current key the cursor is positioned at + pub const fn clear_current_key(&mut self) { + self.current_key = None; + } + + /// Get the current k1 the cursor is positioned at + fn current_k1(&self) -> [u8; MAX_KEY_SIZE] { + self.current_key + .map(|key| key[..MAX_KEY_SIZE].try_into().unwrap()) + .unwrap_or([0u8; MAX_KEY_SIZE]) + } + + /// Get value for a key, returning owned bytes + fn get_owned(&self, key: &MemStoreKey) -> Option { + if let Some(op) = self.queued_ops.get(key) { + match op { + QueuedKvOp::Put { value } => Some(value.clone()), + QueuedKvOp::Delete => None, + } + } else if !self.is_cleared { + self.table.get(key).cloned() + } else { + None + } + } + + /// Get the first key-value pair >= key, returning owned data + fn get_range_owned(&self, key: &MemStoreKey) -> Option<(MemStoreKey, Bytes)> { + let q = self.queued_ops.range(*key..).next(); + let c = if !self.is_cleared { self.table.range(*key..).next() } else { None }; + + match (q, c) { + (None, None) => None, + (Some((qk, queued)), Some((ck, current))) => { + if qk <= ck { + // 
Queued operation takes precedence + match queued { + QueuedKvOp::Put { value } => Some((*qk, value.clone())), + QueuedKvOp::Delete => { + // Skip deleted entry and look for next + let mut next_key = *qk; + for i in (0..next_key.len()).rev() { + if next_key[i] < u8::MAX { + next_key[i] += 1; + break; + } + next_key[i] = 0; + } + self.get_range_owned(&next_key) + } + } + } else { + Some((*ck, current.clone())) + } + } + (Some((qk, queued)), None) => match queued { + QueuedKvOp::Put { value } => Some((*qk, value.clone())), + QueuedKvOp::Delete => { + let mut next_key = *qk; + for i in (0..next_key.len()).rev() { + if next_key[i] < u8::MAX { + next_key[i] += 1; + break; + } + next_key[i] = 0; + } + self.get_range_owned(&next_key) + } + }, + (None, Some((ck, current))) => Some((*ck, current.clone())), + } + } + + /// Get the first key-value pair > key (strictly greater), returning owned data + fn get_range_exclusive_owned(&self, key: &MemStoreKey) -> Option<(MemStoreKey, Bytes)> { + use core::ops::Bound; + + let q = self.queued_ops.range((Bound::Excluded(*key), Bound::Unbounded)).next(); + let c = if !self.is_cleared { + self.table.range((Bound::Excluded(*key), Bound::Unbounded)).next() + } else { + None + }; + + match (q, c) { + (None, None) => None, + (Some((qk, queued)), Some((ck, current))) => { + if qk <= ck { + // Queued operation takes precedence + match queued { + QueuedKvOp::Put { value } => Some((*qk, value.clone())), + QueuedKvOp::Delete => { + // This key is deleted, recurse to find the next one + self.get_range_exclusive_owned(qk) + } + } + } else { + // Check if the current key has a delete queued + if let Some(QueuedKvOp::Delete) = self.queued_ops.get(ck) { + self.get_range_exclusive_owned(ck) + } else { + Some((*ck, current.clone())) + } + } + } + (Some((qk, queued)), None) => match queued { + QueuedKvOp::Put { value } => Some((*qk, value.clone())), + QueuedKvOp::Delete => self.get_range_exclusive_owned(qk), + }, + (None, Some((ck, current))) => { + // Check if the current key has a delete queued + if let Some(QueuedKvOp::Delete) = self.queued_ops.get(ck) { + self.get_range_exclusive_owned(ck) + } else { + Some((*ck, current.clone())) + } + } + } + } + + /// Get the last key-value pair < key, returning owned data + fn get_range_reverse_owned(&self, key: &MemStoreKey) -> Option<(MemStoreKey, Bytes)> { + let q = self.queued_ops.range(..*key).next_back(); + let c = if !self.is_cleared { self.table.range(..*key).next_back() } else { None }; + + match (q, c) { + (None, None) => None, + (Some((qk, queued)), Some((ck, current))) => { + if qk >= ck { + // Queued operation takes precedence + match queued { + QueuedKvOp::Put { value } => Some((*qk, value.clone())), + QueuedKvOp::Delete => self.get_range_reverse_owned(qk), + } + } else { + Some((*ck, current.clone())) + } + } + (Some((qk, queued)), None) => match queued { + QueuedKvOp::Put { value } => Some((*qk, value.clone())), + QueuedKvOp::Delete => self.get_range_reverse_owned(qk), + }, + (None, Some((ck, current))) => Some((*ck, current.clone())), + } + } +} + +impl<'a> KvTraverse for MemKvCursorMut<'a> { + fn first<'b>(&'b mut self) -> Result>, MemKvError> { + let start_key = [0u8; MAX_KEY_SIZE * 2]; + + // Get the first effective key-value pair + if let Some((key, value)) = self.get_range_owned(&start_key) { + self.current_key = Some(key); + Ok(Some((Cow::Owned(key.to_vec()), Cow::Owned(value.to_vec())))) + } else { + self.current_key = None; + Ok(None) + } + } + + fn last<'b>(&'b mut self) -> Result>, MemKvError> { + let end_key = 
[0xffu8; MAX_KEY_SIZE * 2]; + + if let Some((key, value)) = self.get_range_reverse_owned(&end_key) { + self.current_key = Some(key); + Ok(Some((Cow::Owned(key.to_vec()), Cow::Owned(value.to_vec())))) + } else { + self.current_key = None; + Ok(None) + } + } + + fn exact<'b>(&'b mut self, key: &[u8]) -> Result>, MemKvError> { + let search_key = MemKv::key(key); + self.current_key = Some(search_key); + + if let Some(value) = self.get_owned(&search_key) { + Ok(Some(Cow::Owned(value.to_vec()))) + } else { + Ok(None) + } + } + + fn lower_bound<'b>(&'b mut self, key: &[u8]) -> Result>, MemKvError> { + let search_key = MemKv::key(key); + + if let Some((found_key, value)) = self.get_range_owned(&search_key) { + self.current_key = Some(found_key); + Ok(Some((Cow::Owned(found_key.to_vec()), Cow::Owned(value.to_vec())))) + } else { + self.current_key = None; + Ok(None) + } + } + + fn read_next<'b>(&'b mut self) -> Result>, MemKvError> { + let current = self.current_key(); + + // Use exclusive range to find strictly greater than current key + if let Some((found_key, value)) = self.get_range_exclusive_owned(¤t) { + self.current_key = Some(found_key); + Ok(Some((Cow::Owned(found_key.to_vec()), Cow::Owned(value.to_vec())))) + } else { + self.current_key = None; + Ok(None) + } + } + + fn read_prev<'b>(&'b mut self) -> Result>, MemKvError> { + let current = self.current_key(); + + if let Some((found_key, value)) = self.get_range_reverse_owned(¤t) { + self.current_key = Some(found_key); + Ok(Some((Cow::Owned(found_key.to_vec()), Cow::Owned(value.to_vec())))) + } else { + self.current_key = None; + Ok(None) + } + } +} + +impl<'a> KvTraverseMut for MemKvCursorMut<'a> { + fn delete_current(&mut self) -> Result<(), MemKvError> { + if let Some(key) = self.current_key { + // Queue a delete operation + self.queued_ops.insert(key, QueuedKvOp::Delete); + Ok(()) + } else { + Err(MemKvError::HotKv(HotKvError::Inner("No current key to delete".into()))) + } + } +} + +impl<'a> DualKeyedTraverse for MemKvCursorMut<'a> { + fn exact_dual<'b>( + &'b mut self, + key1: &[u8], + key2: &[u8], + ) -> Result>, MemKvError> { + let combined_key = MemKv::dual_key(key1, key2); + KvTraverse::exact(self, &combined_key) + } + + fn next_dual_above<'b>( + &'b mut self, + key1: &[u8], + key2: &[u8], + ) -> Result>, MemKvError> { + let combined_key = MemKv::dual_key(key1, key2); + let Some((found_key, value)) = KvTraverse::lower_bound(self, &combined_key)? 
else { + return Ok(None); + }; + + let (key1, key2) = MemKv::split_dual_key(found_key.as_ref()); + Ok(Some((key1, key2, value))) + } + + fn next_k1<'b>(&'b mut self) -> Result>, MemKvError> { + // scan forward until finding a new k1 + let last_k1 = self.current_k1(); + + DualKeyedTraverse::next_dual_above(self, &last_k1, &[0xffu8; MAX_KEY_SIZE]) + } + + fn next_k2<'b>(&'b mut self) -> Result>, MemKvError> { + let current_key = self.current_key(); + let (current_k1, current_k2) = MemKv::split_dual_key(¤t_key); + + // scan forward until finding a new k2 for the same k1 + DualKeyedTraverse::next_dual_above(self, ¤t_k1, ¤t_k2) + } +} + +// Implement DualTableTraverse for typed dual-keyed table access +impl<'a, T> DualTableTraverse for MemKvCursorMut<'a> +where + T: DualKeyed, +{ + fn next_dual_above( + &mut self, + key1: &T::Key, + key2: &T::Key2, + ) -> Result>, MemKvError> { + let mut key1_buf = [0u8; MAX_KEY_SIZE]; + let mut key2_buf = [0u8; MAX_KEY_SIZE]; + let key1_bytes = key1.encode_key(&mut key1_buf); + let key2_bytes = key2.encode_key(&mut key2_buf); + + DualKeyedTraverse::next_dual_above(self, key1_bytes, key2_bytes)? + .map(T::decode_kkv_tuple) + .transpose() + .map_err(Into::into) + } + + fn next_k1(&mut self) -> Result>, MemKvError> { + DualKeyedTraverse::next_k1(self)?.map(T::decode_kkv_tuple).transpose().map_err(Into::into) + } + + fn next_k2(&mut self) -> Result>, MemKvError> { + DualKeyedTraverse::next_k2(self)?.map(T::decode_kkv_tuple).transpose().map_err(Into::into) + } +} + +impl HotKv for MemKv { + type RoTx = MemKvRoTx; + type RwTx = MemKvRwTx; + + fn reader(&self) -> Result { + let guard = self + .map + .try_read() + .map_err(|_| HotKvError::Inner("Failed to acquire read lock".into()))?; + + // SAFETY: This is safe-ish, as we ensure the map is not dropped until + // the guard is also dropped. + let guard: RwLockReadGuard<'static, Store> = unsafe { std::mem::transmute(guard) }; + + Ok(MemKvRoTx { guard, _store: self.map.clone() }) + } + + fn writer(&self) -> Result { + let guard = self.map.try_write().map_err(|_| HotKvError::WriteLocked)?; + + // SAFETY: This is safe-ish, as we ensure the map is not dropped until + // the guard is also dropped. 
+ let guard: RwLockWriteGuard<'static, Store> = unsafe { std::mem::transmute(guard) }; + + Ok(MemKvRwTx { guard, _store: self.map.clone(), queued_ops: OpStore::new() }) + } +} + +impl HotKvRead for MemKvRoTx { + type Error = MemKvError; + + type Traverse<'a> = MemKvCursor<'a>; + + fn raw_traverse<'a>(&'a self, table: &str) -> Result, Self::Error> { + let table_data = self.guard.get(table).unwrap_or(&EMPTY_TABLE); + Ok(MemKvCursor::new(table_data)) + } + + fn raw_get<'a>( + &'a self, + table: &str, + key: &[u8], + ) -> Result>, Self::Error> { + // Check queued operations first (read-your-writes consistency) + let key = MemKv::key(key); + + // SAFETY: The guard ensures the map remains valid + + Ok(self + .guard + .get(table) + .and_then(|t| t.get(&key)) + .map(|bytes| Cow::Borrowed(bytes.as_ref()))) + } + + fn raw_get_dual<'a>( + &'a self, + table: &str, + key1: &[u8], + key2: &[u8], + ) -> Result>, Self::Error> { + let key = MemKv::dual_key(key1, key2); + self.raw_get(table, &key) + } +} + +static EMPTY_TABLE: StoreTable = BTreeMap::new(); + +impl MemKvRoTx { + /// Get a cursor for the specified table + pub fn cursor<'a>(&'a self, table: &str) -> Result, MemKvError> { + let table_data = self.guard.get(table).unwrap_or(&EMPTY_TABLE); + Ok(MemKvCursor::new(table_data)) + } +} + +impl HotKvRead for MemKvRwTx { + type Error = MemKvError; + + type Traverse<'a> = MemKvCursor<'a>; + + fn raw_traverse<'a>(&'a self, table: &str) -> Result, Self::Error> { + let table_data = self.guard.get(table).unwrap_or(&EMPTY_TABLE); + Ok(MemKvCursor::new(table_data)) + } + + fn raw_get<'a>( + &'a self, + table: &str, + key: &[u8], + ) -> Result>, Self::Error> { + // Check queued operations first (read-your-writes consistency) + let key = MemKv::key(key); + + if let Some(table) = self.queued_ops.get(table) { + if table.is_clear() { + return Ok(None); + } + + match table.get(&key) { + Some(QueuedKvOp::Put { value }) => { + return Ok(Some(Cow::Borrowed(value.as_ref()))); + } + Some(QueuedKvOp::Delete) => { + return Ok(None); + } + None => {} + } + } + + // If not found in queued ops, check the underlying map + Ok(self + .guard + .get(table) + .and_then(|t| t.get(&key)) + .map(|bytes| Cow::Borrowed(bytes.as_ref()))) + } + + fn raw_get_dual<'a>( + &'a self, + table: &str, + key1: &[u8], + key2: &[u8], + ) -> Result>, Self::Error> { + let key = MemKv::dual_key(key1, key2); + self.raw_get(table, &key) + } +} + +impl MemKvRwTx { + /// Get a read-only cursor for the specified table + /// Note: This cursor will NOT see pending writes from this transaction + pub fn cursor<'a>(&'a self, table: &str) -> Result, MemKvError> { + if let Some(table_data) = self.guard.get(table) { + Ok(MemKvCursor::new(table_data)) + } else { + Err(MemKvError::HotKv(HotKvError::Inner(format!("Table '{}' not found", table).into()))) + } + } + + /// Get a mutable cursor for the specified table + /// This cursor will see both committed data and pending writes from this transaction + pub fn cursor_mut<'a>(&'a mut self, table: &str) -> Result, MemKvError> { + // Get or create the table data + let table_data = self.guard.entry(table.to_owned()).or_default(); + + // Get or create the queued operations for this table + let table_ops = self.queued_ops.entry(table.to_owned()).or_default(); + + let is_cleared = table_ops.is_clear(); + + // Extract the inner TableOp from QueuedTableOp + let ops = match table_ops { + QueuedTableOp::Modify { ops } => ops, + QueuedTableOp::Clear { new_table } => new_table, + }; + + Ok(MemKvCursorMut::new(table_data, ops, 
is_cleared)) + } +} + +impl HotKvWrite for MemKvRwTx { + type TraverseMut<'a> + = MemKvCursorMut<'a> + where + Self: 'a; + + fn raw_traverse_mut<'a>( + &'a mut self, + table: &str, + ) -> Result, Self::Error> { + self.cursor_mut(table) + } + + fn queue_raw_put(&mut self, table: &str, key: &[u8], value: &[u8]) -> Result<(), Self::Error> { + let key = MemKv::key(key); + + let value_bytes = Bytes::copy_from_slice(value); + + self.queued_ops + .entry(table.to_owned()) + .or_default() + .put(key, QueuedKvOp::Put { value: value_bytes }); + Ok(()) + } + + fn queue_raw_put_dual( + &mut self, + table: &str, + key1: &[u8], + key2: &[u8], + value: &[u8], + ) -> Result<(), Self::Error> { + let key = MemKv::dual_key(key1, key2); + self.queue_raw_put(table, &key, value) + } + + fn queue_raw_delete(&mut self, table: &str, key: &[u8]) -> Result<(), Self::Error> { + let key = MemKv::key(key); + + self.queued_ops.entry(table.to_owned()).or_default().delete(key); + Ok(()) + } + + fn queue_raw_clear(&mut self, table: &str) -> Result<(), Self::Error> { + self.queued_ops + .insert(table.to_owned(), QueuedTableOp::Clear { new_table: TableOp::new() }); + Ok(()) + } + + fn queue_raw_create( + &mut self, + _table: &str, + _dual_key: bool, + _dual_fixed: bool, + ) -> Result<(), Self::Error> { + Ok(()) + } + + fn raw_commit(mut self) -> Result<(), Self::Error> { + // Apply all queued operations to the map + self.commit_inner(); + + // The write guard is automatically dropped here, releasing the lock + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + hot::model::{DualTableTraverse, TableTraverse, TableTraverseMut}, + tables::{SingleKey, Table}, + }; + use alloy::primitives::{Address, U256}; + use bytes::Bytes; + + // Test table definitions + #[derive(Debug)] + struct TestTable; + + impl SingleKey for TestTable {} + + impl Table for TestTable { + const NAME: &'static str = "test_table"; + + type Key = u64; + type Value = Bytes; + } + + #[derive(Debug)] + struct AddressTable; + + impl Table for AddressTable { + const NAME: &'static str = "addresses"; + type Key = Address; + type Value = U256; + } + + impl SingleKey for AddressTable {} + + #[derive(Debug)] + struct DualTestTable; + + impl Table for DualTestTable { + const NAME: &'static str = "dual_test_table"; + type Key = u64; + type Value = Bytes; + } + + impl crate::tables::DualKeyed for DualTestTable { + type Key2 = u32; + } + + #[test] + fn test_new_store() { + let store = MemKv::new(); + let reader = store.reader().unwrap(); + + // Empty store should return None for any key + assert!(reader.raw_get("test", &[1, 2, 3]).unwrap().is_none()); + } + + #[test] + fn test_basic_put_get() { + let store = MemKv::new(); + + // Write some data + { + let mut writer = store.writer().unwrap(); + writer.queue_raw_put("table1", &[1, 2, 3], b"value1").unwrap(); + writer.queue_raw_put("table1", &[4, 5, 6], b"value2").unwrap(); + writer.raw_commit().unwrap(); + } + + // Read the data back + { + let reader = store.reader().unwrap(); + let value1 = reader.raw_get("table1", &[1, 2, 3]).unwrap(); + let value2 = reader.raw_get("table1", &[4, 5, 6]).unwrap(); + let missing = reader.raw_get("table1", &[7, 8, 9]).unwrap(); + + assert_eq!(value1.as_deref(), Some(b"value1" as &[u8])); + assert_eq!(value2.as_deref(), Some(b"value2" as &[u8])); + assert!(missing.is_none()); + } + } + + #[test] + fn test_multiple_tables() { + let store = MemKv::new(); + + // Write to different tables + { + let mut writer = store.writer().unwrap(); + writer.queue_raw_put("table1", &[1], 
b"table1_value").unwrap(); + writer.queue_raw_put("table2", &[1], b"table2_value").unwrap(); + writer.raw_commit().unwrap(); + } + + // Read from different tables + { + let reader = store.reader().unwrap(); + let value1 = reader.raw_get("table1", &[1]).unwrap(); + let value2 = reader.raw_get("table2", &[1]).unwrap(); + + assert_eq!(value1.as_deref(), Some(b"table1_value" as &[u8])); + assert_eq!(value2.as_deref(), Some(b"table2_value" as &[u8])); + } + } + + #[test] + fn test_overwrite_value() { + let store = MemKv::new(); + + // Write initial value + { + let mut writer = store.writer().unwrap(); + writer.queue_raw_put("table1", &[1], b"original").unwrap(); + writer.raw_commit().unwrap(); + } + + // Overwrite with new value + { + let mut writer = store.writer().unwrap(); + writer.queue_raw_put("table1", &[1], b"updated").unwrap(); + writer.raw_commit().unwrap(); + } + + // Check the value was updated + { + let reader = store.reader().unwrap(); + let value = reader.raw_get("table1", &[1]).unwrap(); + assert_eq!(value.as_deref(), Some(b"updated" as &[u8])); + } + } + + #[test] + fn test_read_your_writes() { + let store = MemKv::new(); + let mut writer = store.writer().unwrap(); + + // Queue some operations but don't commit yet + writer.queue_raw_put("table1", &[1], b"queued_value").unwrap(); + + // Should be able to read the queued value + let value = writer.raw_get("table1", &[1]).unwrap(); + assert_eq!(value.as_deref(), Some(b"queued_value" as &[u8])); + + writer.raw_commit().unwrap(); + + // After commit, other readers should see it + { + let reader = store.reader().unwrap(); + let value = reader.raw_get("table1", &[1]).unwrap(); + assert_eq!(value.as_deref(), Some(b"queued_value" as &[u8])); + } + } + + #[test] + fn test_typed_operations() { + let store = MemKv::new(); + + // Write using typed interface + { + let mut writer = store.writer().unwrap(); + writer.queue_put::(&42u64, &Bytes::from_static(b"hello world")).unwrap(); + writer.queue_put::(&100u64, &Bytes::from_static(b"another value")).unwrap(); + writer.raw_commit().unwrap(); + } + + // Read using typed interface + { + let reader = store.reader().unwrap(); + let value1 = reader.get::(&42u64).unwrap(); + let value2 = reader.get::(&100u64).unwrap(); + let missing = reader.get::(&999u64).unwrap(); + + assert_eq!(value1, Some(Bytes::from_static(b"hello world"))); + assert_eq!(value2, Some(Bytes::from_static(b"another value"))); + assert!(missing.is_none()); + } + } + + #[test] + fn test_address_table() { + let store = MemKv::new(); + + let addr1 = Address::from([0x11; 20]); + let addr2 = Address::from([0x22; 20]); + let balance1 = U256::from(1000u64); + let balance2 = U256::from(2000u64); + + // Write address data + { + let mut writer = store.writer().unwrap(); + writer.queue_put::(&addr1, &balance1).unwrap(); + writer.queue_put::(&addr2, &balance2).unwrap(); + writer.raw_commit().unwrap(); + } + + // Read address data + { + let reader = store.reader().unwrap(); + let bal1 = reader.get::(&addr1).unwrap(); + let bal2 = reader.get::(&addr2).unwrap(); + + assert_eq!(bal1, Some(balance1)); + assert_eq!(bal2, Some(balance2)); + } + } + + #[test] + fn test_batch_operations() { + let store = MemKv::new(); + + let entries = [ + (1u64, Bytes::from_static(b"first")), + (2u64, Bytes::from_static(b"second")), + (3u64, Bytes::from_static(b"third")), + ]; + + // Write batch + { + let mut writer = store.writer().unwrap(); + let entry_refs: Vec<_> = entries.iter().map(|(k, v)| (k, v)).collect(); + writer.queue_put_many::(entry_refs).unwrap(); + 
writer.raw_commit().unwrap(); + } + + // Read batch + { + let reader = store.reader().unwrap(); + let keys: Vec<_> = entries.iter().map(|(k, _)| k).collect(); + let values = reader.get_many::(keys).unwrap(); + + assert_eq!(values.len(), 3); + assert_eq!(values[0], (&1u64, Some(Bytes::from_static(b"first")))); + assert_eq!(values[1], (&2u64, Some(Bytes::from_static(b"second")))); + assert_eq!(values[2], (&3u64, Some(Bytes::from_static(b"third")))); + } + } + + #[test] + fn test_concurrent_readers() { + let store = MemKv::new(); + + // Write some initial data + { + let mut writer = store.writer().unwrap(); + writer.queue_raw_put("table1", &[1], b"value1").unwrap(); + writer.raw_commit().unwrap(); + } + + // Multiple readers should be able to read concurrently + let reader1 = store.reader().unwrap(); + let reader2 = store.reader().unwrap(); + + let value1 = reader1.raw_get("table1", &[1]).unwrap(); + let value2 = reader2.raw_get("table1", &[1]).unwrap(); + + assert_eq!(value1.as_deref(), Some(b"value1" as &[u8])); + assert_eq!(value2.as_deref(), Some(b"value1" as &[u8])); + } + + #[test] + fn test_write_lock_exclusivity() { + let store = MemKv::new(); + + // Get a writer + let _writer1 = store.writer().unwrap(); + + // Second writer should fail + match store.writer() { + Err(HotKvError::WriteLocked) => {} // Expected + Ok(_) => panic!("Should not be able to get second writer"), + Err(e) => panic!("Unexpected error: {:?}", e), + } + } + + #[test] + fn test_empty_values() { + let store = MemKv::new(); + + { + let mut writer = store.writer().unwrap(); + writer.queue_raw_put("table1", &[1], b"").unwrap(); + writer.raw_commit().unwrap(); + } + + { + let reader = store.reader().unwrap(); + let value = reader.raw_get("table1", &[1]).unwrap(); + assert_eq!(value.as_deref(), Some(b"" as &[u8])); + } + } + + #[test] + fn test_multiple_operations_same_transaction() { + let store = MemKv::new(); + + { + let mut writer = store.writer().unwrap(); + + // Multiple operations on same key - last one should win + writer.queue_raw_put("table1", &[1], b"first").unwrap(); + writer.queue_raw_put("table1", &[1], b"second").unwrap(); + writer.queue_raw_put("table1", &[1], b"third").unwrap(); + + // Read-your-writes should return the latest value + let value = writer.raw_get("table1", &[1]).unwrap(); + assert_eq!(value.as_deref(), Some(b"third" as &[u8])); + + writer.raw_commit().unwrap(); + } + + { + let reader = store.reader().unwrap(); + let value = reader.raw_get("table1", &[1]).unwrap(); + assert_eq!(value.as_deref(), Some(b"third" as &[u8])); + } + } + + #[test] + fn test_isolation() { + let store = MemKv::new(); + + // Write initial value + { + let mut writer = store.writer().unwrap(); + writer.queue_raw_put("table1", &[1], b"original").unwrap(); + writer.raw_commit().unwrap(); + } + + // Start a read transaction + { + let reader = store.reader().unwrap(); + let original_value = reader.raw_get("table1", &[1]).unwrap(); + assert_eq!(original_value.as_deref(), Some(b"original" as &[u8])); + } + + // Update the value in a separate transaction + { + let mut writer = store.writer().unwrap(); + writer.queue_raw_put("table1", &[1], b"updated").unwrap(); + writer.raw_commit().unwrap(); + } + + // The value should now be latest for new readers + { + // New reader should see the updated value + let new_reader = store.reader().unwrap(); + let updated_value = new_reader.raw_get("table1", &[1]).unwrap(); + assert_eq!(updated_value.as_deref(), Some(b"updated" as &[u8])); + } + } + + #[test] + fn test_rollback_on_drop() { + 
let store = MemKv::new(); + + { + let mut writer = store.writer().unwrap(); + writer.queue_raw_put("table1", &[1], b"should_not_persist").unwrap(); + // Drop without committing + } + + // Value should not be persisted + { + let reader = store.reader().unwrap(); + let value = reader.raw_get("table1", &[1]).unwrap(); + assert!(value.is_none()); + } + } + + #[test] + fn write_two_tables() { + let store = MemKv::new(); + + { + let mut writer = store.writer().unwrap(); + writer.queue_raw_put("table1", &[1], b"value1").unwrap(); + writer.queue_raw_put("table2", &[2], b"value2").unwrap(); + writer.raw_commit().unwrap(); + } + + { + let reader = store.reader().unwrap(); + let value1 = reader.raw_get("table1", &[1]).unwrap(); + let value2 = reader.raw_get("table2", &[2]).unwrap(); + + assert_eq!(value1.as_deref(), Some(b"value1" as &[u8])); + assert_eq!(value2.as_deref(), Some(b"value2" as &[u8])); + } + } + + #[test] + fn test_downgrades() { + let store = MemKv::new(); + { + // Write some data + // Start a read-write transaction + let mut rw_tx = store.writer().unwrap(); + rw_tx.queue_raw_put("table1", &[1, 2, 3], b"value1").unwrap(); + rw_tx.queue_raw_put("table1", &[4, 5, 6], b"value2").unwrap(); + + let ro_tx = rw_tx.commit_downgrade(); + + // Read the data back + let value1 = ro_tx.raw_get("table1", &[1, 2, 3]).unwrap(); + let value2 = ro_tx.raw_get("table1", &[4, 5, 6]).unwrap(); + + assert_eq!(value1.as_deref(), Some(b"value1" as &[u8])); + assert_eq!(value2.as_deref(), Some(b"value2" as &[u8])); + } + + { + // Start another read-write transaction + let mut rw_tx = store.writer().unwrap(); + rw_tx.queue_raw_put("table2", &[7, 8, 9], b"value3").unwrap(); + + // Value should not be set + let ro_tx = rw_tx.downgrade(); + + // Read the data back + let value3 = ro_tx.raw_get("table2", &[7, 8, 9]).unwrap(); + + assert!(value3.is_none()); + } + } + + #[test] + fn test_clear_table() { + let store = MemKv::new(); + + { + let mut writer = store.writer().unwrap(); + writer.queue_raw_put("table1", &[1], b"value1").unwrap(); + writer.queue_raw_put("table1", &[2], b"value2").unwrap(); + writer.raw_commit().unwrap(); + } + + { + let reader = store.reader().unwrap(); + + let value1 = reader.raw_get("table1", &[1]).unwrap(); + let value2 = reader.raw_get("table1", &[2]).unwrap(); + + assert_eq!(value1.as_deref(), Some(b"value1" as &[u8])); + assert_eq!(value2.as_deref(), Some(b"value2" as &[u8])); + } + + { + let mut writer = store.writer().unwrap(); + + let value1 = writer.raw_get("table1", &[1]).unwrap(); + let value2 = writer.raw_get("table1", &[2]).unwrap(); + + assert_eq!(value1.as_deref(), Some(b"value1" as &[u8])); + assert_eq!(value2.as_deref(), Some(b"value2" as &[u8])); + + writer.queue_raw_clear("table1").unwrap(); + + let value1 = writer.raw_get("table1", &[1]).unwrap(); + let value2 = writer.raw_get("table1", &[2]).unwrap(); + + assert!(value1.is_none()); + assert!(value2.is_none()); + + writer.raw_commit().unwrap(); + } + + { + let reader = store.reader().unwrap(); + let value1 = reader.raw_get("table1", &[1]).unwrap(); + let value2 = reader.raw_get("table1", &[2]).unwrap(); + + assert!(value1.is_none()); + assert!(value2.is_none()); + } + } + + // ======================================================================== + // Cursor Traversal Tests + // ======================================================================== + + #[test] + fn test_cursor_basic_navigation() { + let store = MemKv::new(); + + // Setup test data using TestTable + let test_data = vec![ + (1u64, 
Bytes::from_static(b"value_001")), + (2u64, Bytes::from_static(b"value_002")), + (3u64, Bytes::from_static(b"value_003")), + (10u64, Bytes::from_static(b"value_010")), + (20u64, Bytes::from_static(b"value_020")), + ]; + + // Insert data + { + let mut writer = store.writer().unwrap(); + for (key, value) in &test_data { + writer.queue_put::(key, value).unwrap(); + } + writer.raw_commit().unwrap(); + } + + // Test cursor navigation + { + let reader = store.reader().unwrap(); + let mut cursor = reader.cursor(TestTable::NAME).unwrap(); + + // Test first() + let (key, value) = TableTraverse::::first(&mut cursor).unwrap().unwrap(); + assert_eq!(key, test_data[0].0); + assert_eq!(value, test_data[0].1); + + // Test last() + let last_result = TableTraverse::::last(&mut cursor).unwrap(); + assert!(last_result.is_some()); + let (key, value) = last_result.unwrap(); + assert_eq!(key, test_data.last().unwrap().0); + assert_eq!(value, test_data.last().unwrap().1); + + // Test exact lookup + let exact_result = TableTraverse::::exact(&mut cursor, &2u64).unwrap(); + assert!(exact_result.is_some()); + assert_eq!(exact_result.unwrap(), test_data[1].1); + + // Test next_above (range lookup) + let range_result = + TableTraverse::::lower_bound(&mut cursor, &5u64).unwrap(); + assert!(range_result.is_some()); + let (key, value) = range_result.unwrap(); + assert_eq!(key, test_data[3].0); // 10u64 + assert_eq!(value, test_data[3].1); + } + } + + #[test] + fn test_cursor_sequential_navigation() { + let store = MemKv::new(); + + // Setup sequential test data using TestTable + let test_data: Vec<(u64, Bytes)> = (1..=5) + .map(|i| { + let key = i; + let value = Bytes::from(format!("value_{:03}", i)); + (key, value) + }) + .collect(); + + // Insert data + { + let mut writer = store.writer().unwrap(); + for (key, value) in &test_data { + writer.queue_put::(key, value).unwrap(); + } + writer.raw_commit().unwrap(); + } + + // Test sequential navigation + { + let reader = store.reader().unwrap(); + let mut cursor = reader.cursor(TestTable::NAME).unwrap(); + + // Start from first and traverse forward + let mut current_idx = 0; + let first_result = TableTraverse::::first(&mut cursor).unwrap(); + assert!(first_result.is_some()); + + let (key, value) = first_result.unwrap(); + assert_eq!(key, test_data[current_idx].0); + assert_eq!(value, test_data[current_idx].1); + + // Navigate forward through all entries + while current_idx < test_data.len() - 1 { + let next_result = TableTraverse::::read_next(&mut cursor).unwrap(); + assert!(next_result.is_some()); + + current_idx += 1; + let (key, value) = next_result.unwrap(); + assert_eq!(key, test_data[current_idx].0); + assert_eq!(value, test_data[current_idx].1); + } + + // Next should return None at the end + let beyond_end = TableTraverse::::read_next(&mut cursor).unwrap(); + assert!(beyond_end.is_none()); + + // Navigate backward + while current_idx > 0 { + let prev_result = TableTraverse::::read_prev(&mut cursor).unwrap(); + + assert!(prev_result.is_some()); + + current_idx -= 1; + let (key, value) = prev_result.unwrap(); + assert_eq!(key, test_data[current_idx].0); + assert_eq!(value, test_data[current_idx].1); + } + + // Previous should return None at the beginning + let before_start = TableTraverse::::read_prev(&mut cursor).unwrap(); + assert!(before_start.is_none()); + } + } + + #[test] + fn test_cursor_mut_operations() { + let store = MemKv::new(); + + let test_data = vec![ + (1u64, Bytes::from_static(b"delete_value_1")), + (2u64, Bytes::from_static(b"delete_value_2")), + 
(3u64, Bytes::from_static(b"delete_value_3")), + ]; + + // Insert initial data + { + let mut writer = store.writer().unwrap(); + for (key, value) in &test_data { + writer.queue_put::(key, value).unwrap(); + } + writer.raw_commit().unwrap(); + } + + // Test mutable cursor operations + { + let mut writer = store.writer().unwrap(); + let mut cursor = writer.cursor_mut(TestTable::NAME).unwrap(); + + // Navigate to middle entry + let first = TableTraverse::::first(&mut cursor).unwrap().unwrap(); + assert_eq!(first.0, test_data[0].0); + + let next = TableTraverse::::read_next(&mut cursor).unwrap().unwrap(); + assert_eq!(next.0, test_data[1].0); + + // Delete current entry (key 2) + TableTraverseMut::::delete_current(&mut cursor).unwrap(); + + writer.raw_commit().unwrap(); + } + + // Verify deletion + { + let reader = store.reader().unwrap(); + let mut cursor = reader.cursor(TestTable::NAME).unwrap(); + + // Should only have first and third entries + let first = TableTraverse::::first(&mut cursor).unwrap().unwrap(); + assert_eq!(first.0, test_data[0].0); + + let second = TableTraverse::::read_next(&mut cursor).unwrap().unwrap(); + assert_eq!(second.0, test_data[2].0); + + // Should be no more entries + let none = TableTraverse::::read_next(&mut cursor).unwrap(); + assert!(none.is_none()); + + // Verify deleted key is gone + let missing = + TableTraverse::::exact(&mut cursor, &test_data[1].0).unwrap(); + assert!(missing.is_none()); + } + } + + #[test] + fn test_table_traverse_typed() { + let store = MemKv::new(); + + // Setup test data using the test table + let test_data: Vec<(u64, bytes::Bytes)> = (0..5) + .map(|i| { + let key = i * 10; + let value = bytes::Bytes::from(format!("test_value_{}", i)); + (key, value) + }) + .collect(); + + // Insert data + { + let mut writer = store.writer().unwrap(); + for (key, value) in &test_data { + writer.queue_put::(key, value).unwrap(); + } + writer.raw_commit().unwrap(); + } + + // Test typed table traversal + { + let reader = store.reader().unwrap(); + let mut cursor = reader.cursor(TestTable::NAME).unwrap(); + + // Test first with type-safe operations + let first_raw = TableTraverse::::first(&mut cursor).unwrap(); + assert!(first_raw.is_some()); + let (first_key, first_value) = first_raw.unwrap(); + assert_eq!(first_key, test_data[0].0); + assert_eq!(first_value, test_data[0].1); + + // Test last + let last_raw = TableTraverse::::last(&mut cursor).unwrap(); + assert!(last_raw.is_some()); + let (last_key, last_value) = last_raw.unwrap(); + assert_eq!(last_key, test_data.last().unwrap().0); + assert_eq!(last_value, test_data.last().unwrap().1); + + // Test exact lookup + let target_key = &test_data[2].0; + let exact_value = + TableTraverse::::exact(&mut cursor, target_key).unwrap(); + assert!(exact_value.is_some()); + assert_eq!(exact_value.unwrap(), test_data[2].1); + + // Test range lookup + let range_key = 15u64; // Between entries 1 and 2 + + let range_result = + TableTraverse::::lower_bound(&mut cursor, &range_key).unwrap(); + assert!(range_result.is_some()); + let (found_key, found_value) = range_result.unwrap(); + assert_eq!(found_key, test_data[2].0); // key 20 + assert_eq!(found_value, test_data[2].1); + } + } + + #[test] + fn test_cursor_empty_table() { + let store = MemKv::new(); + + // Create an empty table first + { + let mut writer = store.writer().unwrap(); + writer.queue_raw_create(TestTable::NAME, false, false).unwrap(); + writer.raw_commit().unwrap(); + } + + { + let reader = store.reader().unwrap(); + let mut cursor = 
reader.cursor(TestTable::NAME).unwrap(); + + // All operations should return None on empty table + assert!(TableTraverse::::first(&mut cursor).unwrap().is_none()); + assert!(TableTraverse::::last(&mut cursor).unwrap().is_none()); + assert!(TableTraverse::::exact(&mut cursor, &42u64).unwrap().is_none()); + assert!( + TableTraverse::::lower_bound(&mut cursor, &42u64).unwrap().is_none() + ); + assert!(TableTraverse::::read_next(&mut cursor).unwrap().is_none()); + assert!(TableTraverse::::read_prev(&mut cursor).unwrap().is_none()); + } + } + + #[test] + fn test_cursor_state_management() { + let store = MemKv::new(); + + let test_data = vec![ + (1u64, Bytes::from_static(b"state_value_1")), + (2u64, Bytes::from_static(b"state_value_2")), + (3u64, Bytes::from_static(b"state_value_3")), + ]; + + { + let mut writer = store.writer().unwrap(); + for (key, value) in &test_data { + writer.queue_put::(key, value).unwrap(); + } + writer.raw_commit().unwrap(); + } + + { + let reader = store.reader().unwrap(); + let mut cursor = reader.cursor(TestTable::NAME).unwrap(); + + // Test that cursor operations maintain state correctly + + // Start at first + let first = TableTraverse::::first(&mut cursor).unwrap().unwrap(); + assert_eq!(first.0, test_data[0].0); + + // Move to second via next + let second = TableTraverse::::read_next(&mut cursor).unwrap().unwrap(); + assert_eq!(second.0, test_data[1].0); + + // Jump to last + let last = TableTraverse::::last(&mut cursor).unwrap().unwrap(); + assert_eq!(last.0, test_data[2].0); + + // Move back via prev + let back_to_second = + TableTraverse::::read_prev(&mut cursor).unwrap().unwrap(); + assert_eq!(back_to_second.0, test_data[1].0); + + // Use exact to jump to specific position + let exact_first = + TableTraverse::::exact(&mut cursor, &test_data[0].0).unwrap(); + assert!(exact_first.is_some()); + assert_eq!(exact_first.unwrap(), test_data[0].1); + + // Verify cursor is now positioned at first entry + let next_from_first = + TableTraverse::::read_next(&mut cursor).unwrap().unwrap(); + assert_eq!(next_from_first.0, test_data[1].0); + + // Use range lookup + let range_lookup = + TableTraverse::::lower_bound(&mut cursor, &1u64).unwrap().unwrap(); // Should find key 1 + assert_eq!(range_lookup.0, test_data[0].0); + + // Verify we can continue navigation from range position + let next_after_range = + TableTraverse::::read_next(&mut cursor).unwrap().unwrap(); + assert_eq!(next_after_range.0, test_data[1].0); + } + } + + #[test] + fn test_dual_key_operations() { + let store = MemKv::new(); + + // Test dual key storage and retrieval using DualTestTable + let dual_data = vec![ + (1u64, 100u32, Bytes::from_static(b"value1")), + (1u64, 200u32, Bytes::from_static(b"value2")), + (2u64, 100u32, Bytes::from_static(b"value3")), + ]; + + { + let mut writer = store.writer().unwrap(); + for (key1, key2, value) in &dual_data { + writer.queue_put_dual::(key1, key2, value).unwrap(); + } + writer.raw_commit().unwrap(); + } + + // Test dual key traversal + { + let reader = store.reader().unwrap(); + let mut cursor = reader.cursor(DualTestTable::NAME).unwrap(); + + // Test exact dual lookup + let exact_result = + DualTableTraverse::::exact_dual(&mut cursor, &1u64, &200u32) + .unwrap(); + assert!(exact_result.is_some()); + assert_eq!(exact_result.unwrap(), Bytes::from_static(b"value2")); + + // Test missing dual key + let missing_result = + DualTableTraverse::::exact_dual(&mut cursor, &3u64, &100u32) + .unwrap(); + assert!(missing_result.is_none()); + + // Test next_dual_above + let 
range_result = + DualTableTraverse::::next_dual_above(&mut cursor, &1u64, &150u32) + .unwrap(); + assert!(range_result.is_some()); + let (k, k2, value) = range_result.unwrap(); + assert_eq!(k, 1u64); + assert_eq!(k2, 200u32); + assert_eq!(value, Bytes::from_static(b"value2")); + + // Test next_k1 to find next different first key + let next_k1_result = + DualTableTraverse::::next_k1(&mut cursor).unwrap(); + assert!(next_k1_result.is_some()); + let (k, k2, value) = next_k1_result.unwrap(); + assert_eq!(k, 2u64); + assert_eq!(k2, 100u32); + assert_eq!(value, Bytes::from_static(b"value3")); + } + } +} diff --git a/crates/storage/src/hot/impls/mod.rs b/crates/storage/src/hot/impls/mod.rs new file mode 100644 index 0000000..7119d0a --- /dev/null +++ b/crates/storage/src/hot/impls/mod.rs @@ -0,0 +1,309 @@ +/// An in-memory key-value store implementation. +pub mod mem; + +/// MDBX-backed key-value store implementation. +pub mod mdbx; + +#[cfg(test)] +mod test { + use crate::{ + hot::{ + mem, + model::{HotDbRead, HotDbWrite, HotHistoryRead, HotHistoryWrite, HotKv, HotKvWrite}, + }, + tables::hot, + }; + use alloy::primitives::{B256, Bytes, U256, address, b256}; + use reth::primitives::{Account, Bytecode, Header, SealedHeader}; + use reth_db::BlockNumberList; + + #[test] + fn mem_conformance() { + let hot_kv = mem::MemKv::new(); + conformance(&hot_kv); + } + + #[test] + fn mdbx_conformance() { + let db = reth_db::test_utils::create_test_rw_db(); + + // Create tables from the `crate::tables::hot` module + let mut writer = db.db().writer().unwrap(); + + writer.queue_create::().unwrap(); + writer.queue_create::().unwrap(); + writer.queue_create::().unwrap(); + writer.queue_create::().unwrap(); + writer.queue_create::().unwrap(); + writer.queue_create::().unwrap(); + writer.queue_create::().unwrap(); + writer.queue_create::().unwrap(); + writer.queue_create::().unwrap(); + + writer.commit().expect("Failed to commit table creation"); + + conformance(db.db()); + } + + fn conformance(hot_kv: &T) { + test_header_roundtrip(hot_kv); + test_account_roundtrip(hot_kv); + test_storage_roundtrip(hot_kv); + test_bytecode_roundtrip(hot_kv); + test_account_history(hot_kv); + test_storage_history(hot_kv); + test_account_changes(hot_kv); + test_storage_changes(hot_kv); + test_missing_reads(hot_kv); + } + + /// Test writing and reading headers via HotDbWrite/HotDbRead + fn test_header_roundtrip(hot_kv: &T) { + let header = Header { number: 42, gas_limit: 1_000_000, ..Default::default() }; + let sealed = SealedHeader::seal_slow(header.clone()); + let hash = sealed.hash(); + + // Write header + { + let mut writer = hot_kv.writer().unwrap(); + writer.put_header(&sealed).unwrap(); + writer.commit().unwrap(); + } + + // Read header by number + { + let reader = hot_kv.reader().unwrap(); + let read_header = reader.get_header(42).unwrap(); + assert!(read_header.is_some()); + assert_eq!(read_header.unwrap().number, 42); + } + + // Read header number by hash + { + let reader = hot_kv.reader().unwrap(); + let read_number = reader.get_header_number(&hash).unwrap(); + assert!(read_number.is_some()); + assert_eq!(read_number.unwrap(), 42); + } + + // Read header by hash + { + let reader = hot_kv.reader().unwrap(); + let read_header = reader.header_by_hash(&hash).unwrap(); + assert!(read_header.is_some()); + assert_eq!(read_header.unwrap().number, 42); + } + } + + /// Test writing and reading accounts via HotDbWrite/HotDbRead + fn test_account_roundtrip(hot_kv: &T) { + let addr = address!("0x1234567890123456789012345678901234567890"); 
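
Aside (not part of the patch): the raw surface this conformance suite builds on can be exercised directly against the in-memory backend. A minimal sketch, assuming the crate and module paths implied by the file layout (`signet_storage::hot::{mem, model}`); the table name is arbitrary and only calls already used by the tests appear:

use signet_storage::hot::{
    mem::MemKv,
    model::{HotKv, HotKvRead, HotKvWrite},
};

fn main() {
    let store = MemKv::new();

    // Stage a raw write and commit it.
    let mut writer = store.writer().unwrap();
    writer.queue_raw_put("example_table", &[1, 2, 3], b"value").unwrap();
    writer.raw_commit().unwrap();

    // A fresh read transaction sees the committed value.
    let reader = store.reader().unwrap();
    let value = reader.raw_get("example_table", &[1, 2, 3]).unwrap();
    assert_eq!(value.as_deref(), Some(b"value" as &[u8]));
}

Note that the MDBX backend needs its tables created up front (via `queue_create` or `queue_raw_create`), which is exactly what the `mdbx_conformance` setup above does; the in-memory store creates tables lazily on write.
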
+ let account = + Account { nonce: 5, balance: U256::from(1000), bytecode_hash: Some(B256::ZERO) }; + + // Write account + { + let mut writer = hot_kv.writer().unwrap(); + writer.put_account(&addr, &account).unwrap(); + writer.commit().unwrap(); + } + + // Read account + { + let reader = hot_kv.reader().unwrap(); + let read_account = reader.get_account(&addr).unwrap(); + assert!(read_account.is_some()); + let read_account = read_account.unwrap(); + assert_eq!(read_account.nonce, 5); + assert_eq!(read_account.balance, U256::from(1000)); + } + } + + /// Test writing and reading storage via HotDbWrite/HotDbRead + fn test_storage_roundtrip(hot_kv: &T) { + let addr = address!("0xabcdef0123456789abcdef0123456789abcdef01"); + let slot = b256!("0x0000000000000000000000000000000000000000000000000000000000000001"); + let value = U256::from(999); + + // Write storage + { + let mut writer = hot_kv.writer().unwrap(); + writer.put_storage(&addr, &slot, &value).unwrap(); + writer.commit().unwrap(); + } + + // Read storage + { + let reader = hot_kv.reader().unwrap(); + let read_value = reader.get_storage(&addr, &slot).unwrap(); + assert!(read_value.is_some()); + assert_eq!(read_value.unwrap(), U256::from(999)); + } + + // Read storage entry + { + let reader = hot_kv.reader().unwrap(); + let read_entry = reader.get_storage_entry(&addr, &slot).unwrap(); + assert!(read_entry.is_some()); + let entry = read_entry.unwrap(); + assert_eq!(entry.key, slot); + assert_eq!(entry.value, U256::from(999)); + } + } + + /// Test writing and reading bytecode via HotDbWrite/HotDbRead + fn test_bytecode_roundtrip(hot_kv: &T) { + let code = Bytes::from_static(&[0x60, 0x00, 0x60, 0x00, 0xf3]); // Simple EVM bytecode + let bytecode = Bytecode::new_raw(code); + let code_hash = bytecode.hash_slow(); + + // Write bytecode + { + let mut writer = hot_kv.writer().unwrap(); + writer.put_bytecode(&code_hash, &bytecode).unwrap(); + writer.commit().unwrap(); + } + + // Read bytecode + { + let reader = hot_kv.reader().unwrap(); + let read_bytecode = reader.get_bytecode(&code_hash).unwrap(); + assert!(read_bytecode.is_some()); + } + } + + /// Test account history via HotHistoryWrite/HotHistoryRead + fn test_account_history(hot_kv: &T) { + let addr = address!("0x1111111111111111111111111111111111111111"); + let touched_blocks = BlockNumberList::new([10, 20, 30]).unwrap(); + let latest_height = 100u64; + + // Write account history + { + let mut writer = hot_kv.writer().unwrap(); + writer.write_account_history(&addr, latest_height, &touched_blocks).unwrap(); + writer.commit().unwrap(); + } + + // Read account history + { + let reader = hot_kv.reader().unwrap(); + let read_history = reader.get_account_history(&addr, latest_height).unwrap(); + assert!(read_history.is_some()); + let history = read_history.unwrap(); + assert_eq!(history.iter().collect::>(), vec![10, 20, 30]); + } + } + + /// Test storage history via HotHistoryWrite/HotHistoryRead + fn test_storage_history(hot_kv: &T) { + let addr = address!("0x2222222222222222222222222222222222222222"); + let slot = b256!("0x0000000000000000000000000000000000000000000000000000000000000042"); + let touched_blocks = BlockNumberList::new([5, 15, 25]).unwrap(); + let highest_block = 50u64; + + // Write storage history + { + let mut writer = hot_kv.writer().unwrap(); + writer.write_storage_history(&addr, slot, highest_block, &touched_blocks).unwrap(); + writer.commit().unwrap(); + } + + // Read storage history + { + let reader = hot_kv.reader().unwrap(); + let read_history = 
reader.get_storage_history(&addr, slot, highest_block).unwrap(); + assert!(read_history.is_some()); + let history = read_history.unwrap(); + assert_eq!(history.iter().collect::>(), vec![5, 15, 25]); + } + } + + /// Test account change sets via HotHistoryWrite/HotHistoryRead + fn test_account_changes(hot_kv: &T) { + let addr = address!("0x3333333333333333333333333333333333333333"); + let pre_state = Account { nonce: 10, balance: U256::from(5000), bytecode_hash: None }; + let block_number = 100u64; + + // Write account change + { + let mut writer = hot_kv.writer().unwrap(); + writer.write_account_change(block_number, addr, &pre_state).unwrap(); + writer.commit().unwrap(); + } + + // Read account change + { + let reader = hot_kv.reader().unwrap(); + let read_change = reader.get_account_change(block_number, &addr).unwrap(); + assert!(read_change.is_some()); + let change = read_change.unwrap(); + assert_eq!(change.nonce, 10); + assert_eq!(change.balance, U256::from(5000)); + } + } + + /// Test storage change sets via HotHistoryWrite/HotHistoryRead + fn test_storage_changes(hot_kv: &T) { + let addr = address!("0x4444444444444444444444444444444444444444"); + let slot = b256!("0x0000000000000000000000000000000000000000000000000000000000000099"); + let pre_value = U256::from(12345); + let block_number = 200u64; + + // Write storage change + { + let mut writer = hot_kv.writer().unwrap(); + writer.write_storage_change(block_number, addr, &slot, &pre_value).unwrap(); + writer.commit().unwrap(); + } + + // Read storage change + { + let reader = hot_kv.reader().unwrap(); + let read_change = reader.get_storage_change(block_number, &addr, &slot).unwrap(); + assert!(read_change.is_some()); + assert_eq!(read_change.unwrap(), U256::from(12345)); + } + } + + /// Test that missing reads return None + fn test_missing_reads(hot_kv: &T) { + let missing_addr = address!("0x9999999999999999999999999999999999999999"); + let missing_hash = + b256!("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"); + let missing_slot = + b256!("0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"); + + let reader = hot_kv.reader().unwrap(); + + // Missing header + assert!(reader.get_header(999999).unwrap().is_none()); + + // Missing header number + assert!(reader.get_header_number(&missing_hash).unwrap().is_none()); + + // Missing account + assert!(reader.get_account(&missing_addr).unwrap().is_none()); + + // Missing storage + assert!(reader.get_storage(&missing_addr, &missing_slot).unwrap().is_none()); + + // Missing bytecode + assert!(reader.get_bytecode(&missing_hash).unwrap().is_none()); + + // Missing header by hash + assert!(reader.header_by_hash(&missing_hash).unwrap().is_none()); + + // Missing account history + assert!(reader.get_account_history(&missing_addr, 1000).unwrap().is_none()); + + // Missing storage history + assert!(reader.get_storage_history(&missing_addr, missing_slot, 1000).unwrap().is_none()); + + // Missing account change + assert!(reader.get_account_change(999999, &missing_addr).unwrap().is_none()); + + // Missing storage change + assert!(reader.get_storage_change(999999, &missing_addr, &missing_slot).unwrap().is_none()); + } +} diff --git a/crates/storage/src/hot/mod.rs b/crates/storage/src/hot/mod.rs new file mode 100644 index 0000000..4e73f9c --- /dev/null +++ b/crates/storage/src/hot/mod.rs @@ -0,0 +1,5 @@ +/// Hot storage models and traits. 
+pub mod model; + +mod impls; +pub use impls::{mdbx, mem}; diff --git a/crates/storage/src/hot/model/db_traits.rs b/crates/storage/src/hot/model/db_traits.rs new file mode 100644 index 0000000..9b08f49 --- /dev/null +++ b/crates/storage/src/hot/model/db_traits.rs @@ -0,0 +1,235 @@ +use crate::{ + hot::model::{HotKvRead, HotKvWrite}, + tables::hot::{self as tables}, +}; +use alloy::primitives::{Address, B256, U256}; +use reth::primitives::{Account, Bytecode, Header, SealedHeader, StorageEntry}; +use reth_db::{BlockNumberList, models::BlockNumberAddress}; +use reth_db_api::models::ShardedKey; + +/// Trait for database read operations. +pub trait HotDbRead: HotKvRead + sealed::Sealed { + /// Read a block header by its number. + fn get_header(&self, number: u64) -> Result, Self::Error> { + self.get::(&number) + } + + /// Read a block number by its hash. + fn get_header_number(&self, hash: &B256) -> Result, Self::Error> { + self.get::(hash) + } + + /// Read contract Bytecode by its hash. + fn get_bytecode(&self, code_hash: &B256) -> Result, Self::Error> { + self.get::(code_hash) + } + + /// Read an account by its address. + fn get_account(&self, address: &Address) -> Result, Self::Error> { + self.get::(address) + } + + /// Read a storage slot by its address and key. + fn get_storage(&self, address: &Address, key: &B256) -> Result, Self::Error> { + self.get_dual::(address, key) + } + + /// Read a [`StorageEntry`] by its address and key. + fn get_storage_entry( + &self, + address: &Address, + key: &B256, + ) -> Result, Self::Error> { + let opt = self.get_storage(address, key)?; + Ok(opt.map(|value| StorageEntry { key: *key, value })) + } + + /// Read a block header by its hash. + fn header_by_hash(&self, hash: &B256) -> Result, Self::Error> { + let Some(number) = self.get_header_number(hash)? else { + return Ok(None); + }; + self.get_header(number) + } +} + +impl HotDbRead for T where T: HotKvRead {} + +/// Trait for database write operations. This trait is low-level, and usage may +/// leave the database in an inconsistent state if not used carefully. Users +/// should prefer [`HotHistoryWrite`] or higher-level abstractions when +/// possible. +pub trait HotDbWrite: HotKvWrite + sealed::Sealed { + /// Write a block header. This will leave the DB in an inconsistent state + /// until the corresponding header number is also written. Users should + /// prefer [`Self::put_header`] instead. + fn put_header_inconsistent(&mut self, header: &Header) -> Result<(), Self::Error> { + self.queue_put::(&header.number, header) + } + + /// Write a block number by its hash. This will leave the DB in an + /// inconsistent state until the corresponding header is also written. + /// Users should prefer [`Self::put_header`] instead. + fn put_header_number_inconsistent( + &mut self, + hash: &B256, + number: u64, + ) -> Result<(), Self::Error> { + self.queue_put::(hash, &number) + } + + /// Write contract Bytecode by its hash. + fn put_bytecode(&mut self, code_hash: &B256, bytecode: &Bytecode) -> Result<(), Self::Error> { + self.queue_put::(code_hash, bytecode) + } + + /// Write an account by its address. + fn put_account(&mut self, address: &Address, account: &Account) -> Result<(), Self::Error> { + self.queue_put::(address, account) + } + + /// Write a storage entry by its address and key. + fn put_storage( + &mut self, + address: &Address, + key: &B256, + entry: &U256, + ) -> Result<(), Self::Error> { + self.queue_put_dual::(address, key, entry) + } + + /// Write a sealed block header (header + number). 
+    fn put_header(&mut self, header: &SealedHeader) -> Result<(), Self::Error> {
+        self.put_header_inconsistent(header.header())
+            .and_then(|_| self.put_header_number_inconsistent(&header.hash(), header.number))
+    }
+
+    /// Commit the write transaction.
+    fn commit(self) -> Result<(), Self::Error>
+    where
+        Self: Sized,
+    {
+        HotKvWrite::raw_commit(self)
+    }
+}
+
+impl<T> HotDbWrite for T where T: HotKvWrite {}
+
+/// Trait for history read operations.
+pub trait HotHistoryRead: HotDbRead {
+    /// Get the list of block numbers where an account was touched.
+    fn get_account_history(
+        &self,
+        address: &Address,
+        latest_height: u64,
+    ) -> Result<Option<BlockNumberList>, Self::Error> {
+        self.get_dual::(address, &latest_height)
+    }
+
+    /// Get the account change (pre-state) for an account at a specific block.
+    ///
+    /// If the return value is `None`, the account was not changed in that
+    /// block.
+    fn get_account_change(
+        &self,
+        block_number: u64,
+        address: &Address,
+    ) -> Result<Option<Account>, Self::Error> {
+        self.get_dual::(&block_number, address)
+    }
+
+    /// Get the storage history for an account and storage slot. The returned
+    /// list will contain block numbers where the storage slot was changed.
+    fn get_storage_history(
+        &self,
+        address: &Address,
+        slot: B256,
+        highest_block_number: u64,
+    ) -> Result<Option<BlockNumberList>, Self::Error> {
+        let sharded_key = ShardedKey::new(slot, highest_block_number);
+        self.get_dual::(address, &sharded_key)
+    }
+
+    /// Get the storage change (before state) for a specific storage slot at a
+    /// specific block.
+    ///
+    /// If the return value is `None`, the storage slot was not changed in that
+    /// block. If the return value is `Some(value)`, the value is the pre-state
+    /// of the storage slot before the change in that block. If the value is
+    /// `U256::ZERO`, that indicates that the storage slot was not set before
+    /// the change.
+    fn get_storage_change(
+        &self,
+        block_number: u64,
+        address: &Address,
+        slot: &B256,
+    ) -> Result<Option<U256>, Self::Error> {
+        let block_number_address = BlockNumberAddress((block_number, *address));
+        self.get_dual::(&block_number_address, slot)
+    }
+}
+
+impl<T> HotHistoryRead for T where T: HotDbRead {}
+
+/// Trait for history write operations.
+pub trait HotHistoryWrite: HotDbWrite {
+    /// Maintain a list of block numbers where an account was touched.
+    ///
+    /// Accounts are keyed by address, with the highest block number covered
+    /// by the shard as the secondary key.
+    fn write_account_history(
+        &mut self,
+        address: &Address,
+        latest_height: u64,
+        touched: &BlockNumberList,
+    ) -> Result<(), Self::Error> {
+        self.queue_put_dual::(address, &latest_height, touched)
+    }
+
+    /// Write an account change (pre-state) for an account at a specific
+    /// block.
+    fn write_account_change(
+        &mut self,
+        block_number: u64,
+        address: Address,
+        pre_state: &Account,
+    ) -> Result<(), Self::Error> {
+        self.queue_put_dual::(&block_number, &address, pre_state)
+    }
+
+    /// Write storage history, by highest block number and touched block
+    /// numbers.
+    fn write_storage_history(
+        &mut self,
+        address: &Address,
+        slot: B256,
+        highest_block_number: u64,
+        touched: &BlockNumberList,
+    ) -> Result<(), Self::Error> {
+        let sharded_key = ShardedKey::new(slot, highest_block_number);
+        self.queue_put_dual::(address, &sharded_key, touched)
+    }
+
+    /// Write a storage change (before state) for an account at a specific
+    /// block.
+ fn write_storage_change( + &mut self, + block_number: u64, + address: Address, + slot: &B256, + value: &U256, + ) -> Result<(), Self::Error> { + let block_number_address = BlockNumberAddress((block_number, address)); + self.queue_put_dual::(&block_number_address, slot, value) + } +} + +impl HotHistoryWrite for T where T: HotDbWrite + HotKvWrite {} + +mod sealed { + use crate::hot::model::HotKvRead; + + /// Sealed trait to prevent external implementations of HotDbReader and HotDbWriter. + #[allow(dead_code, unreachable_pub)] + pub trait Sealed {} + impl Sealed for T where T: HotKvRead {} +} diff --git a/crates/storage/src/hot/model/error.rs b/crates/storage/src/hot/model/error.rs new file mode 100644 index 0000000..8dd0924 --- /dev/null +++ b/crates/storage/src/hot/model/error.rs @@ -0,0 +1,42 @@ +use crate::ser::DeserError; + +/// Trait for hot storage read/write errors. +#[derive(thiserror::Error, Debug)] +pub enum HotKvError { + /// Boxed error. Indicates an issue with the DB backend. + #[error(transparent)] + Inner(#[from] Box), + + /// Deserialization error. Indicates an issue deserializing a key or value. + #[error("Deserialization error: {0}")] + Deser(#[from] crate::ser::DeserError), + + /// Indicates that a write transaction is already in progress. + #[error("A write transaction is already in progress")] + WriteLocked, +} + +impl HotKvError { + /// Internal helper to create a `HotKvError::Inner` from any error. + pub fn from_err(err: E) -> Self + where + E: std::error::Error + Send + Sync + 'static, + { + HotKvError::Inner(Box::new(err)) + } +} + +/// Trait to convert specific read errors into `HotKvError`. +pub trait HotKvReadError: std::error::Error + From + Send + Sync + 'static { + /// Convert the error into a `HotKvError`. + fn into_hot_kv_error(self) -> HotKvError; +} + +impl HotKvReadError for HotKvError { + fn into_hot_kv_error(self) -> HotKvError { + self + } +} + +/// Result type for hot storage operations. +pub type HotKvResult = Result; diff --git a/crates/storage/src/hot/model/mod.rs b/crates/storage/src/hot/model/mod.rs new file mode 100644 index 0000000..f499db7 --- /dev/null +++ b/crates/storage/src/hot/model/mod.rs @@ -0,0 +1,38 @@ +mod db_traits; +pub use db_traits::{HotDbRead, HotDbWrite, HotHistoryRead, HotHistoryWrite}; + +mod error; +pub use error::{HotKvError, HotKvReadError, HotKvResult}; + +mod revm; +pub use revm::{RevmRead, RevmWrite}; + +mod traits; +pub use traits::{HotKv, HotKvRead, HotKvWrite}; + +mod traverse; +pub use traverse::{ + DualKeyedTraverse, DualTableCursor, DualTableTraverse, KvTraverse, KvTraverseMut, TableCursor, + TableTraverse, TableTraverseMut, +}; + +use crate::tables::{DualKeyed, Table}; +use std::borrow::Cow; + +/// A key-value pair from a table. +pub type GetManyItem<'a, T> = (&'a ::Key, Option<::Value>); + +/// A key-value tuple from a table. +pub type KeyValue = (::Key, ::Value); + +/// A raw key-value pair. +pub type RawKeyValue<'a> = (Cow<'a, [u8]>, RawValue<'a>); + +/// A raw value. +pub type RawValue<'a> = Cow<'a, [u8]>; + +/// A raw dual key-value tuple. +pub type RawDualKeyValue<'a> = (Cow<'a, [u8]>, RawValue<'a>, RawValue<'a>); + +/// A dual key-value tuple from a table. 
+pub type DualKeyValue = (::Key, ::Key2, ::Value); diff --git a/crates/storage/src/hot/model/revm.rs b/crates/storage/src/hot/model/revm.rs new file mode 100644 index 0000000..598da12 --- /dev/null +++ b/crates/storage/src/hot/model/revm.rs @@ -0,0 +1,762 @@ +use crate::{ + hot::model::{GetManyItem, HotKvError, HotKvRead, HotKvWrite}, + tables::{ + DualKeyed, SingleKey, Table, + hot::{self, Bytecodes, PlainAccountState}, + }, +}; +use alloy::primitives::{Address, B256, KECCAK256_EMPTY}; +use core::fmt; +use reth::primitives::Account; +use std::borrow::Cow; +use trevm::revm::{ + database::{DBErrorMarker, Database, DatabaseRef, TryDatabaseCommit}, + primitives::{HashMap, StorageKey, StorageValue}, + state::{self, AccountInfo, Bytecode as RevmBytecode}, +}; + +// Error marker implementation +impl DBErrorMarker for HotKvError {} + +/// Read-only [`Database`] and [`DatabaseRef`] adapter. +pub struct RevmRead { + reader: T, +} + +impl RevmRead { + /// Create a new read adapter + pub const fn new(reader: T) -> Self { + Self { reader } + } +} + +impl fmt::Debug for RevmRead { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RevmRead").finish() + } +} + +// HotKvRead implementation for RevmRead +impl HotKvRead for RevmRead { + type Error = U::Error; + + type Traverse<'a> + = U::Traverse<'a> + where + U: 'a; + + fn raw_traverse<'a>(&'a self, table: &str) -> Result, Self::Error> { + self.reader.raw_traverse(table) + } + + fn raw_get<'a>( + &'a self, + table: &str, + key: &[u8], + ) -> Result>, Self::Error> { + self.reader.raw_get(table, key) + } + + fn raw_get_dual<'a>( + &'a self, + table: &str, + key1: &[u8], + key2: &[u8], + ) -> Result>, Self::Error> { + self.reader.raw_get_dual(table, key1, key2) + } + + fn get(&self, key: &T::Key) -> Result, Self::Error> { + self.reader.get::(key) + } + + fn get_dual( + &self, + key1: &T::Key, + key2: &T::Key2, + ) -> Result, Self::Error> { + self.reader.get_dual::(key1, key2) + } + + fn get_many<'a, T, I>(&self, keys: I) -> Result>, Self::Error> + where + T::Key: 'a, + T: SingleKey, + I: IntoIterator, + { + self.reader.get_many::(keys) + } +} + +/// Read-write REVM database adapter. This adapter allows committing changes. +/// Despite the naming of [`TryDatabaseCommit::try_commit`], the changes are +/// only persisted when [`Self::persist`] is called. This is because of a +/// mismatch in semantics between the two systems. +pub struct RevmWrite { + writer: U, +} + +impl RevmWrite { + /// Create a new write adapter + pub const fn new(writer: U) -> Self { + Self { writer } + } + + /// Persist the changes made in this write transaction. 
+ pub fn persist(self) -> Result<(), U::Error> { + self.writer.raw_commit() + } +} + +impl fmt::Debug for RevmWrite { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("RevmWrite").finish() + } +} + +// HotKvWrite implementation for RevmWrite +impl HotKvRead for RevmWrite { + type Error = U::Error; + + type Traverse<'a> + = U::Traverse<'a> + where + U: 'a; + + fn raw_traverse<'a>(&'a self, table: &str) -> Result, Self::Error> { + self.writer.raw_traverse(table) + } + + fn raw_get<'a>( + &'a self, + table: &str, + key: &[u8], + ) -> Result>, Self::Error> { + self.writer.raw_get(table, key) + } + + fn raw_get_dual<'a>( + &'a self, + table: &str, + key1: &[u8], + key2: &[u8], + ) -> Result>, Self::Error> { + self.writer.raw_get_dual(table, key1, key2) + } + + fn get(&self, key: &T::Key) -> Result, Self::Error> { + self.writer.get::(key) + } + + fn get_dual( + &self, + key1: &T::Key, + key2: &T::Key2, + ) -> Result, Self::Error> { + self.writer.get_dual::(key1, key2) + } + + fn get_many<'a, T, I>(&self, keys: I) -> Result>, Self::Error> + where + T::Key: 'a, + T: SingleKey, + I: IntoIterator, + { + self.writer.get_many::(keys) + } +} + +impl HotKvWrite for RevmWrite { + type TraverseMut<'a> + = U::TraverseMut<'a> + where + U: 'a; + + fn raw_traverse_mut<'a>( + &'a mut self, + table: &str, + ) -> Result, Self::Error> { + self.writer.raw_traverse_mut(table) + } + + fn queue_raw_put(&mut self, table: &str, key: &[u8], value: &[u8]) -> Result<(), Self::Error> { + self.writer.queue_raw_put(table, key, value) + } + + fn queue_raw_put_dual( + &mut self, + table: &str, + key1: &[u8], + key2: &[u8], + value: &[u8], + ) -> Result<(), Self::Error> { + self.writer.queue_raw_put_dual(table, key1, key2, value) + } + + fn queue_raw_delete(&mut self, table: &str, key: &[u8]) -> Result<(), Self::Error> { + self.writer.queue_raw_delete(table, key) + } + + fn queue_raw_clear(&mut self, table: &str) -> Result<(), Self::Error> { + self.writer.queue_raw_clear(table) + } + + fn queue_raw_create( + &mut self, + table: &str, + dual_key: bool, + dual_fixed: bool, + ) -> Result<(), Self::Error> { + self.writer.queue_raw_create(table, dual_key, dual_fixed) + } + + fn raw_commit(self) -> Result<(), Self::Error> { + self.writer.raw_commit() + } + + fn queue_put( + &mut self, + key: &T::Key, + value: &T::Value, + ) -> Result<(), Self::Error> { + self.writer.queue_put::(key, value) + } + + fn queue_put_dual( + &mut self, + key1: &T::Key, + key2: &T::Key2, + value: &T::Value, + ) -> Result<(), Self::Error> { + self.writer.queue_put_dual::(key1, key2, value) + } + + fn queue_delete(&mut self, key: &T::Key) -> Result<(), Self::Error> { + self.writer.queue_delete::(key) + } + + fn queue_put_many<'a, 'b, T, I>(&mut self, entries: I) -> Result<(), Self::Error> + where + T: SingleKey, + T::Key: 'a, + T::Value: 'b, + I: IntoIterator, + { + self.writer.queue_put_many::(entries) + } + + fn queue_create(&mut self) -> Result<(), Self::Error> + where + T: Table, + { + self.writer.queue_create::() + } + + fn queue_clear(&mut self) -> Result<(), Self::Error> + where + T: Table, + { + self.writer.queue_clear::() + } +} + +// DatabaseRef implementation for RevmRead +impl DatabaseRef for RevmRead +where + T::Error: DBErrorMarker, +{ + type Error = T::Error; + + fn basic_ref(&self, address: Address) -> Result, Self::Error> { + let account_opt = self.reader.get::(&address)?; + + let Some(account) = account_opt else { + return Ok(None); + }; + + let code_hash = account.bytecode_hash.unwrap_or(KECCAK256_EMPTY); + let 
code = if code_hash != KECCAK256_EMPTY { + self.reader.get::(&code_hash)?.map(|b| b.0) + } else { + None + }; + + Ok(Some(AccountInfo { balance: account.balance, nonce: account.nonce, code_hash, code })) + } + + fn code_by_hash_ref(&self, code_hash: B256) -> Result { + Ok(self.reader.get::(&code_hash)?.map(|bytecode| bytecode.0).unwrap_or_default()) + } + + fn storage_ref( + &self, + address: Address, + index: StorageKey, + ) -> Result { + let key = B256::from_slice(&index.to_be_bytes::<32>()); + + Ok(self.reader.get_dual::(&address, &key)?.unwrap_or_default()) + } + + fn block_hash_ref(&self, _number: u64) -> Result { + // This would need to be implemented based on your block hash storage + // For now, return zero hash + Ok(B256::ZERO) + } +} + +// Database implementation for RevmRead +impl Database for RevmRead +where + T::Error: DBErrorMarker, +{ + type Error = T::Error; + + fn basic(&mut self, address: Address) -> Result, Self::Error> { + self.basic_ref(address) + } + + fn code_by_hash(&mut self, code_hash: B256) -> Result { + self.code_by_hash_ref(code_hash) + } + + fn storage( + &mut self, + address: Address, + index: StorageKey, + ) -> Result { + self.storage_ref(address, index) + } + + fn block_hash(&mut self, number: u64) -> Result { + self.block_hash_ref(number) + } +} + +// DatabaseRef implementation for RevmWrite (delegates to read operations) +impl DatabaseRef for RevmWrite +where + T::Error: DBErrorMarker, +{ + type Error = T::Error; + + fn basic_ref(&self, address: Address) -> Result, Self::Error> { + let account_opt = self.writer.get::(&address)?; + + let Some(account) = account_opt else { + return Ok(None); + }; + + let code_hash = account.bytecode_hash.unwrap_or(KECCAK256_EMPTY); + let code = if code_hash != KECCAK256_EMPTY { + self.writer.get::(&code_hash)?.map(|b| b.0) + } else { + None + }; + + Ok(Some(AccountInfo { balance: account.balance, nonce: account.nonce, code_hash, code })) + } + + fn code_by_hash_ref(&self, code_hash: B256) -> Result { + Ok(self.writer.get::(&code_hash)?.map(|bytecode| bytecode.0).unwrap_or_default()) + } + + fn storage_ref( + &self, + address: Address, + index: StorageKey, + ) -> Result { + let key = B256::from_slice(&index.to_be_bytes::<32>()); + Ok(self.writer.get_dual::(&address, &key)?.unwrap_or_default()) + } + + fn block_hash_ref(&self, _number: u64) -> Result { + // This would need to be implemented based on your block hash storage + // For now, return zero hash + Ok(B256::ZERO) + } +} + +// Database implementation for RevmWrite +impl Database for RevmWrite +where + T::Error: DBErrorMarker, +{ + type Error = T::Error; + + fn basic(&mut self, address: Address) -> Result, Self::Error> { + self.basic_ref(address) + } + + fn code_by_hash(&mut self, code_hash: B256) -> Result { + self.code_by_hash_ref(code_hash) + } + + fn storage( + &mut self, + address: Address, + index: StorageKey, + ) -> Result { + self.storage_ref(address, index) + } + + fn block_hash(&mut self, number: u64) -> Result { + self.block_hash_ref(number) + } +} + +// TryDatabaseCommit implementation for RevmWrite +impl TryDatabaseCommit for RevmWrite +where + T::Error: DBErrorMarker, +{ + type Error = T::Error; + + fn try_commit(&mut self, changes: HashMap) -> Result<(), Self::Error> { + for (address, account) in changes { + // Handle account info changes + let account_data = Account { + nonce: account.info.nonce, + balance: account.info.balance, + bytecode_hash: (account.info.code_hash != KECCAK256_EMPTY) + .then_some(account.info.code_hash), + }; + 
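+            // Queue the updated account under its address; any touched storage
+            // slots for this account are queued below as dual-keyed writes.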
self.writer.queue_put::(&address, &account_data)?; + + // Handle storage changes + for (key, value) in account.storage { + let key = B256::from_slice(&key.to_be_bytes::<32>()); + self.writer.queue_put_dual::( + &address, + &key, + &value.present_value(), + )?; + } + } + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + hot::{ + mem::MemKv, + model::{HotKv, HotKvRead, HotKvWrite}, + }, + tables::hot::{Bytecodes, PlainAccountState}, + }; + use alloy::primitives::{Address, B256, U256}; + use reth::primitives::{Account, Bytecode}; + use trevm::revm::{ + database::{Database, DatabaseRef, TryDatabaseCommit}, + primitives::{HashMap, StorageKey, StorageValue}, + state::{Account as RevmAccount, AccountInfo, Bytecode as RevmBytecode}, + }; + + /// Create a test account with some data + fn create_test_account() -> (Address, Account) { + let address = Address::from_slice(&[0x1; 20]); + let account = Account { + nonce: 42, + balance: U256::from(1000u64), + bytecode_hash: Some(B256::from_slice(&[0x2; 32])), + }; + (address, account) + } + + /// Create test bytecode + fn create_test_bytecode() -> (B256, Bytecode) { + let hash = B256::from_slice(&[0x2; 32]); + let code = RevmBytecode::new_raw(vec![0x60, 0x80, 0x60, 0x40].into()); + let bytecode = Bytecode(code); + (hash, bytecode) + } + + #[test] + fn test_database_ref_traits() -> Result<(), Box> { + let mem_kv = MemKv::default(); + + let (address, account) = create_test_account(); + let (hash, bytecode) = create_test_bytecode(); + + { + // Setup data using HotKv + let mut writer = mem_kv.revm_writer()?; + writer.queue_put::(&address, &account)?; + writer.queue_put::(&hash, &bytecode)?; + writer.persist()?; + } + + { + // Read using REVM DatabaseRef traits + let reader = mem_kv.revm_reader()?; + + // Test basic_ref + let account_info = reader.basic_ref(address)?; + assert!(account_info.is_some()); + let info = account_info.unwrap(); + assert_eq!(info.nonce, 42); + assert_eq!(info.balance, U256::from(1000u64)); + assert_eq!(info.code_hash, hash); + + // Test code_by_hash_ref + let retrieved_code = reader.code_by_hash_ref(hash)?; + assert_eq!(retrieved_code, bytecode.0); + + // Test storage_ref (should be zero for non-existent storage) + let storage_val = reader.storage_ref(address, StorageKey::from(U256::from(123u64)))?; + assert_eq!(storage_val, U256::ZERO); + + // Test block_hash_ref + let block_hash = reader.block_hash_ref(123)?; + assert_eq!(block_hash, B256::ZERO); + } + + Ok(()) + } + + #[test] + fn test_database_mutable_traits() -> Result<(), Box> { + let mem_kv = MemKv::default(); + + let (address, account) = create_test_account(); + let (hash, bytecode) = create_test_bytecode(); + + { + // Setup data using HotKv + let mut writer = mem_kv.revm_writer()?; + writer.queue_put::(&address, &account)?; + writer.queue_put::(&hash, &bytecode)?; + writer.persist()?; + } + + { + // Read using mutable REVM Database traits + let mut reader = mem_kv.revm_reader()?; + + // Test basic + let account_info = reader.basic(address)?; + assert!(account_info.is_some()); + let info = account_info.unwrap(); + assert_eq!(info.nonce, 42); + assert_eq!(info.balance, U256::from(1000u64)); + + // Test code_by_hash + let retrieved_code = reader.code_by_hash(hash)?; + assert_eq!(retrieved_code, bytecode.0); + + // Test storage + let storage_val = reader.storage(address, StorageKey::from(U256::from(123u64)))?; + assert_eq!(storage_val, U256::ZERO); + + // Test block_hash + let block_hash = reader.block_hash(123)?; + assert_eq!(block_hash, B256::ZERO); 
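+            // NB: `block_hash`/`block_hash_ref` are currently placeholders that
+            // always return the zero hash, so these assertions pin that behavior.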
+ } + + Ok(()) + } + + #[test] + fn test_write_database_traits() -> Result<(), Box> { + let mem_kv = MemKv::default(); + + let (address, account) = create_test_account(); + let (hash, bytecode) = create_test_bytecode(); + + { + // Setup initial data + let mut writer = mem_kv.revm_writer()?; + writer.queue_put::(&address, &account)?; + writer.queue_put::(&hash, &bytecode)?; + writer.persist()?; + } + + { + // Test write operations using DatabaseRef and Database traits + let mut writer = mem_kv.revm_writer()?; + + // Test read operations work on writer + let account_info = writer.basic_ref(address)?; + assert!(account_info.is_some()); + + let account_info_mut = writer.basic(address)?; + assert!(account_info_mut.is_some()); + + let code = writer.code_by_hash_ref(hash)?; + assert_eq!(code, bytecode.0); + + let code_mut = writer.code_by_hash(hash)?; + assert_eq!(code_mut, bytecode.0); + + // Don't persist this writer to test that reads work + } + + Ok(()) + } + + #[test] + fn test_try_database_commit() -> Result<(), Box> { + let mem_kv = MemKv::default(); + + let address = Address::from_slice(&[0x1; 20]); + + { + let mut writer = mem_kv.revm_writer()?; + + // Create REVM state changes + let mut changes = HashMap::default(); + let account_info = AccountInfo { + nonce: 55, + balance: U256::from(2000u64), + code_hash: KECCAK256_EMPTY, + code: None, + }; + + let mut storage = HashMap::default(); + storage.insert( + StorageKey::from(U256::from(100u64)), + trevm::revm::state::EvmStorageSlot::new(U256::from(200u64), 0), + ); + + let revm_account = RevmAccount { + info: account_info, + storage, + status: trevm::revm::state::AccountStatus::Touched, + transaction_id: 0, + }; + + changes.insert(address, revm_account); + + // Commit changes using REVM trait + writer.try_commit(changes)?; + writer.persist()?; + } + + { + // Verify changes were persisted using HotKv traits + let reader = mem_kv.revm_reader()?; + + let account: Option = reader.get::(&address)?; + assert!(account.is_some()); + let acc = account.unwrap(); + assert_eq!(acc.nonce, 55); + assert_eq!(acc.balance, U256::from(2000u64)); + assert_eq!(acc.bytecode_hash, None); + + let key = B256::with_last_byte(100); + let storage_val: Option = + reader.get_dual::(&address, &key)?; + assert_eq!(storage_val, Some(U256::from(200u64))); + } + + Ok(()) + } + + #[test] + fn test_mixed_usage_patterns() -> Result<(), Box> { + let mem_kv = MemKv::default(); + + let address1 = Address::from_slice(&[0x1; 20]); + let address2 = Address::from_slice(&[0x2; 20]); + + // Write some data using HotKv + { + let mut writer = mem_kv.revm_writer()?; + let account = Account { nonce: 10, balance: U256::from(500u64), bytecode_hash: None }; + writer.queue_put::(&address1, &account)?; + writer.persist()?; + } + + // Write more data using REVM traits + { + let mut writer = mem_kv.revm_writer()?; + let mut changes = HashMap::default(); + let revm_account = RevmAccount { + info: AccountInfo { + nonce: 20, + balance: U256::from(1500u64), + code_hash: KECCAK256_EMPTY, + code: None, + }, + storage: HashMap::default(), + status: trevm::revm::state::AccountStatus::Touched, + transaction_id: 0, + }; + changes.insert(address2, revm_account); + writer.try_commit(changes)?; + writer.persist()?; + } + + // Read using mixed approaches + { + let reader = mem_kv.revm_reader()?; + + // Read address1 using HotKv + let account1: Option = reader.get::(&address1)?; + assert!(account1.is_some()); + assert_eq!(account1.unwrap().nonce, 10); + + // Read address2 using REVM DatabaseRef + let 
account2_info = reader.basic_ref(address2)?; + assert!(account2_info.is_some()); + assert_eq!(account2_info.unwrap().nonce, 20); + } + + Ok(()) + } + + #[test] + fn test_error_handling() -> Result<(), Box> { + let mem_kv = MemKv::default(); + + let address = Address::from_slice(&[0x1; 20]); + let hash = B256::from_slice(&[0x99; 32]); + + let reader = mem_kv.revm_reader()?; + + // Test reading non-existent account + let account_info = reader.basic_ref(address)?; + assert!(account_info.is_none()); + + // Test reading non-existent code + let code = reader.code_by_hash_ref(hash)?; + assert!(code.is_empty()); + + // Test reading non-existent storage + let storage = reader.storage_ref(address, StorageKey::from(U256::from(123u64)))?; + assert_eq!(storage, U256::ZERO); + + Ok(()) + } + + #[test] + fn test_concurrent_readers() -> Result<(), Box> { + let mem_kv = MemKv::default(); + + let (address, account) = create_test_account(); + + // Setup data + { + let mut writer = mem_kv.revm_writer()?; + writer.queue_put::(&address, &account)?; + writer.persist()?; + } + + // Create multiple readers + let reader1 = mem_kv.revm_reader()?; + let reader2 = mem_kv.revm_reader()?; + + // Both should read the same data + let account1 = reader1.basic_ref(address)?; + let account2 = reader2.basic_ref(address)?; + + assert_eq!(account1, account2); + assert!(account1.is_some()); + + Ok(()) + } +} diff --git a/crates/storage/src/hot/model/traits.rs b/crates/storage/src/hot/model/traits.rs new file mode 100644 index 0000000..07942b8 --- /dev/null +++ b/crates/storage/src/hot/model/traits.rs @@ -0,0 +1,345 @@ +use crate::{ + hot::model::{ + DualTableCursor, GetManyItem, HotKvError, HotKvReadError, KvTraverse, KvTraverseMut, + TableCursor, + revm::{RevmRead, RevmWrite}, + }, + ser::{KeySer, MAX_KEY_SIZE, ValSer}, + tables::{DualKeyed, SingleKey, Table}, +}; +use std::borrow::Cow; + +/// Trait for hot storage. This is a KV store with read/write transactions. +#[auto_impl::auto_impl(&, Arc, Box)] +pub trait HotKv { + /// The read-only transaction type. + type RoTx: HotKvRead; + /// The read-write transaction type. + type RwTx: HotKvWrite; + + /// Create a read-only transaction. + fn reader(&self) -> Result; + + /// Create a read-only transaction, and wrap it in an adapter for the + /// revm [`DatabaseRef`] trait. The resulting reader can be used directly + /// with [`trevm`] and [`revm`]. + /// + /// [`DatabaseRef`]: trevm::revm::database::DatabaseRef + fn revm_reader(&self) -> Result, HotKvError> { + self.reader().map(RevmRead::new) + } + + /// Create a read-write transaction. + /// + /// This is allowed to fail with [`Err(HotKvError::WriteLocked)`] if + /// multiple write transactions are not supported concurrently. + /// + /// # Returns + /// + /// - `Ok(Some(tx))` if the write transaction was created successfully. + /// - [`Err(HotKvError::WriteLocked)`] if there is already a write + /// transaction in progress. + /// - [`Err(HotKvError::Inner)`] if there was an error creating the + /// transaction. + /// + /// [`Err(HotKvError::Inner)`]: HotKvError::Inner + /// [`Err(HotKvError::WriteLocked)`]: HotKvError::WriteLocked + fn writer(&self) -> Result; + + /// Create a read-write transaction, and wrap it in an adapter for the + /// revm [`TryDatabaseCommit`] trait. The resulting writer can be used + /// directly with [`trevm`] and [`revm`]. 
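+    ///
+    /// A minimal sketch, assuming a [`HotKv`] backend such as the in-memory
+    /// `MemKv` used in this crate's tests:
+    ///
+    /// ```ignore
+    /// let kv = MemKv::default();
+    /// let mut writer = kv.revm_writer()?; // usable with revm's database traits
+    /// // ... queue writes or `try_commit` revm state changes ...
+    /// writer.persist()?;
+    /// ```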
+ /// + /// + /// [`revm`]: trevm::revm + /// [`TryDatabaseCommit`]: trevm::revm::database::TryDatabaseCommit + fn revm_writer(&self) -> Result, HotKvError> { + self.writer().map(RevmWrite::new) + } +} + +/// Trait for hot storage read transactions. +#[auto_impl::auto_impl(&, Arc, Box)] +pub trait HotKvRead { + /// Error type for read operations. + type Error: HotKvReadError; + + /// The cursor type for traversing key-value pairs. + type Traverse<'a>: KvTraverse + where + Self: 'a; + + /// Get a raw cursor to traverse the database. + fn raw_traverse<'a>(&'a self, table: &str) -> Result, Self::Error>; + + /// Get a raw value from a specific table. + /// + /// The `key` buf must be <= [`MAX_KEY_SIZE`] bytes. Implementations are + /// allowed to panic if this is not the case. + /// + /// If the table is dual-keyed, the output MAY be implementation-defined. + fn raw_get<'a>(&'a self, table: &str, key: &[u8]) + -> Result>, Self::Error>; + + /// Get a raw value from a specific table with dual keys. + /// + /// If `key1` is present, but `key2` is not in the table, the output is + /// implementation-defined. For sorted databases, it SHOULD return the value + /// of the NEXT populated key. It MAY also return `None`, even if other + /// subkeys are populated. + /// + /// If the table is not dual-keyed, the output MAY be + /// implementation-defined. + fn raw_get_dual<'a>( + &'a self, + table: &str, + key1: &[u8], + key2: &[u8], + ) -> Result>, Self::Error>; + + /// Traverse a specific table. Returns a typed cursor wrapper. + fn traverse<'a, T: SingleKey>( + &'a self, + ) -> Result, T, Self::Error>, Self::Error> { + let cursor = self.raw_traverse(T::NAME)?; + Ok(TableCursor::new(cursor)) + } + + /// Traverse a specific dual-keyed table. Returns a typed dual-keyed + /// cursor wrapper. + fn traverse_dual<'a, T: DualKeyed>( + &'a self, + ) -> Result, T, Self::Error>, Self::Error> { + let cursor = self.raw_traverse(T::NAME)?; + Ok(DualTableCursor::new(cursor)) + } + + /// Get a value from a specific table. + fn get(&self, key: &T::Key) -> Result, Self::Error> { + let mut key_buf = [0u8; MAX_KEY_SIZE]; + let key_bytes = key.encode_key(&mut key_buf); + debug_assert!( + key_bytes.len() == T::Key::SIZE, + "Encoded key length does not match expected size" + ); + + let Some(value_bytes) = self.raw_get(T::NAME, key_bytes)? else { + return Ok(None); + }; + T::Value::decode_value(&value_bytes).map(Some).map_err(Into::into) + } + + /// Get a value from a specific dual-keyed table. + /// + /// If `key1` is present, but `key2` is not in the table, the output is + /// implementation-defined. For sorted databases, it SHOULD return the value + /// of the NEXT populated key. It MAY also return `None`, even if other + /// subkeys are populated. + /// + /// If the table is not dual-keyed, the output MAY be + /// implementation-defined. + fn get_dual( + &self, + key1: &T::Key, + key2: &T::Key2, + ) -> Result, Self::Error> { + let mut key1_buf = [0u8; MAX_KEY_SIZE]; + let mut key2_buf = [0u8; MAX_KEY_SIZE]; + + let key1_bytes = key1.encode_key(&mut key1_buf); + let key2_bytes = key2.encode_key(&mut key2_buf); + + let Some(value_bytes) = self.raw_get_dual(T::NAME, key1_bytes, key2_bytes)? else { + return Ok(None); + }; + T::Value::decode_value(&value_bytes).map(Some).map_err(Into::into) + } + + /// Get many values from a specific table. + /// + /// # Arguments + /// + /// * `keys` - An iterator over keys to retrieve. 
+ /// + /// # Returns + /// + /// A vector of [`KeyValue`] where each element + /// corresponds to the value for the respective key in the input iterator. + /// If a key does not exist in the table, the corresponding element will be + /// `None`. + /// + /// Implementations ARE NOT required to preserve the order of the input + /// keys in the output vector. Users should not rely on any specific + /// ordering. + /// + /// If any error occurs during retrieval or deserialization, the entire + /// operation will return an error. + fn get_many<'a, T, I>(&self, keys: I) -> Result>, Self::Error> + where + T::Key: 'a, + T: SingleKey, + I: IntoIterator, + { + let mut key_buf = [0u8; MAX_KEY_SIZE]; + + keys.into_iter() + .map(|key| (key, self.raw_get(T::NAME, key.encode_key(&mut key_buf)))) + .map(|(key, maybe_val)| { + maybe_val + .and_then(|val| ValSer::maybe_decode_value(val.as_deref()).map_err(Into::into)) + .map(|res| (key, res)) + }) + .collect() + } +} + +/// Trait for hot storage write transactions. +pub trait HotKvWrite: HotKvRead { + /// The mutable cursor type for traversing key-value pairs. + type TraverseMut<'a>: KvTraverseMut + where + Self: 'a; + + /// Get a raw mutable cursor to traverse the database. + fn raw_traverse_mut<'a>( + &'a mut self, + table: &str, + ) -> Result, Self::Error>; + + /// Queue a raw put operation. + /// + /// The `key` buf must be <= [`MAX_KEY_SIZE`] bytes. Implementations are + /// allowed to panic if this is not the case. + fn queue_raw_put(&mut self, table: &str, key: &[u8], value: &[u8]) -> Result<(), Self::Error>; + + /// Queue a raw put operation for a dual-keyed table. + //// + /// The `key1` and `key2` buf must be <= [`MAX_KEY_SIZE`] bytes. + /// Implementations are allowed to panic if this is not the case. + fn queue_raw_put_dual( + &mut self, + table: &str, + key1: &[u8], + key2: &[u8], + value: &[u8], + ) -> Result<(), Self::Error>; + + /// Queue a raw delete operation. + /// + /// The `key` buf must be <= [`MAX_KEY_SIZE`] bytes. Implementations are + /// allowed to panic if this is not the case. + fn queue_raw_delete(&mut self, table: &str, key: &[u8]) -> Result<(), Self::Error>; + + /// Queue a raw clear operation for a specific table. + fn queue_raw_clear(&mut self, table: &str) -> Result<(), Self::Error>; + + /// Queue a raw create operation for a specific table. + /// + /// This abstraction supports two table specializations: + /// 1. `dual_key`: whether the table uses dual keys (interior maps, called + /// `DUPSORT` in LMDB/MDBX). + /// 2. `fixed_val`: whether the table has fixed-size values. + /// + /// Database implementations can use this information for optimizations. + fn queue_raw_create( + &mut self, + table: &str, + dual_key: bool, + fixed_val: bool, + ) -> Result<(), Self::Error>; + + /// Traverse a specific table. Returns a mutable typed cursor wrapper. + /// If invoked for a dual-keyed table, it will traverse the primary keys + /// only, and the return value may be implementation-defined. + fn traverse_mut<'a, T: SingleKey>( + &'a mut self, + ) -> Result, T, Self::Error>, Self::Error> { + let cursor = self.raw_traverse_mut(T::NAME)?; + Ok(TableCursor::new(cursor)) + } + + /// Traverse a specific dual-keyed table. Returns a mutable typed + /// dual-keyed cursor wrapper. + fn traverse_dual_mut<'a, T: DualKeyed>( + &'a mut self, + ) -> Result, T, Self::Error>, Self::Error> { + let cursor = self.raw_traverse_mut(T::NAME)?; + Ok(DualTableCursor::new(cursor)) + } + + /// Queue a put operation for a specific table. 
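+    ///
+    /// Hypothetical round-trip, mirroring the revm adapter tests
+    /// (`PlainAccountState` maps an `Address` key to an `Account` value):
+    ///
+    /// ```ignore
+    /// let mut tx = kv.writer()?;
+    /// tx.queue_put::<PlainAccountState>(&address, &account)?;
+    /// tx.raw_commit()?;
+    /// assert_eq!(kv.reader()?.get::<PlainAccountState>(&address)?, Some(account));
+    /// ```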
+ fn queue_put( + &mut self, + key: &T::Key, + value: &T::Value, + ) -> Result<(), Self::Error> { + let mut key_buf = [0u8; MAX_KEY_SIZE]; + let key_bytes = key.encode_key(&mut key_buf); + let value_bytes = value.encoded(); + + self.queue_raw_put(T::NAME, key_bytes, &value_bytes) + } + + /// Queue a put operation for a specific dual-keyed table. + fn queue_put_dual( + &mut self, + key1: &T::Key, + key2: &T::Key2, + value: &T::Value, + ) -> Result<(), Self::Error> { + let mut key1_buf = [0u8; MAX_KEY_SIZE]; + let mut key2_buf = [0u8; MAX_KEY_SIZE]; + let key1_bytes = key1.encode_key(&mut key1_buf); + let key2_bytes = key2.encode_key(&mut key2_buf); + let value_bytes = value.encoded(); + + self.queue_raw_put_dual(T::NAME, key1_bytes, key2_bytes, &value_bytes) + } + + /// Queue a delete operation for a specific table. + fn queue_delete(&mut self, key: &T::Key) -> Result<(), Self::Error> { + let mut key_buf = [0u8; MAX_KEY_SIZE]; + let key_bytes = key.encode_key(&mut key_buf); + + self.queue_raw_delete(T::NAME, key_bytes) + } + + /// Queue many put operations for a specific table. + fn queue_put_many<'a, 'b, T, I>(&mut self, entries: I) -> Result<(), Self::Error> + where + T: SingleKey, + T::Key: 'a, + T::Value: 'b, + I: IntoIterator, + { + let mut key_buf = [0u8; MAX_KEY_SIZE]; + + for (key, value) in entries { + let key_bytes = key.encode_key(&mut key_buf); + let value_bytes = value.encoded(); + + self.queue_raw_put(T::NAME, key_bytes, &value_bytes)?; + } + + Ok(()) + } + + /// Queue creation of a specific table. + fn queue_create(&mut self) -> Result<(), Self::Error> + where + T: Table, + { + self.queue_raw_create(T::NAME, T::DUAL_KEY, T::IS_FIXED_VAL) + } + + /// Queue clearing all entries in a specific table. + fn queue_clear(&mut self) -> Result<(), Self::Error> + where + T: Table, + { + self.queue_raw_clear(T::NAME) + } + + /// Commit the queued operations. + fn raw_commit(self) -> Result<(), Self::Error>; +} diff --git a/crates/storage/src/hot/model/traverse.rs b/crates/storage/src/hot/model/traverse.rs new file mode 100644 index 0000000..12cefd4 --- /dev/null +++ b/crates/storage/src/hot/model/traverse.rs @@ -0,0 +1,399 @@ +//! Cursor traversal traits and typed wrappers for database navigation. + +use crate::{ + hot::model::{DualKeyValue, HotKvReadError, KeyValue, RawDualKeyValue, RawKeyValue, RawValue}, + ser::{KeySer, MAX_KEY_SIZE}, + tables::{DualKeyed, Table}, +}; +use std::ops::Range; + +/// Trait for traversing key-value pairs in the database. +pub trait KvTraverse { + /// Set position to the first key-value pair in the database, and return + /// the KV pair. + fn first<'a>(&'a mut self) -> Result>, E>; + + /// Set position to the last key-value pair in the database, and return the + /// KV pair. + fn last<'a>(&'a mut self) -> Result>, E>; + + /// Set the cursor to specific key in the database, and return the EXACT KV + /// pair if it exists. + fn exact<'a>(&'a mut self, key: &[u8]) -> Result>, E>; + + /// Seek to the next key-value pair AT OR ABOVE the specified key in the + /// database, and return that KV pair. + fn lower_bound<'a>(&'a mut self, key: &[u8]) -> Result>, E>; + + /// Get the next key-value pair in the database, and advance the cursor. + /// + /// Returning `Ok(None)` indicates the cursor is past the end of the + /// database. + fn read_next<'a>(&'a mut self) -> Result>, E>; + + /// Get the previous key-value pair in the database, and move the cursor. + /// + /// Returning `Ok(None)` indicates the cursor is before the start of the + /// database. 
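+
+    // Illustrative forward scan with this raw cursor interface (exact stepping
+    // semantics may vary by backend; the typed wrappers below are the usual
+    // entry point):
+    //
+    //     cursor.first()?;
+    //     while let Some((key, value)) = cursor.read_next()? {
+    //         // raw key/value bytes for each entry
+    //     }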
+ fn read_prev<'a>(&'a mut self) -> Result>, E>; +} + +/// Trait for traversing key-value pairs in the database with mutation +/// capabilities. +pub trait KvTraverseMut: KvTraverse { + /// Delete the current key-value pair in the database. + fn delete_current(&mut self) -> Result<(), E>; + + /// Delete a range of key-value pairs in the database, from `start_key` + fn delete_range(&mut self, range: Range<&[u8]>) -> Result<(), E> { + let _ = self.exact(range.start)?; + while let Some((key, _value)) = self.read_next()? { + if key.as_ref() >= range.end { + break; + } + self.delete_current()?; + } + Ok(()) + } +} + +/// Trait for traversing dual-keyed key-value pairs in the database. +pub trait DualKeyedTraverse: KvTraverse { + /// Set the cursor to specific dual key in the database, and return the + /// EXACT KV pair if it exists. + /// + /// Returning `Ok(None)` indicates the exact dual key does not exist. + fn exact_dual<'a>(&'a mut self, key1: &[u8], key2: &[u8]) -> Result>, E>; + + /// Seek to the next key-value pair AT or ABOVE the specified dual key in + /// the database, and return that KV pair. + /// + /// Returning `Ok(None)` indicates there are no more key-value pairs above + /// the specified dual key. + fn next_dual_above<'a>( + &'a mut self, + key1: &[u8], + key2: &[u8], + ) -> Result>, E>; + + /// Move the cursor to the next distinct key1, and return the first + /// key-value pair with that key1. + /// + /// Returning `Ok(None)` indicates there are no more distinct key1 values. + fn next_k1<'a>(&'a mut self) -> Result>, E>; + + /// Move the cursor to the next distinct key2 for the current key1, and + /// return the first key-value pair with that key2. + fn next_k2<'a>(&'a mut self) -> Result>, E>; +} + +// ============================================================================ +// Typed Extension Traits +// ============================================================================ + +/// Extension trait for typed table traversal. +/// +/// This trait provides type-safe access to table entries by encoding keys +/// and decoding values according to the table's schema. +pub trait TableTraverse: KvTraverse { + /// Get the first key-value pair in the table. + fn first(&mut self) -> Result>, E> { + KvTraverse::first(self)?.map(T::decode_kv_tuple).transpose().map_err(Into::into) + } + + /// Get the last key-value pair in the table. + fn last(&mut self) -> Result>, E> { + KvTraverse::last(self)?.map(T::decode_kv_tuple).transpose().map_err(Into::into) + } + + /// Set the cursor to a specific key and return the EXACT value if it exists. + fn exact(&mut self, key: &T::Key) -> Result, E> { + let mut key_buf = [0u8; MAX_KEY_SIZE]; + let key_bytes = key.encode_key(&mut key_buf); + + KvTraverse::exact(self, key_bytes)?.map(T::decode_value).transpose().map_err(Into::into) + } + + /// Seek to the next key-value pair AT OR ABOVE the specified key. + fn lower_bound(&mut self, key: &T::Key) -> Result>, E> { + let mut key_buf = [0u8; MAX_KEY_SIZE]; + let key_bytes = key.encode_key(&mut key_buf); + + KvTraverse::lower_bound(self, key_bytes)? + .map(T::decode_kv_tuple) + .transpose() + .map_err(Into::into) + } + + /// Get the next key-value pair and advance the cursor. + fn read_next(&mut self) -> Result>, E> { + KvTraverse::read_next(self)?.map(T::decode_kv_tuple).transpose().map_err(Into::into) + } + + /// Get the previous key-value pair and move the cursor backward. 
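+
+    // Sketch of typed access through this extension trait (`T` is a hypothetical
+    // table; the same calls are exposed more conveniently via `TableCursor`):
+    //
+    //     let value = TableTraverse::<T, _>::exact(&mut cursor, &key)?;
+    //     let next = TableTraverse::<T, _>::lower_bound(&mut cursor, &key)?;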
+ fn read_prev(&mut self) -> Result>, E> { + KvTraverse::read_prev(self)?.map(T::decode_kv_tuple).transpose().map_err(Into::into) + } +} + +/// Blanket implementation of `TableTraverse` for any cursor that implements `KvTraverse`. +impl TableTraverse for C +where + C: KvTraverse, + T: Table, + E: HotKvReadError, +{ +} + +/// Extension trait for typed table traversal with mutation capabilities. +pub trait TableTraverseMut: KvTraverseMut { + /// Delete the current key-value pair. + fn delete_current(&mut self) -> Result<(), E> { + KvTraverseMut::delete_current(self) + } + + /// Delete a range of key-value pairs. + fn delete_range(&mut self, range: Range) -> Result<(), E> { + let mut start_key_buf = [0u8; MAX_KEY_SIZE]; + let mut end_key_buf = [0u8; MAX_KEY_SIZE]; + let start_key_bytes = range.start.encode_key(&mut start_key_buf); + let end_key_bytes = range.end.encode_key(&mut end_key_buf); + + KvTraverseMut::delete_range(self, start_key_bytes..end_key_bytes) + } +} + +/// Blanket implementation of `TableTraverseMut` for any cursor that implements `KvTraverseMut`. +impl TableTraverseMut for C +where + C: KvTraverseMut, + T: Table, + E: HotKvReadError, +{ +} + +/// A typed cursor wrapper for traversing dual-keyed tables. +/// +/// This is an extension trait rather than a wrapper struct because MDBX +/// requires specialized implementations for DUPSORT tables that need access +/// to the table type `T` to handle fixed-size values correctly. +pub trait DualTableTraverse { + /// Return the EXACT value for the specified dual key if it exists. + fn exact_dual(&mut self, key1: &T::Key, key2: &T::Key2) -> Result, E> { + let Some((k1, k2, v)) = self.next_dual_above(key1, key2)? else { + return Ok(None); + }; + + if k1 == *key1 && k2 == *key2 { Ok(Some(v)) } else { Ok(None) } + } + + /// Seek to the next key-value pair AT or ABOVE the specified dual key. + fn next_dual_above( + &mut self, + key1: &T::Key, + key2: &T::Key2, + ) -> Result>, E>; + + /// Seek to the next distinct key1, and return the first key-value pair with that key1. + fn next_k1(&mut self) -> Result>, E>; + + /// Seek to the next distinct key2 for the current key1. + fn next_k2(&mut self) -> Result>, E>; +} + +// ============================================================================ +// Wrapper Structs +// ============================================================================ + +use core::marker::PhantomData; + +/// A wrapper struct for typed table traversal. +/// +/// This struct wraps a raw cursor and provides type-safe access to table +/// entries. It implements `TableTraverse` by delegating to the inner +/// cursor. +#[derive(Debug)] +pub struct TableCursor { + inner: C, + _marker: PhantomData (T, E)>, +} + +impl TableCursor { + /// Create a new typed table cursor wrapper. + pub const fn new(cursor: C) -> Self { + Self { inner: cursor, _marker: PhantomData } + } + + /// Get a reference to the inner cursor. + pub const fn inner(&self) -> &C { + &self.inner + } + + /// Get a mutable reference to the inner cursor. + pub fn inner_mut(&mut self) -> &mut C { + &mut self.inner + } + + /// Consume the wrapper and return the inner cursor. + pub fn into_inner(self) -> C { + self.inner + } +} + +impl TableCursor +where + C: KvTraverse, + T: Table, + E: HotKvReadError, +{ + /// Get the first key-value pair in the table. + pub fn first(&mut self) -> Result>, E> { + TableTraverse::::first(&mut self.inner) + } + + /// Get the last key-value pair in the table. 
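+
+    // Typical construction is via `HotKvRead::traverse::<T>()`, which wraps the
+    // backend's raw cursor in this typed adapter (sketch; `MyTable` is a
+    // hypothetical `SingleKey` table):
+    //
+    //     let mut cursor = tx.traverse::<MyTable>()?;
+    //     let first = cursor.first()?; // Option<(MyTable::Key, MyTable::Value)>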
+ pub fn last(&mut self) -> Result>, E> { + TableTraverse::::last(&mut self.inner) + } + + /// Set the cursor to a specific key and return the EXACT value if it exists. + pub fn exact(&mut self, key: &T::Key) -> Result, E> { + TableTraverse::::exact(&mut self.inner, key) + } + + /// Seek to the next key-value pair AT OR ABOVE the specified key. + pub fn lower_bound(&mut self, key: &T::Key) -> Result>, E> { + TableTraverse::::lower_bound(&mut self.inner, key) + } + + /// Get the next key-value pair and advance the cursor. + pub fn read_next(&mut self) -> Result>, E> { + TableTraverse::::read_next(&mut self.inner) + } + + /// Get the previous key-value pair and move the cursor backward. + pub fn read_prev(&mut self) -> Result>, E> { + TableTraverse::::read_prev(&mut self.inner) + } +} + +impl TableCursor +where + C: KvTraverseMut, + T: Table, + E: HotKvReadError, +{ + /// Delete the current key-value pair. + pub fn delete_current(&mut self) -> Result<(), E> { + TableTraverseMut::::delete_current(&mut self.inner) + } + + /// Delete a range of key-value pairs. + pub fn delete_range(&mut self, range: Range) -> Result<(), E> { + TableTraverseMut::::delete_range(&mut self.inner, range) + } +} + +/// A wrapper struct for typed dual-keyed table traversal. +/// +/// This struct wraps a raw cursor and provides type-safe access to dual-keyed +/// table entries. It delegates to the `DualTableTraverse` trait +/// implementation on the inner cursor. +#[derive(Debug)] +pub struct DualTableCursor { + inner: C, + _marker: PhantomData (T, E)>, +} + +impl DualTableCursor { + /// Create a new typed dual-keyed table cursor wrapper. + pub const fn new(cursor: C) -> Self { + Self { inner: cursor, _marker: PhantomData } + } + + /// Get a reference to the inner cursor. + pub const fn inner(&self) -> &C { + &self.inner + } + + /// Get a mutable reference to the inner cursor. + pub fn inner_mut(&mut self) -> &mut C { + &mut self.inner + } + + /// Consume the wrapper and return the inner cursor. + pub fn into_inner(self) -> C { + self.inner + } +} + +impl DualTableCursor +where + C: DualTableTraverse, + T: DualKeyed, + E: HotKvReadError, +{ + /// Return the EXACT value for the specified dual key if it exists. + pub fn exact_dual(&mut self, key1: &T::Key, key2: &T::Key2) -> Result, E> { + DualTableTraverse::::exact_dual(&mut self.inner, key1, key2) + } + + /// Seek to the next key-value pair AT or ABOVE the specified dual key. + pub fn next_dual_above( + &mut self, + key1: &T::Key, + key2: &T::Key2, + ) -> Result>, E> { + DualTableTraverse::::next_dual_above(&mut self.inner, key1, key2) + } + + /// Seek to the next distinct key1, and return the first key-value pair with that key1. + pub fn next_k1(&mut self) -> Result>, E> { + DualTableTraverse::::next_k1(&mut self.inner) + } + + /// Seek to the next distinct key2 for the current key1. + pub fn next_k2(&mut self) -> Result>, E> { + DualTableTraverse::::next_k2(&mut self.inner) + } +} + +// Also provide access to single-key traversal methods for dual-keyed cursors +impl DualTableCursor +where + C: KvTraverse, + T: DualKeyed, + E: HotKvReadError, +{ + /// Get the first key-value pair in the table (raw traversal). + pub fn first(&mut self) -> Result>, E> { + TableTraverse::::first(&mut self.inner) + } + + /// Get the last key-value pair in the table (raw traversal). + pub fn last(&mut self) -> Result>, E> { + TableTraverse::::last(&mut self.inner) + } + + /// Get the next key-value pair and advance the cursor. 
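+
+    // Dual-keyed lookups go through `HotKvRead::traverse_dual::<T>()` (sketch;
+    // e.g. a storage table keyed by account address and slot):
+    //
+    //     let mut cursor = tx.traverse_dual::<T>()?;
+    //     let value = cursor.exact_dual(&key1, &key2)?; // Option<T::Value>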
+ pub fn read_next(&mut self) -> Result>, E> { + TableTraverse::::read_next(&mut self.inner) + } + + /// Get the previous key-value pair and move the cursor backward. + pub fn read_prev(&mut self) -> Result>, E> { + TableTraverse::::read_prev(&mut self.inner) + } +} + +impl DualTableCursor +where + C: KvTraverseMut, + T: DualKeyed, + E: HotKvReadError, +{ + /// Delete the current key-value pair. + pub fn delete_current(&mut self) -> Result<(), E> { + TableTraverseMut::::delete_current(&mut self.inner) + } +} diff --git a/crates/storage/src/lib.rs b/crates/storage/src/lib.rs new file mode 100644 index 0000000..381014c --- /dev/null +++ b/crates/storage/src/lib.rs @@ -0,0 +1,24 @@ +#![doc = include_str!("../README.md")] +#![warn( + missing_copy_implementations, + missing_debug_implementations, + missing_docs, + unreachable_pub, + clippy::missing_const_for_fn, + rustdoc::all +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![deny(unused_must_use, rust_2018_idioms)] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +/// Cold storage module. +pub mod cold; + +/// Hot storage module. +pub mod hot; + +/// Serialization module. +pub mod ser; + +/// Predefined tables module. +pub mod tables; diff --git a/crates/storage/src/ser/error.rs b/crates/storage/src/ser/error.rs new file mode 100644 index 0000000..de83f75 --- /dev/null +++ b/crates/storage/src/ser/error.rs @@ -0,0 +1,46 @@ +/// Error type for deserialization errors. +/// +/// Erases the underlying error type to a boxed trait object or a string +/// message, for convenience. +#[derive(thiserror::Error, Debug)] +pub enum DeserError { + /// Boxed error. + #[error(transparent)] + Boxed(Box), + + /// String error message. + #[error("{0}")] + String(String), + + /// Deserialization ended with extra bytes remaining. + #[error("inexact deserialization: {extra_bytes} extra bytes remaining")] + InexactDeser { + /// Number of extra bytes remaining after deserialization. + extra_bytes: usize, + }, + + /// Not enough data to complete deserialization. + #[error("insufficient data: needed {needed} bytes, but only {available} available")] + InsufficientData { + /// Number of bytes needed. + needed: usize, + /// Number of bytes available. + available: usize, + }, +} + +impl From<&str> for DeserError { + fn from(err: &str) -> Self { + DeserError::String(err.to_string()) + } +} + +impl DeserError { + /// Box an error into a `DeserError`. + pub fn from(err: E) -> Self + where + E: std::error::Error + Send + Sync + 'static, + { + DeserError::Boxed(Box::new(err)) + } +} diff --git a/crates/storage/src/ser/impls.rs b/crates/storage/src/ser/impls.rs new file mode 100644 index 0000000..6c4f3d1 --- /dev/null +++ b/crates/storage/src/ser/impls.rs @@ -0,0 +1,577 @@ +use crate::ser::{DeserError, KeySer, MAX_KEY_SIZE, ValSer}; +use alloy::primitives::{Address, B256, Bloom}; +use bytes::BufMut; +use reth::primitives::StorageEntry; +use reth_db::models::BlockNumberAddress; + +macro_rules! delegate_val_to_key { + ($ty:ty) => { + impl ValSer for $ty { + fn encoded_size(&self) -> usize { + ::SIZE + } + + fn encode_value_to(&self, buf: &mut B) + where + B: BufMut + AsMut<[u8]>, + { + let mut key_buf = [0u8; MAX_KEY_SIZE]; + let key_bytes = KeySer::encode_key(self, &mut key_buf); + buf.put_slice(key_bytes); + } + + fn decode_value(data: &[u8]) -> Result + where + Self: Sized, + { + KeySer::decode_key(&data) + } + } + }; +} + +macro_rules! 
ser_alloy_fixed { + ($size:expr) => { + impl KeySer for alloy::primitives::FixedBytes<$size> { + const SIZE: usize = $size; + + fn encode_key<'a: 'c, 'b: 'c, 'c>(&'a self, _buf: &'b mut [u8; MAX_KEY_SIZE]) -> &'c [u8] { + self.as_ref() + } + + fn decode_key(data: &[u8]) -> Result + where + Self: Sized, + { + if data.len() < $size { + return Err(DeserError::InsufficientData { + needed: $size, + available: data.len(), + }); + } + let mut this = Self::default(); + this.as_mut_slice().copy_from_slice(&data[..$size]); + Ok(this) + } + } + + delegate_val_to_key!(alloy::primitives::FixedBytes<$size>); + }; + + ($($size:expr),* $(,)?) => { + $( + ser_alloy_fixed!($size); + )+ + }; +} + +macro_rules! ser_be_num { + ($ty:ty, $size:expr) => { + impl KeySer for $ty { + const SIZE: usize = $size; + + fn encode_key<'a: 'c, 'b: 'c, 'c>(&'a self, buf: &'b mut [u8; MAX_KEY_SIZE]) -> &'c [u8] { + let be_bytes: [u8; $size] = self.to_be_bytes(); + buf[..$size].copy_from_slice(&be_bytes); + &buf[..$size] + } + + fn decode_key(data: &[u8]) -> Result + where + Self: Sized, + { + if data.len() < $size { + return Err(DeserError::InsufficientData { + needed: $size, + available: data.len(), + }); + } + let bytes: [u8; $size] = data[..$size].try_into().map_err(DeserError::from)?; + Ok(<$ty>::from_be_bytes(bytes)) + } + } + + delegate_val_to_key!($ty); + }; + ($($ty:ty, $size:expr);* $(;)?) => { + $( + ser_be_num!($ty, $size); + )+ + }; +} + +ser_be_num!( + u8, 1; + i8, 1; + u16, 2; + u32, 4; + u64, 8; + u128, 16; + i16, 2; + i32, 4; + i64, 8; + i128, 16; + usize, std::mem::size_of::(); + isize, std::mem::size_of::(); + alloy::primitives::U160, 20; + alloy::primitives::U256, 32; +); + +// NB: 52 is for AccountStorageKey which is (20 + 32). +// 65 is for Signature, which is (1 + 32 + 32). 
+ser_alloy_fixed!(8, 16, 20, 32, 52, 65, 256); +delegate_val_to_key!(alloy::primitives::Address); + +impl KeySer for Address { + const SIZE: usize = 20; + + fn encode_key<'a: 'c, 'b: 'c, 'c>(&'a self, _buf: &'b mut [u8; MAX_KEY_SIZE]) -> &'c [u8] { + self.as_ref() + } + + fn decode_key(data: &[u8]) -> Result { + if data.len() < Self::SIZE { + return Err(DeserError::InsufficientData { needed: Self::SIZE, available: data.len() }); + } + let mut addr = Self::default(); + addr.copy_from_slice(&data[..Self::SIZE]); + Ok(addr) + } +} + +impl ValSer for Bloom { + fn encoded_size(&self) -> usize { + self.as_slice().len() + } + + fn encode_value_to(&self, buf: &mut B) + where + B: BufMut + AsMut<[u8]>, + { + buf.put_slice(self.as_ref()); + } + + fn decode_value(data: &[u8]) -> Result + where + Self: Sized, + { + if data.len() < 256 { + return Err(DeserError::InsufficientData { needed: 256, available: data.len() }); + } + let mut bloom = Self::default(); + bloom.as_mut_slice().copy_from_slice(&data[..256]); + Ok(bloom) + } +} + +impl ValSer for bytes::Bytes { + fn encoded_size(&self) -> usize { + 2 + self.len() + } + + fn encode_value_to(&self, buf: &mut B) + where + B: BufMut + AsMut<[u8]>, + { + buf.put_u16(self.len() as u16); + buf.put_slice(self); + } + + fn decode_value(data: &[u8]) -> Result + where + Self: Sized, + { + if data.len() < 2 { + return Err(DeserError::InsufficientData { needed: 2, available: data.len() }); + } + let len = u16::from_be_bytes(data[0..2].try_into().unwrap()) as usize; + Ok(bytes::Bytes::copy_from_slice(&data[2..2 + len])) + } +} + +impl ValSer for alloy::primitives::Bytes { + fn encoded_size(&self) -> usize { + self.0.encoded_size() + } + + fn encode_value_to(&self, buf: &mut B) + where + B: BufMut + AsMut<[u8]>, + { + self.0.encode_value_to(buf); + } + + fn decode_value(data: &[u8]) -> Result + where + Self: Sized, + { + bytes::Bytes::decode_value(data).map(alloy::primitives::Bytes) + } +} + +impl ValSer for Option +where + T: ValSer, +{ + fn encoded_size(&self) -> usize { + 1 + match self { + Some(inner) => inner.encoded_size(), + None => 0, + } + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + // Simple presence flag + if let Some(inner) = self { + buf.put_u8(1); + inner.encode_value_to(buf); + } else { + buf.put_u8(0); + } + } + + fn decode_value(data: &[u8]) -> Result + where + Self: Sized, + { + let flag = data + .first() + .ok_or(DeserError::InsufficientData { needed: 1, available: data.len() })?; + match flag { + 0 => Ok(None), + 1 => Ok(Some(T::decode_value(&data[1..])?)), + _ => Err(DeserError::String(format!("Invalid Option flag: {}", flag))), + } + } +} + +impl ValSer for Vec +where + T: ValSer, +{ + fn encoded_size(&self) -> usize { + // 2 bytes for length prefix + 2 + self.iter().map(|item| item.encoded_size()).sum::() + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + buf.put_u16(self.len() as u16); + self.iter().for_each(|item| item.encode_value_to(buf)); + } + + fn decode_value(mut data: &[u8]) -> Result + where + Self: Sized, + { + if data.len() < 2 { + return Err(DeserError::InsufficientData { needed: 2, available: data.len() }); + } + + let items = u16::from_be_bytes(data[0..2].try_into().unwrap()) as usize; + data = &data[2..]; + + // Preallocate the vector + let mut vec = Vec::with_capacity(items); + + vec.spare_capacity_mut().iter_mut().try_for_each(|slot| { + // Decode the item and advance the data slice + let item = slot.write(T::decode_value(data)?); + 
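+                // NB: this relies on `encoded_size()` of the freshly decoded item
+                // matching exactly the number of bytes `decode_value` consumed.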
// Advance data slice by the size of the decoded item + data = &data[item.encoded_size()..]; + Ok::<_, DeserError>(()) + })?; + + // SAFETY: + // If we did not shortcut return, we have initialized all `items` + // elements. + unsafe { + vec.set_len(items); + } + Ok(vec) + } +} + +impl KeySer for BlockNumberAddress { + const SIZE: usize = u64::SIZE + Address::SIZE; + + fn encode_key<'a: 'c, 'b: 'c, 'c>(&'a self, buf: &'b mut [u8; MAX_KEY_SIZE]) -> &'c [u8] { + buf[0..8].copy_from_slice(&self.0.0.to_be_bytes()); + buf[8..28].copy_from_slice(self.0.1.as_ref()); + &buf[..Self::SIZE] + } + + fn decode_key(data: &[u8]) -> Result { + if data.len() < Self::SIZE { + return Err(DeserError::InsufficientData { needed: Self::SIZE, available: data.len() }); + } + let number = u64::from_be_bytes(data[0..8].try_into().unwrap()); + let address = Address::from_slice(&data[8..28]); + Ok(BlockNumberAddress((number, address))) + } +} + +impl ValSer for StorageEntry { + fn encoded_size(&self) -> usize { + self.key.encoded_size() + self.value.encoded_size() + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + self.key.encode_value_to(buf); + self.value.encode_value_to(buf); + } + + fn decode_value(data: &[u8]) -> Result + where + Self: Sized, + { + let key: B256 = ValSer::decode_value(data)?; + let value = ValSer::decode_value(&data[key.encoded_size()..])?; + Ok(StorageEntry { key, value }) + } +} +#[cfg(test)] +mod tests { + use super::*; + use alloy::primitives::{Address, Bloom, Bytes as AlloBytes, FixedBytes, U160, U256}; + use bytes::Bytes; + + /// Generic roundtrip test for any ValSer type + #[track_caller] + fn test_roundtrip(original: &T) + where + T: ValSer + PartialEq + std::fmt::Debug, + { + // Encode + let mut buf = bytes::BytesMut::new(); + original.encode_value_to(&mut buf); + let encoded = buf.freeze(); + + // Assert that the encoded size matches + assert_eq!( + original.encoded_size(), + encoded.len(), + "Encoded size mismatch: expected {}, got {}", + original.encoded_size(), + encoded.len() + ); + + // Decode + let decoded = T::decode_value(&encoded).expect("Failed to decode value"); + + // Assert equality + assert_eq!(*original, decoded, "Roundtrip failed"); + } + + #[test] + fn test_integer_roundtrips() { + // Test boundary values for all integer types + test_roundtrip(&0u8); + test_roundtrip(&255u8); + test_roundtrip(&127i8); + test_roundtrip(&-128i8); + + test_roundtrip(&0u16); + test_roundtrip(&65535u16); + test_roundtrip(&32767i16); + test_roundtrip(&-32768i16); + + test_roundtrip(&0u32); + test_roundtrip(&4294967295u32); + test_roundtrip(&2147483647i32); + test_roundtrip(&-2147483648i32); + + test_roundtrip(&0u64); + test_roundtrip(&18446744073709551615u64); + test_roundtrip(&9223372036854775807i64); + test_roundtrip(&-9223372036854775808i64); + + test_roundtrip(&0u128); + test_roundtrip(&340282366920938463463374607431768211455u128); + test_roundtrip(&170141183460469231731687303715884105727i128); + test_roundtrip(&-170141183460469231731687303715884105728i128); + + test_roundtrip(&0usize); + test_roundtrip(&usize::MAX); + test_roundtrip(&0isize); + test_roundtrip(&isize::MAX); + test_roundtrip(&isize::MIN); + } + + #[test] + fn test_u256_roundtrips() { + test_roundtrip(&U256::ZERO); + test_roundtrip(&U256::from(1u64)); + test_roundtrip(&U256::from(255u64)); + test_roundtrip(&U256::from(65535u64)); + test_roundtrip(&U256::from(u64::MAX)); + test_roundtrip(&U256::MAX); + } + + #[test] + fn test_u160_roundtrips() { + test_roundtrip(&U160::ZERO); + 
test_roundtrip(&U160::from(1u64)); + test_roundtrip(&U160::from(u64::MAX)); + // Create a maxed U160 (20 bytes = 160 bits) + let max_u160 = U160::from_be_bytes([0xFF; 20]); + test_roundtrip(&max_u160); + } + + #[test] + fn test_address_roundtrips() { + test_roundtrip(&Address::ZERO); + // Create a test address with known pattern + let test_addr = Address::from([ + 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF, 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, + 0xCD, 0xEF, 0x01, 0x23, 0x45, 0x67, + ]); + test_roundtrip(&test_addr); + } + + #[test] + fn test_fixedbytes_roundtrips() { + // Test various FixedBytes sizes + test_roundtrip(&FixedBytes::<8>::ZERO); + test_roundtrip(&FixedBytes::<16>::ZERO); + test_roundtrip(&FixedBytes::<20>::ZERO); + test_roundtrip(&FixedBytes::<32>::ZERO); + test_roundtrip(&FixedBytes::<52>::ZERO); + test_roundtrip(&FixedBytes::<65>::ZERO); + test_roundtrip(&FixedBytes::<256>::ZERO); + + // Test with non-zero patterns + let pattern_32 = FixedBytes::<32>::from([ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, + 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, + 0x1D, 0x1E, 0x1F, 0x20, + ]); + test_roundtrip(&pattern_32); + } + + #[test] + fn test_bloom_roundtrips() { + test_roundtrip(&Bloom::ZERO); + // Create a bloom with some bits set + let mut bloom_data = [0u8; 256]; + bloom_data[0] = 0xFF; + bloom_data[127] = 0xAA; + bloom_data[255] = 0x55; + let bloom = Bloom::from(bloom_data); + test_roundtrip(&bloom); + } + + #[test] + fn test_bytes_roundtrips() { + // Test bytes::Bytes + test_roundtrip(&Bytes::new()); + test_roundtrip(&Bytes::from_static(b"hello world")); + test_roundtrip(&Bytes::from(vec![0x00, 0x01, 0x02, 0x03, 0xFF, 0xFE, 0xFD])); + + // Test alloy::primitives::Bytes + test_roundtrip(&AlloBytes::new()); + test_roundtrip(&AlloBytes::from_static(b"hello alloy")); + test_roundtrip(&AlloBytes::copy_from_slice(&[0xDE, 0xAD, 0xBE, 0xEF])); + } + + #[test] + fn test_option_roundtrips() { + // None variants + test_roundtrip(&None::); + test_roundtrip(&None::
); + test_roundtrip(&None::); + + // Some variants + test_roundtrip(&Some(42u32)); + test_roundtrip(&Some(u64::MAX)); + test_roundtrip(&Some(Address::ZERO)); + test_roundtrip(&Some(U256::from(12345u64))); + test_roundtrip(&Some(AlloBytes::from_static(b"test"))); + + // Nested options + test_roundtrip(&Some(Some(123u32))); + test_roundtrip(&Some(None::)); + test_roundtrip(&None::>); + } + + #[test] + fn test_vec_roundtrips() { + // Empty vectors + test_roundtrip(&Vec::::new()); + test_roundtrip(&Vec::
::new()); + + // Single element vectors + test_roundtrip(&vec![42u32]); + test_roundtrip(&vec![Address::ZERO]); + + // Multiple element vectors + test_roundtrip(&vec![1u32, 2, 3, 4, 5]); + test_roundtrip(&vec![U256::ZERO, U256::from(1u64), U256::MAX]); + + // Vector of bytes + test_roundtrip(&vec![ + AlloBytes::from_static(b"first"), + AlloBytes::from_static(b"second"), + AlloBytes::new(), + AlloBytes::from_static(b"last"), + ]); + + // Nested vectors + test_roundtrip(&vec![vec![1u32, 2, 3], vec![], vec![4, 5]]); + + // Vector of options + test_roundtrip(&vec![Some(1u32), None, Some(2u32), Some(3u32), None]); + } + + #[test] + fn test_complex_combinations() { + // Option of Vec + test_roundtrip(&Some(vec![1u32, 2, 3, 4])); + test_roundtrip(&None::>); + + // Vec of Options + test_roundtrip(&vec![Some(Address::ZERO), None, Some(Address::ZERO)]); + + // Option of Option + test_roundtrip(&Some(Some(42u32))); + test_roundtrip(&Some(None::)); + + // Complex nested structure + let complex = vec![ + Some(vec![AlloBytes::from_static(b"hello")]), + None, + Some(vec![ + AlloBytes::from_static(b"world"), + AlloBytes::new(), + AlloBytes::from_static(b"!"), + ]), + ]; + test_roundtrip(&complex); + } + + #[test] + fn test_edge_cases() { + // Maximum values that should still work + test_roundtrip(&vec![0u8; 65535]); // Max length for Vec + + // Large FixedBytes + let large_fixed = FixedBytes::<256>::from([0xFF; 256]); + test_roundtrip(&large_fixed); + + // Very large U256 + let large_u256 = U256::from_str_radix( + "115792089237316195423570985008687907853269984665640564039457584007913129639935", + 10, + ) + .unwrap(); + test_roundtrip(&large_u256); + } +} diff --git a/crates/storage/src/ser/mod.rs b/crates/storage/src/ser/mod.rs new file mode 100644 index 0000000..2f34e88 --- /dev/null +++ b/crates/storage/src/ser/mod.rs @@ -0,0 +1,9 @@ +mod error; +pub use error::DeserError; + +mod traits; +pub use traits::{KeySer, MAX_KEY_SIZE, ValSer}; + +mod impls; + +mod reth_impls; diff --git a/crates/storage/src/ser/reth_impls.rs b/crates/storage/src/ser/reth_impls.rs new file mode 100644 index 0000000..24d5dd6 --- /dev/null +++ b/crates/storage/src/ser/reth_impls.rs @@ -0,0 +1,1970 @@ +use crate::ser::{DeserError, KeySer, MAX_KEY_SIZE, ValSer}; +use alloy::{ + consensus::{EthereumTxEnvelope, Signed, TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy}, + eips::{ + eip2930::{AccessList, AccessListItem}, + eip7702::{Authorization, SignedAuthorization}, + }, + primitives::{Address, B256, FixedBytes, KECCAK256_EMPTY, Signature, TxKind, U256}, +}; +use reth::{ + primitives::{Account, Bytecode, Header, Log, LogData, TransactionSigned, TxType}, + revm::bytecode::{JumpTable, LegacyAnalyzedBytecode, eip7702::Eip7702Bytecode}, +}; +use reth_db_api::{ + BlockNumberList, + models::{AccountBeforeTx, ShardedKey, StoredBlockBodyIndices}, +}; + +// Specialized impls for the sharded key types. This was implemented +// generically, but there are only 2 types, and we can skip pushing a scratch +// buffer, because we know the 2 types involved are already fixed-size byte +// arrays. +macro_rules! 
sharded_key { + ($ty:ty) => { + impl KeySer for ShardedKey<$ty> { + const SIZE: usize = <$ty as KeySer>::SIZE + u64::SIZE; + + fn encode_key<'a: 'c, 'b: 'c, 'c>( + &'a self, + buf: &'b mut [u8; MAX_KEY_SIZE], + ) -> &'c [u8] { + const SIZE: usize = <$ty as KeySer>::SIZE; + + let prefix = self.key.as_slice(); + + buf[0..SIZE].copy_from_slice(prefix); + buf[SIZE..Self::SIZE].copy_from_slice(&self.highest_block_number.to_be_bytes()); + + &buf[0..Self::SIZE] + } + + fn decode_key(data: &[u8]) -> Result { + const SIZE: usize = <$ty as KeySer>::SIZE; + if data.len() < Self::SIZE { + return Err(DeserError::InsufficientData { + needed: Self::SIZE, + available: data.len(), + }); + } + + let key = <$ty as KeySer>::decode_key(&data[0..SIZE])?; + let highest_block_number = u64::decode_key(&data[SIZE..SIZE + 8])?; + Ok(Self { key, highest_block_number }) + } + } + }; +} + +sharded_key!(B256); +sharded_key!(Address); + +macro_rules! by_props { + (@size $($prop:ident),* $(,)?) => { + { + 0 $( + + $prop.encoded_size() + )+ + } + }; + (@encode $buf:ident; $($prop:ident),* $(,)?) => { + { + $( + $prop.encode_value_to($buf); + )+ + } + }; + (@decode $data:ident; $($prop:ident),* $(,)?) => { + { + $( + *$prop = ValSer::decode_value($data)?; + $data = &$data[$prop.encoded_size()..]; + )* + } + }; +} + +impl ValSer for BlockNumberList { + fn encoded_size(&self) -> usize { + 2 + self.serialized_size() + } + + fn encode_value_to(&self, mut buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + use std::io::Write; + let mut writer: bytes::buf::Writer<&mut B> = bytes::BufMut::writer(&mut buf); + + debug_assert!( + self.serialized_size() <= u16::MAX as usize, + "BlockNumberList too large to encode" + ); + + writer.write_all(&(self.serialized_size() as u16).to_be_bytes()).unwrap(); + self.serialize_into(writer).unwrap(); + } + + fn decode_value(data: &[u8]) -> Result + where + Self: Sized, + { + let size = u16::decode_value(&data[..2])? as usize; + BlockNumberList::from_bytes(&data[2..2 + size]) + .map_err(|err| DeserError::String(format!("Failed to decode BlockNumberList {err}"))) + } +} + +impl ValSer for Header { + fn encoded_size(&self) -> usize { + // NB: Destructure to ensure changes are compile errors and mistakes + // are unused var warnings. + let Header { + parent_hash, + ommers_hash, + beneficiary, + state_root, + transactions_root, + receipts_root, + logs_bloom, + difficulty, + number, + gas_limit, + gas_used, + timestamp, + extra_data, + mix_hash, + nonce, + base_fee_per_gas, + withdrawals_root, + blob_gas_used, + excess_blob_gas, + parent_beacon_block_root, + requests_hash, + } = self; + + by_props!( + @size + parent_hash, + ommers_hash, + beneficiary, + state_root, + transactions_root, + receipts_root, + logs_bloom, + difficulty, + number, + gas_limit, + gas_used, + timestamp, + extra_data, + mix_hash, + nonce, + base_fee_per_gas, + withdrawals_root, + blob_gas_used, + excess_blob_gas, + parent_beacon_block_root, + requests_hash, + ) + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + // NB: Destructure to ensure changes are compile errors and mistakes + // are unused var warnings. 
+ let Header { + parent_hash, + ommers_hash, + beneficiary, + state_root, + transactions_root, + receipts_root, + logs_bloom, + difficulty, + number, + gas_limit, + gas_used, + timestamp, + extra_data, + mix_hash, + nonce, + base_fee_per_gas, + withdrawals_root, + blob_gas_used, + excess_blob_gas, + parent_beacon_block_root, + requests_hash, + } = self; + + by_props!( + @encode buf; + parent_hash, + ommers_hash, + beneficiary, + state_root, + transactions_root, + receipts_root, + logs_bloom, + difficulty, + number, + gas_limit, + gas_used, + timestamp, + extra_data, + mix_hash, + nonce, + base_fee_per_gas, + withdrawals_root, + blob_gas_used, + excess_blob_gas, + parent_beacon_block_root, + requests_hash, + ) + } + + fn decode_value(mut data: &[u8]) -> Result + where + Self: Sized, + { + // NB: Destructure to ensure changes are compile errors and mistakes + // are unused var warnings. + let mut h = Header::default(); + let Header { + parent_hash, + ommers_hash, + beneficiary, + state_root, + transactions_root, + receipts_root, + logs_bloom, + difficulty, + number, + gas_limit, + gas_used, + timestamp, + extra_data, + mix_hash, + nonce, + base_fee_per_gas, + withdrawals_root, + blob_gas_used, + excess_blob_gas, + parent_beacon_block_root, + requests_hash, + } = &mut h; + + by_props!( + @decode data; + parent_hash, + ommers_hash, + beneficiary, + state_root, + transactions_root, + receipts_root, + logs_bloom, + difficulty, + number, + gas_limit, + gas_used, + timestamp, + extra_data, + mix_hash, + nonce, + base_fee_per_gas, + withdrawals_root, + blob_gas_used, + excess_blob_gas, + parent_beacon_block_root, + requests_hash, + ); + Ok(h) + } +} + +impl ValSer for Account { + fn encoded_size(&self) -> usize { + // NB: Destructure to ensure changes are compile errors and mistakes + // are unused var warnings. + let Account { nonce, balance, bytecode_hash: _ } = self; + + nonce.encoded_size() + balance.encoded_size() + 32 + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + // NB: Destructure to ensure changes are compile errors and mistakes + // are unused var warnings. + let Account { nonce, balance, bytecode_hash } = self; + { + nonce.encode_value_to(buf); + balance.encode_value_to(buf); + bytecode_hash.unwrap_or(KECCAK256_EMPTY).encode_value_to(buf); + } + } + + fn decode_value(data: &[u8]) -> Result + where + Self: Sized, + { + // NB: Destructure to ensure changes are compile errors and mistakes + // are unused var warnings. + let mut account = Account::default(); + let Account { nonce, balance, bytecode_hash } = &mut account; + + let mut data = data; + { + *nonce = ValSer::decode_value(data)?; + data = &data[nonce.encoded_size()..]; + *balance = ValSer::decode_value(data)?; + data = &data[balance.encoded_size()..]; + + let bch: B256 = ValSer::decode_value(data)?; + if bch == KECCAK256_EMPTY { + *bytecode_hash = None; + } else { + *bytecode_hash = Some(bch); + } + }; + Ok(account) + } +} + +impl ValSer for LogData { + fn encoded_size(&self) -> usize { + let LogData { data, .. } = self; + let topics = self.topics(); + 2 + topics.iter().map(|t| t.encoded_size()).sum::() + data.encoded_size() + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + let LogData { data, .. 
} = self; + let topics = self.topics(); + buf.put_u16(topics.len() as u16); + for topic in topics { + topic.encode_value_to(buf); + } + data.encode_value_to(buf); + } + + fn decode_value(data: &[u8]) -> Result + where + Self: Sized, + { + let mut data = data; + let topics_len = u16::decode_value(&data[..2])? as usize; + data = &data[2..]; + + if topics_len > 4 { + return Err(DeserError::String("LogData topics length exceeds maximum of 4".into())); + } + + let mut topics = Vec::with_capacity(topics_len); + for _ in 0..topics_len { + let topic = B256::decode_value(data)?; + data = &data[topic.encoded_size()..]; + topics.push(topic); + } + + let log_data = alloy::primitives::Bytes::decode_value(data)?; + + Ok(LogData::new_unchecked(topics, log_data)) + } +} + +impl ValSer for Log { + fn encoded_size(&self) -> usize { + let Log { address, data } = self; + by_props!( + @size + address, + data, + ) + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + let Log { address, data } = self; + by_props!( + @encode buf; + address, + data, + ) + } + + fn decode_value(data: &[u8]) -> Result + where + Self: Sized, + { + let mut log = Log::::default(); + let Log { address, data: log_data } = &mut log; + + let mut data = data; + by_props!( + @decode data; + address, + log_data, + ); + Ok(log) + } +} + +impl ValSer for TxType { + fn encoded_size(&self) -> usize { + 1 + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + buf.put_u8(*self as u8); + } + + fn decode_value(data: &[u8]) -> Result + where + Self: Sized, + { + let byte = u8::decode_value(data)?; + TxType::try_from(byte) + .map_err(|_| DeserError::String(format!("Invalid TxType value: {}", byte))) + } +} + +impl ValSer for StoredBlockBodyIndices { + fn encoded_size(&self) -> usize { + let StoredBlockBodyIndices { first_tx_num, tx_count } = self; + by_props!( + @size + first_tx_num, + tx_count, + ) + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + let StoredBlockBodyIndices { first_tx_num, tx_count } = self; + by_props!( + @encode buf; + first_tx_num, + tx_count, + ) + } + + fn decode_value(data: &[u8]) -> Result + where + Self: Sized, + { + let mut indices = StoredBlockBodyIndices::default(); + let StoredBlockBodyIndices { first_tx_num, tx_count } = &mut indices; + + let mut data = data; + by_props!( + @decode data; + first_tx_num, + tx_count, + ); + Ok(indices) + } +} + +impl ValSer for Eip7702Bytecode { + fn encoded_size(&self) -> usize { + let Eip7702Bytecode { delegated_address, version, raw } = self; + by_props!( + @size + delegated_address, + version, + raw, + ) + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + let Eip7702Bytecode { delegated_address, version, raw } = self; + by_props!( + @encode buf; + delegated_address, + version, + raw, + ) + } + + fn decode_value(data: &[u8]) -> Result + where + Self: Sized, + { + let mut eip7702 = Eip7702Bytecode { + delegated_address: Address::ZERO, + version: 0, + raw: alloy::primitives::Bytes::new(), + }; + let Eip7702Bytecode { delegated_address, version, raw } = &mut eip7702; + + let mut data = data; + by_props!( + @decode data; + delegated_address, + version, + raw, + ); + Ok(eip7702) + } +} + +impl ValSer for JumpTable { + fn encoded_size(&self) -> usize { + 2 + 2 + self.as_slice().len() + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + debug_assert!(self.len() <= u16::MAX as usize, 
"JumpTable bitlen too large to encode"); + debug_assert!(self.as_slice().len() <= u16::MAX as usize, "JumpTable too large to encode"); + buf.put_u16(self.len() as u16); + buf.put_u16(self.as_slice().len() as u16); + buf.put_slice(self.as_slice()); + } + + fn decode_value(data: &[u8]) -> Result + where + Self: Sized, + { + let bit_len = u16::decode_value(&data[..2])? as usize; + let slice_len = u16::decode_value(&data[2..4])? as usize; + Ok(JumpTable::from_slice(&data[4..4 + slice_len], bit_len)) + } +} + +impl ValSer for LegacyAnalyzedBytecode { + fn encoded_size(&self) -> usize { + let bytecode = self.bytecode(); + let original_len = self.original_len(); + let jump_table = self.jump_table(); + by_props!( + @size + bytecode, + original_len, + jump_table, + ) + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + let bytecode = self.bytecode(); + let original_len = self.original_len(); + let jump_table = self.jump_table(); + by_props!( + @encode buf; + bytecode, + original_len, + jump_table, + ) + } + + fn decode_value(mut data: &[u8]) -> Result + where + Self: Sized, + { + let mut bytecode = alloy::primitives::Bytes::new(); + let mut original_len = 0usize; + let mut jump_table = JumpTable::default(); + + let bc = &mut bytecode; + let ol = &mut original_len; + let jt = &mut jump_table; + by_props!( + @decode data; + bc, + ol, + jt, + ); + Ok(LegacyAnalyzedBytecode::new(bytecode, original_len, jump_table)) + } +} + +impl ValSer for Bytecode { + fn encoded_size(&self) -> usize { + 1 + match &self.0 { + reth::revm::state::Bytecode::Eip7702(code) => code.encoded_size(), + reth::revm::state::Bytecode::LegacyAnalyzed(code) => code.encoded_size(), + } + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + match &self.0 { + reth::revm::state::Bytecode::Eip7702(code) => { + buf.put_u8(1); + code.encode_value_to(buf); + } + reth::revm::state::Bytecode::LegacyAnalyzed(code) => { + buf.put_u8(0); + code.encode_value_to(buf); + } + } + } + + fn decode_value(data: &[u8]) -> Result + where + Self: Sized, + { + let ty = u8::decode_value(&data[..1])?; + let data = &data[1..]; + match ty { + 0 => { + let analyzed = LegacyAnalyzedBytecode::decode_value(data)?; + Ok(Bytecode(reth::revm::state::Bytecode::LegacyAnalyzed(analyzed))) + } + 1 => { + let eip7702 = Eip7702Bytecode::decode_value(data)?; + Ok(Bytecode(reth::revm::state::Bytecode::Eip7702(eip7702))) + } + _ => Err(DeserError::String(format!("Invalid Bytecode type value: {}. 
Max is 1.", ty))), + } + } +} + +impl ValSer for AccountBeforeTx { + fn encoded_size(&self) -> usize { + let AccountBeforeTx { address, info } = self; + by_props!( + @size + address, + info, + ) + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + let AccountBeforeTx { address, info } = self; + by_props!( + @encode buf; + address, + info, + ) + } + + fn decode_value(mut data: &[u8]) -> Result + where + Self: Sized, + { + let mut abt = AccountBeforeTx::default(); + let AccountBeforeTx { address, info } = &mut abt; + + by_props!( + @decode data; + address, + info, + ); + Ok(abt) + } +} + +impl ValSer for Signature { + fn encoded_size(&self) -> usize { + 65 + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + FixedBytes(self.as_bytes()).encode_value_to(buf); + } + + fn decode_value(data: &[u8]) -> Result + where + Self: Sized, + { + let bytes = FixedBytes::<65>::decode_value(data)?; + Self::from_raw_array(bytes.as_ref()) + .map_err(|e| DeserError::String(format!("Invalid signature bytes: {}", e))) + } +} + +impl ValSer for TxKind { + fn encoded_size(&self) -> usize { + 1 + match self { + TxKind::Create => 0, + TxKind::Call(_) => 20, + } + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + match self { + TxKind::Create => { + buf.put_u8(0); + } + TxKind::Call(address) => { + buf.put_u8(1); + address.encode_value_to(buf); + } + } + } + + fn decode_value(data: &[u8]) -> Result + where + Self: Sized, + { + let ty = u8::decode_value(&data[..1])?; + let data = &data[1..]; + match ty { + 0 => Ok(TxKind::Create), + 1 => { + let address = Address::decode_value(data)?; + Ok(TxKind::Call(address)) + } + _ => Err(DeserError::String(format!("Invalid TxKind type value: {}. 
Max is 1.", ty))), + } + } +} + +impl ValSer for AccessListItem { + fn encoded_size(&self) -> usize { + let AccessListItem { address, storage_keys } = self; + by_props!( + @size + address, + storage_keys, + ) + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + let AccessListItem { address, storage_keys } = self; + by_props!( + @encode buf; + address, + storage_keys, + ) + } + + fn decode_value(data: &[u8]) -> Result + where + Self: Sized, + { + let mut item = AccessListItem::default(); + let AccessListItem { address, storage_keys } = &mut item; + + let mut data = data; + by_props!( + @decode data; + address, + storage_keys, + ); + Ok(item) + } +} + +impl ValSer for AccessList { + fn encoded_size(&self) -> usize { + self.0.encoded_size() + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + self.0.encode_value_to(buf); + } + + fn decode_value(data: &[u8]) -> Result + where + Self: Sized, + { + Vec::::decode_value(data).map(AccessList) + } +} + +impl ValSer for Authorization { + fn encoded_size(&self) -> usize { + let Authorization { chain_id, address, nonce } = self; + by_props!( + @size + chain_id, + address, + nonce, + ) + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + let Authorization { chain_id, address, nonce } = self; + by_props!( + @encode buf; + chain_id, + address, + nonce, + ) + } + + fn decode_value(data: &[u8]) -> Result + where + Self: Sized, + { + let mut auth = Authorization { chain_id: U256::ZERO, address: Address::ZERO, nonce: 0 }; + let Authorization { chain_id, address, nonce } = &mut auth; + + let mut data = data; + by_props!( + @decode data; + chain_id, + address, + nonce, + ); + Ok(auth) + } +} + +impl ValSer for SignedAuthorization { + fn encoded_size(&self) -> usize { + let auth = self.inner(); + let y_parity = self.y_parity(); + let r = self.r(); + let s = self.s(); + by_props!( + @size + auth, + y_parity, + r, + s, + ) + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + let auth = self.inner(); + let y_parity = &self.y_parity(); + let r = &self.r(); + let s = &self.s(); + by_props!( + @encode buf; + auth, + y_parity, + r, + s, + ) + } + + fn decode_value(mut data: &[u8]) -> Result + where + Self: Sized, + { + let mut auth = Authorization { chain_id: U256::ZERO, address: Address::ZERO, nonce: 0 }; + let mut y_parity = 0u8; + let mut r = U256::ZERO; + let mut s = U256::ZERO; + + let ap = &mut auth; + let yp = &mut y_parity; + let rr = &mut r; + let ss = &mut s; + + by_props!( + @decode data; + ap, + yp, + rr, + ss, + ); + Ok(SignedAuthorization::new_unchecked(auth, y_parity, r, s)) + } +} + +impl ValSer for TxLegacy { + fn encoded_size(&self) -> usize { + let TxLegacy { chain_id, nonce, gas_price, gas_limit, to, value, input } = self; + by_props!( + @size + chain_id, + nonce, + gas_price, + gas_limit, + to, + value, + input, + ) + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + let TxLegacy { chain_id, nonce, gas_price, gas_limit, to, value, input } = self; + by_props!( + @encode buf; + chain_id, + nonce, + gas_price, + gas_limit, + to, + value, + input, + ) + } + + fn decode_value(mut data: &[u8]) -> Result + where + Self: Sized, + { + let mut tx = TxLegacy::default(); + let TxLegacy { chain_id, nonce, gas_price, gas_limit, to, value, input } = &mut tx; + + by_props!( + @decode data; + chain_id, + nonce, + gas_price, + gas_limit, + to, + value, + 
input, + ); + Ok(tx) + } +} + +impl ValSer for TxEip2930 { + fn encoded_size(&self) -> usize { + let TxEip2930 { chain_id, nonce, gas_price, gas_limit, to, value, input, access_list } = + self; + by_props!( + @size + chain_id, + nonce, + gas_price, + gas_limit, + to, + value, + input, + access_list, + ) + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + let TxEip2930 { chain_id, nonce, gas_price, gas_limit, to, value, input, access_list } = + self; + by_props!( + @encode buf; + chain_id, + nonce, + gas_price, + gas_limit, + to, + value, + input, + access_list, + ) + } + + fn decode_value(mut data: &[u8]) -> Result + where + Self: Sized, + { + let mut tx = TxEip2930::default(); + let TxEip2930 { chain_id, nonce, gas_price, gas_limit, to, value, input, access_list } = + &mut tx; + + by_props!( + @decode data; + chain_id, + nonce, + gas_price, + gas_limit, + to, + value, + input, + access_list, + ); + Ok(tx) + } +} + +impl ValSer for TxEip1559 { + fn encoded_size(&self) -> usize { + let TxEip1559 { + chain_id, + nonce, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + access_list, + input, + } = self; + by_props!( + @size + chain_id, + nonce, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + access_list, + input + ) + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + let TxEip1559 { + chain_id, + nonce, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + access_list, + input, + } = self; + by_props!( + @encode buf; + chain_id, + nonce, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + access_list, + input + ) + } + + fn decode_value(data: &[u8]) -> Result + where + Self: Sized, + { + let mut tx = TxEip1559::default(); + let TxEip1559 { + chain_id, + nonce, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + access_list, + input, + } = &mut tx; + + let mut data = data; + by_props!( + @decode data; + chain_id, + nonce, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + access_list, + input + ); + Ok(tx) + } +} + +impl ValSer for TxEip4844 { + fn encoded_size(&self) -> usize { + let TxEip4844 { + chain_id, + nonce, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + access_list, + blob_versioned_hashes, + max_fee_per_blob_gas, + input, + } = self; + by_props!( + @size + chain_id, + nonce, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + access_list, + blob_versioned_hashes, + max_fee_per_blob_gas, + input, + ) + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + let TxEip4844 { + chain_id, + nonce, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + access_list, + blob_versioned_hashes, + max_fee_per_blob_gas, + input, + } = self; + by_props!( + @encode buf; + chain_id, + nonce, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + access_list, + blob_versioned_hashes, + max_fee_per_blob_gas, + input, + ) + } + + fn decode_value(data: &[u8]) -> Result + where + Self: Sized, + { + let mut tx = TxEip4844::default(); + let TxEip4844 { + chain_id, + nonce, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + access_list, + blob_versioned_hashes, + max_fee_per_blob_gas, + input, + } = &mut tx; + + let mut data = data; + by_props!( + @decode data; + chain_id, + nonce, + gas_limit, + 
max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + access_list, + blob_versioned_hashes, + max_fee_per_blob_gas, + input, + ); + Ok(tx) + } +} + +impl ValSer for TxEip7702 { + fn encoded_size(&self) -> usize { + let TxEip7702 { + chain_id, + nonce, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + access_list, + authorization_list, + input, + } = self; + by_props!( + @size + chain_id, + nonce, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + access_list, + authorization_list, + input, + ) + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + let TxEip7702 { + chain_id, + nonce, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + access_list, + authorization_list, + input, + } = self; + by_props!( + @encode buf; + chain_id, + nonce, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + access_list, + authorization_list, + input, + ) + } + + fn decode_value(mut data: &[u8]) -> Result + where + Self: Sized, + { + let mut tx = TxEip7702::default(); + let TxEip7702 { + chain_id, + nonce, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + access_list, + authorization_list, + input, + } = &mut tx; + by_props!( + @decode data; + chain_id, + nonce, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, + to, + value, + access_list, + authorization_list, + input, + ); + Ok(tx) + } +} + +impl ValSer for Signed +where + T: ValSer, + Sig: ValSer, +{ + fn encoded_size(&self) -> usize { + self.signature().encoded_size() + self.tx().encoded_size() + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + self.signature().encode_value_to(buf); + self.tx().encode_value_to(buf); + } + + fn decode_value(data: &[u8]) -> Result + where + Self: Sized, + { + let mut data = data; + + let signature = Sig::decode_value(data)?; + data = &data[signature.encoded_size()..]; + + let tx = T::decode_value(data)?; + + Ok(Signed::new_unhashed(tx, signature)) + } +} + +impl ValSer for TransactionSigned { + fn encoded_size(&self) -> usize { + self.tx_type().encoded_size() + + match self { + EthereumTxEnvelope::Legacy(signed) => signed.encoded_size(), + EthereumTxEnvelope::Eip2930(signed) => signed.encoded_size(), + EthereumTxEnvelope::Eip1559(signed) => signed.encoded_size(), + EthereumTxEnvelope::Eip4844(signed) => signed.encoded_size(), + EthereumTxEnvelope::Eip7702(signed) => signed.encoded_size(), + } + } + + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>, + { + self.tx_type().encode_value_to(buf); + match self { + EthereumTxEnvelope::Legacy(signed) => { + signed.encode_value_to(buf); + } + EthereumTxEnvelope::Eip2930(signed) => { + signed.encode_value_to(buf); + } + EthereumTxEnvelope::Eip1559(signed) => { + signed.encode_value_to(buf); + } + EthereumTxEnvelope::Eip4844(signed) => { + signed.encode_value_to(buf); + } + EthereumTxEnvelope::Eip7702(signed) => { + signed.encode_value_to(buf); + } + } + } + + fn decode_value(data: &[u8]) -> Result + where + Self: Sized, + { + let ty = TxType::decode_value(data)?; + let data = &data[ty.encoded_size()..]; + match ty { + TxType::Legacy => ValSer::decode_value(data).map(EthereumTxEnvelope::Legacy), + TxType::Eip2930 => ValSer::decode_value(data).map(EthereumTxEnvelope::Eip2930), + TxType::Eip1559 => ValSer::decode_value(data).map(EthereumTxEnvelope::Eip1559), + TxType::Eip4844 => 
ValSer::decode_value(data).map(EthereumTxEnvelope::Eip4844), + TxType::Eip7702 => ValSer::decode_value(data).map(EthereumTxEnvelope::Eip7702), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloy::primitives::{ + Address, B256, Bloom, Bytes as AlloBytes, Signature, TxKind, U256, keccak256, + }; + use alloy::{ + consensus::{TxEip1559, TxEip2930, TxEip4844, TxEip7702, TxLegacy}, + eips::{ + eip2930::{AccessList, AccessListItem}, + eip7702::{Authorization, SignedAuthorization}, + }, + }; + use reth::primitives::{Account, Header, Log, LogData, TxType}; + use reth::revm::bytecode::JumpTable; + use reth_db_api::{BlockNumberList, models::StoredBlockBodyIndices}; + + /// Generic roundtrip test for any ValSer type + #[track_caller] + fn test_roundtrip(original: &T) + where + T: ValSer + PartialEq + std::fmt::Debug, + { + // Encode + let mut buf = bytes::BytesMut::new(); + original.encode_value_to(&mut buf); + let encoded = buf.freeze(); + + // Assert that the encoded size matches + assert_eq!( + original.encoded_size(), + encoded.len(), + "Encoded size mismatch: expected {}, got {}", + original.encoded_size(), + encoded.len() + ); + + // Decode + let decoded = T::decode_value(&encoded).expect("Failed to decode value"); + + // Assert equality + assert_eq!(*original, decoded, "Roundtrip failed"); + } + + #[test] + fn test_blocknumberlist_roundtrips() { + // Empty list + test_roundtrip(&BlockNumberList::empty()); + + // Single item + let mut single = BlockNumberList::empty(); + single.push(42u64).unwrap(); + test_roundtrip(&single); + + // Multiple items + let mut multiple = BlockNumberList::empty(); + for i in [0, 1, 255, 256, 65535, 65536, u64::MAX] { + multiple.push(i).unwrap(); + } + test_roundtrip(&multiple); + } + + #[test] + fn test_account_roundtrips() { + // Default account + test_roundtrip(&Account::default()); + + // Account with values + let account = Account { + nonce: 42, + balance: U256::from(123456789u64), + bytecode_hash: Some(keccak256(b"hello world")), + }; + test_roundtrip(&account); + + // Account with max values + let max_account = Account { + nonce: u64::MAX, + balance: U256::MAX, + bytecode_hash: Some(B256::from([0xFF; 32])), + }; + test_roundtrip(&max_account); + } + + #[test] + fn test_header_roundtrips() { + // Default header + test_roundtrip(&Header::default()); + + // Header with some values + let header = Header { + number: 12345, + gas_limit: 8000000, + timestamp: 1234567890, + difficulty: U256::from(1000000u64), + ..Default::default() + }; + test_roundtrip(&header); + } + + #[test] + fn test_logdata_roundtrips() { + // Empty log data + test_roundtrip(&LogData::new_unchecked(vec![], AlloBytes::new())); + + // Log data with one topic + test_roundtrip(&LogData::new_unchecked( + vec![B256::from([1; 32])], + AlloBytes::from_static(b"hello"), + )); + + // Log data with multiple topics + test_roundtrip(&LogData::new_unchecked( + vec![ + B256::from([1; 32]), + B256::from([2; 32]), + B256::from([3; 32]), + B256::from([4; 32]), + ], + AlloBytes::from_static(b"world"), + )); + } + + #[test] + fn test_log_roundtrips() { + let log_data = LogData::new_unchecked( + vec![B256::from([1; 32]), B256::from([2; 32])], + AlloBytes::from_static(b"test log data"), + ); + let log = Log { address: Address::from([0x42; 20]), data: log_data }; + test_roundtrip(&log); + } + + #[test] + fn test_txtype_roundtrips() { + test_roundtrip(&TxType::Legacy); + test_roundtrip(&TxType::Eip2930); + test_roundtrip(&TxType::Eip1559); + test_roundtrip(&TxType::Eip4844); + 
test_roundtrip(&TxType::Eip7702); + } + + #[test] + fn test_stored_block_body_indices_roundtrips() { + test_roundtrip(&StoredBlockBodyIndices { first_tx_num: 0, tx_count: 0 }); + + test_roundtrip(&StoredBlockBodyIndices { first_tx_num: 12345, tx_count: 67890 }); + + test_roundtrip(&StoredBlockBodyIndices { first_tx_num: u64::MAX, tx_count: u64::MAX }); + } + + #[test] + fn test_signature_roundtrips() { + test_roundtrip(&Signature::test_signature()); + + // Zero signature + let zero_sig = Signature::new(U256::ZERO, U256::ZERO, false); + test_roundtrip(&zero_sig); + + // Max signature + let max_sig = Signature::new(U256::MAX, U256::MAX, true); + test_roundtrip(&max_sig); + } + + #[test] + fn test_txkind_roundtrips() { + test_roundtrip(&TxKind::Create); + test_roundtrip(&TxKind::Call(Address::ZERO)); + test_roundtrip(&TxKind::Call(Address::from([0xFF; 20]))); + } + + #[test] + fn test_accesslist_roundtrips() { + // Empty access list + test_roundtrip(&AccessList::default()); + + // Access list with one item + let item = AccessListItem { + address: Address::from([0x12; 20]), + storage_keys: vec![B256::from([0x34; 32])], + }; + test_roundtrip(&AccessList(vec![item])); + + // Access list with multiple items and keys + let items = vec![ + AccessListItem { + address: Address::repeat_byte(11), + storage_keys: vec![B256::from([0x22; 32]), B256::from([0x33; 32])], + }, + AccessListItem { address: Address::from([0x44; 20]), storage_keys: vec![] }, + AccessListItem { + address: Address::from([0x55; 20]), + storage_keys: vec![B256::from([0x66; 32])], + }, + ]; + test_roundtrip(&AccessList(items)); + } + + #[test] + fn test_authorization_roundtrips() { + test_roundtrip(&Authorization { + chain_id: U256::from(1u64), + address: Address::repeat_byte(11), + nonce: 0, + }); + + test_roundtrip(&Authorization { + chain_id: U256::MAX, + address: Address::from([0xFF; 20]), + nonce: u64::MAX, + }); + } + + #[test] + fn test_signed_authorization_roundtrips() { + let auth = Authorization { + chain_id: U256::from(1u64), + address: Address::repeat_byte(11), + nonce: 42, + }; + let signed_auth = + SignedAuthorization::new_unchecked(auth, 1, U256::from(12345u64), U256::from(67890u64)); + test_roundtrip(&signed_auth); + } + + #[test] + fn test_tx_legacy_roundtrips() { + test_roundtrip(&TxLegacy::default()); + + let tx = TxLegacy { + chain_id: Some(1), + nonce: 42, + gas_price: 20_000_000_000, + gas_limit: 21000u64, + to: TxKind::Call(Address::repeat_byte(11)), + value: U256::from(1000000000000000000u64), // 1 ETH in wei + input: AlloBytes::from_static(b"hello world"), + }; + test_roundtrip(&tx); + } + + #[test] + fn test_tx_eip2930_roundtrips() { + test_roundtrip(&TxEip2930::default()); + + let access_list = AccessList(vec![AccessListItem { + address: Address::from([0x22; 20]), + storage_keys: vec![B256::from([0x33; 32])], + }]); + + let tx = TxEip2930 { + chain_id: 1, + nonce: 42, + gas_price: 20_000_000_000, + gas_limit: 21000u64, + to: TxKind::Call(Address::repeat_byte(11)), + value: U256::from(1000000000000000000u64), + input: AlloBytes::from_static(b"eip2930 tx"), + access_list, + }; + test_roundtrip(&tx); + } + + #[test] + fn test_tx_eip1559_roundtrips() { + test_roundtrip(&TxEip1559::default()); + + let tx = TxEip1559 { + chain_id: 1, + nonce: 42, + gas_limit: 21000u64, + max_fee_per_gas: 30_000_000_000, + max_priority_fee_per_gas: 2_000_000_000, + to: TxKind::Call(Address::repeat_byte(11)), + value: U256::from(1000000000000000000u64), + input: AlloBytes::from_static(b"eip1559 tx"), + access_list: 
AccessList::default(), + }; + test_roundtrip(&tx); + } + + #[test] + fn test_tx_eip4844_roundtrips() { + test_roundtrip(&TxEip4844::default()); + + let tx = TxEip4844 { + chain_id: 1, + nonce: 42, + gas_limit: 21000u64, + max_fee_per_gas: 30_000_000_000, + max_priority_fee_per_gas: 2_000_000_000, + to: Address::repeat_byte(11), + value: U256::from(1000000000000000000u64), + input: AlloBytes::from_static(b"eip4844 tx"), + access_list: AccessList::default(), + blob_versioned_hashes: vec![B256::from([0x44; 32])], + max_fee_per_blob_gas: 1_000_000, + }; + test_roundtrip(&tx); + } + + #[test] + fn test_tx_eip7702_roundtrips() { + test_roundtrip(&TxEip7702::default()); + + let auth = SignedAuthorization::new_unchecked( + Authorization { + chain_id: U256::from(1u64), + address: Address::from([0x77; 20]), + nonce: 0, + }, + 1, + U256::from(12345u64), + U256::from(67890u64), + ); + + let tx = TxEip7702 { + chain_id: 1, + nonce: 42, + gas_limit: 21000u64, + max_fee_per_gas: 30_000_000_000, + max_priority_fee_per_gas: 2_000_000_000, + to: Address::repeat_byte(11), + value: U256::from(1000000000000000000u64), + input: AlloBytes::from_static(b"eip7702 tx"), + access_list: AccessList::default(), + authorization_list: vec![auth], + }; + test_roundtrip(&tx); + } + + #[test] + fn test_jump_table_roundtrips() { + // Empty jump table + test_roundtrip(&JumpTable::default()); + + // Jump table with some jumps + let jump_table = JumpTable::from_slice(&[0b10101010, 0b01010101], 16); + test_roundtrip(&jump_table); + } + + #[test] + fn test_complex_combinations() { + // Test a complex Header with all fields populated + let header = Header { + number: 12345, + gas_limit: 8000000, + timestamp: 1234567890, + difficulty: U256::from(1000000u64), + parent_hash: keccak256(b"parent"), + ommers_hash: keccak256(b"ommers"), + beneficiary: Address::from([0xBE; 20]), + state_root: keccak256(b"state"), + transactions_root: keccak256(b"txs"), + receipts_root: keccak256(b"receipts"), + logs_bloom: Bloom::default(), + gas_used: 7999999, + mix_hash: keccak256(b"mix"), + nonce: [0x42; 8].into(), + extra_data: AlloBytes::from_static(b"extra data"), + base_fee_per_gas: Some(1000000000), + withdrawals_root: Some(keccak256(b"withdrawals_root")), + blob_gas_used: Some(500000), + excess_blob_gas: Some(10000), + parent_beacon_block_root: Some(keccak256(b"parent_beacon_block_root")), + requests_hash: Some(keccak256(b"requests_hash")), + }; + test_roundtrip(&header); + + // Test a complex EIP-1559 transaction + let access_list = AccessList(vec![ + AccessListItem { + address: Address::repeat_byte(11), + storage_keys: vec![B256::from([0x22; 32]), B256::from([0x33; 32])], + }, + AccessListItem { address: Address::from([0x44; 20]), storage_keys: vec![] }, + ]); + + let complex_tx = TxEip1559 { + chain_id: 1, + nonce: 123456, + gas_limit: 500000u64, + max_fee_per_gas: 50_000_000_000, + max_priority_fee_per_gas: 3_000_000_000, + to: TxKind::Create, + value: U256::ZERO, + input: AlloBytes::copy_from_slice(&[0xFF; 1000]), // Large input + access_list, + }; + test_roundtrip(&complex_tx); + } + + #[test] + fn test_edge_cases() { + // Very large access list + let large_storage_keys: Vec = + (0..1000).map(|i| B256::from(U256::from(i).to_be_bytes::<32>())).collect(); + let large_access_list = AccessList(vec![AccessListItem { + address: Address::from([0xAA; 20]), + storage_keys: large_storage_keys, + }]); + test_roundtrip(&large_access_list); + + // Transaction with maximum values + let max_tx = TxEip1559 { + chain_id: u64::MAX, + nonce: u64::MAX, + 
gas_limit: u64::MAX, + max_fee_per_gas: u128::MAX, + max_priority_fee_per_gas: u128::MAX, + to: TxKind::Call(Address::repeat_byte(0xFF)), + value: U256::MAX, + input: AlloBytes::copy_from_slice(&[0xFF; 10000]), // Very large input + access_list: AccessList::default(), + }; + test_roundtrip(&max_tx); + + // BlockNumberList with many numbers + let mut large_list = BlockNumberList::empty(); + for i in 0..10000u64 { + large_list.push(i).unwrap(); + } + test_roundtrip(&large_list); + } + + // KeySer Tests + // ============ + + /// Generic roundtrip test for any KeySer type + #[track_caller] + fn test_key_roundtrip(original: &T) + where + T: KeySer + PartialEq + std::fmt::Debug, + { + let mut buf = [0u8; MAX_KEY_SIZE]; + let encoded = original.encode_key(&mut buf); + + // Assert that the encoded size matches the const SIZE + assert_eq!( + encoded.len(), + T::SIZE, + "Encoded key length mismatch: expected {}, got {}", + T::SIZE, + encoded.len() + ); + + // Decode and verify + let decoded = T::decode_key(encoded).expect("Failed to decode key"); + assert_eq!(*original, decoded, "Key roundtrip failed"); + } + + /// Test ordering preservation for KeySer types + #[track_caller] + fn test_key_ordering(keys: &[T]) + where + T: KeySer + Ord + std::fmt::Debug + Clone, + { + let mut sorted_keys = keys.to_vec(); + sorted_keys.sort(); + + let mut encoded_keys: Vec<_> = sorted_keys + .iter() + .map(|k| { + let mut buf = [0u8; MAX_KEY_SIZE]; + let encoded = k.encode_key(&mut buf); + encoded.to_vec() + }) + .collect(); + + // Check that encoded keys are also sorted lexicographically + let original_encoded = encoded_keys.clone(); + encoded_keys.sort(); + + assert_eq!(original_encoded, encoded_keys, "Key encoding does not preserve ordering"); + } + + #[test] + fn test_sharded_key_b256_roundtrips() { + // Test with B256 keys + let key1 = ShardedKey { key: B256::ZERO, highest_block_number: 0 }; + test_key_roundtrip(&key1); + + let key2 = ShardedKey { key: B256::repeat_byte(0xFF), highest_block_number: u64::MAX }; + test_key_roundtrip(&key2); + + let key3 = ShardedKey { + key: B256::from([ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, + 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, + 0x1D, 0x1E, 0x1F, 0x20, + ]), + highest_block_number: 12345, + }; + test_key_roundtrip(&key3); + } + + #[test] + fn test_sharded_key_address_roundtrips() { + // Test with Address keys + let key1 = ShardedKey { key: Address::ZERO, highest_block_number: 0 }; + test_key_roundtrip(&key1); + + let key2 = ShardedKey { key: Address::repeat_byte(0xFF), highest_block_number: u64::MAX }; + test_key_roundtrip(&key2); + + let key3 = ShardedKey { + key: Address::from([ + 0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0, 0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, + 0xDE, 0xF0, 0x12, 0x34, 0x56, 0x78, + ]), + highest_block_number: 9876543210, + }; + test_key_roundtrip(&key3); + } + + #[test] + fn test_sharded_key_b256_ordering() { + let keys = vec![ + ShardedKey { key: B256::ZERO, highest_block_number: 0 }, + ShardedKey { key: B256::ZERO, highest_block_number: 1 }, + ShardedKey { key: B256::ZERO, highest_block_number: u64::MAX }, + ShardedKey { key: B256::from([0x01; 32]), highest_block_number: 0 }, + ShardedKey { key: B256::from([0x01; 32]), highest_block_number: 1 }, + ShardedKey { key: B256::repeat_byte(0xFF), highest_block_number: 0 }, + ShardedKey { key: B256::repeat_byte(0xFF), highest_block_number: u64::MAX }, + ]; + test_key_ordering(&keys); + } + + #[test] + fn 
test_sharded_key_address_ordering() { + let keys = vec![ + ShardedKey { key: Address::ZERO, highest_block_number: 0 }, + ShardedKey { key: Address::ZERO, highest_block_number: 1 }, + ShardedKey { key: Address::ZERO, highest_block_number: u64::MAX }, + ShardedKey { key: Address::from([0x01; 20]), highest_block_number: 0 }, + ShardedKey { key: Address::from([0x01; 20]), highest_block_number: 1 }, + ShardedKey { key: Address::repeat_byte(0xFF), highest_block_number: 0 }, + ShardedKey { key: Address::repeat_byte(0xFF), highest_block_number: u64::MAX }, + ]; + test_key_ordering(&keys); + } + + #[test] + fn test_key_decode_insufficient_data() { + // Test ShardedKey with insufficient data + let short_data = [0u8; 10]; // Much smaller than required + + match ShardedKey::::decode_key(&short_data) { + Err(DeserError::InsufficientData { needed, available }) => { + assert_eq!(needed, ShardedKey::::SIZE); + assert_eq!(available, 10); + } + other => panic!("Expected InsufficientData error, got: {:?}", other), + } + + // Test ShardedKey
<Address> with insufficient data + match ShardedKey::<Address>
::decode_key(&short_data) { + Err(DeserError::InsufficientData { needed, available }) => { + assert_eq!(needed, ShardedKey::<Address>
::SIZE); + assert_eq!(available, 10); + } + other => panic!("Expected InsufficientData error, got: {:?}", other), + } + } + + #[test] + fn test_key_encode_decode_boundary_values() { + // Test boundary values for block numbers + let boundary_keys = vec![ + ShardedKey { key: B256::ZERO, highest_block_number: 0 }, + ShardedKey { key: B256::ZERO, highest_block_number: 1 }, + ShardedKey { key: B256::ZERO, highest_block_number: u64::MAX - 1 }, + ShardedKey { key: B256::ZERO, highest_block_number: u64::MAX }, + ]; + + for key in &boundary_keys { + test_key_roundtrip(key); + } + + // Test that ordering is preserved across boundaries + test_key_ordering(&boundary_keys); + } +} diff --git a/crates/storage/src/ser/traits.rs b/crates/storage/src/ser/traits.rs new file mode 100644 index 0000000..7d3fcb4 --- /dev/null +++ b/crates/storage/src/ser/traits.rs @@ -0,0 +1,117 @@ +use crate::ser::error::DeserError; +use alloy::primitives::Bytes; + +/// Maximum allowed key size in bytes. +pub const MAX_KEY_SIZE: usize = 64; + +/// Trait for key serialization with fixed-size keys of size no greater than 32 +/// bytes. +/// +/// Keys must be FIXED SIZE, of size no greater than `MAX_KEY_SIZE` (64), and +/// no less than 1. The serialization must preserve ordering, i.e., for any two +/// keys `k1` and `k2`, if `k1 > k2`, then the byte representation of `k1` +/// must be lexicographically greater than that of `k2`. +/// +/// In practice, keys are often hashes, addresses, numbers, or composites +/// of these. +pub trait KeySer: PartialOrd + Ord + Sized + Clone + core::fmt::Debug { + /// The fixed size of the serialized key in bytes. + /// Must satisfy `SIZE <= MAX_KEY_SIZE`. + const SIZE: usize; + + /// Compile-time assertion to ensure SIZE is within limits. + #[doc(hidden)] + const ASSERT: () = { + assert!( + Self::SIZE <= MAX_KEY_SIZE, + "KeySer implementations must have SIZE <= MAX_KEY_SIZE" + ); + assert!(Self::SIZE > 0, "KeySer implementations must have SIZE > 0"); + }; + + /// Encode the key, optionally using the provided buffer. + /// + /// # Returns + /// + /// A slice containing the encoded key. This may be a slice of `buf`, or may + /// be borrowed from the key itself. This slice must be <= `SIZE` bytes. + fn encode_key<'a: 'c, 'b: 'c, 'c>(&'a self, buf: &'b mut [u8; MAX_KEY_SIZE]) -> &'c [u8]; + + /// Decode a key from a byte slice. + /// + /// # Arguments + /// * `data` - Exactly `SIZE` bytes to decode from. + /// + /// # Errors + /// Returns an error if `data.len() != SIZE` or decoding fails. + fn decode_key(data: &[u8]) -> Result; + + /// Decode an optional key from an optional byte slice. + /// + /// Useful in DB decoding, where the absence of a key is represented by + /// `None`. + fn maybe_decode_key(data: Option<&[u8]>) -> Result, DeserError> { + match data { + Some(d) => Ok(Some(Self::decode_key(d)?)), + None => Ok(None), + } + } +} + +/// Trait for value serialization. +/// +/// Values can be of variable size, but must implement accurate size reporting. +/// When serialized, value sizes must be self-describing. I.e. the value must +/// tolerate being deserialized from a byte slice of arbitrary length, consuming +/// only as many bytes as needed. +/// +/// E.g. a correct implementation for an array serializes the length of the +/// array first, so that the deserializer knows how many items to expect. +pub trait ValSer { + /// The encoded size of the value in bytes. This MUST be accurate, as it is + /// used to allocate buffers for serialization. 
Inaccurate sizes may result + /// in panics or incorrect behavior. + fn encoded_size(&self) -> usize; + + /// Serialize the value into bytes. + fn encode_value_to(&self, buf: &mut B) + where + B: bytes::BufMut + AsMut<[u8]>; + + /// Serialize the value into bytes and return them. + fn encoded(&self) -> Bytes { + let mut buf = bytes::BytesMut::new(); + self.encode_value_to(&mut buf); + buf.freeze().into() + } + + /// Deserialize the value from bytes. + fn decode_value(data: &[u8]) -> Result + where + Self: Sized; + + /// Deserialize an optional value from an optional byte slice. + /// + /// Useful in DB decoding, where the absence of a value is represented by + /// `None`. + fn maybe_decode_value(data: Option<&[u8]>) -> Result, DeserError> + where + Self: Sized, + { + match data { + Some(d) => Ok(Some(Self::decode_value(d)?)), + None => Ok(None), + } + } + + /// Deserialize the value from bytes, ensuring all bytes are consumed. + fn decode_value_exact(data: &[u8]) -> Result + where + Self: Sized, + { + let val = Self::decode_value(data)?; + (val.encoded_size() == data.len()) + .then_some(val) + .ok_or(DeserError::InexactDeser { extra_bytes: data.len() }) + } +} diff --git a/crates/storage/src/tables/cold.rs b/crates/storage/src/tables/cold.rs new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/crates/storage/src/tables/cold.rs @@ -0,0 +1 @@ + diff --git a/crates/storage/src/tables/hot.rs b/crates/storage/src/tables/hot.rs new file mode 100644 index 0000000..7e306ea --- /dev/null +++ b/crates/storage/src/tables/hot.rs @@ -0,0 +1,49 @@ +use alloy::primitives::{Address, B256, BlockNumber, U256}; +use reth::primitives::{Account, Bytecode, Header}; +use reth_db::models::BlockNumberAddress; +use reth_db_api::{BlockNumberList, models::ShardedKey}; + +table! { + /// Records recent block Headers, by their number. + Headers Header> +} + +table! { + /// Records block numbers by hash. + HeaderNumbers BlockNumber> +} + +table! { + /// Records contract Bytecode, by its hash. + Bytecodes Bytecode> +} + +table! { + /// Records plain account states, keyed by address. + PlainAccountState
<Address => Account> +} + +table! { + /// Records plain storage states, keyed by address and storage key. + PlainStorageState
<Address => B256 => U256> is 32 +} + +table! { + /// Records account state change history, keyed by address. + AccountsHistory
<Address => u64 => BlockNumberList> +} + +table! { + /// Records account states before transactions, keyed by (block_number, address). + AccountChangeSets<BlockNumber => Address => Account> is 96 +} + +table! { + /// Records storage state change history, keyed by address and storage key. + StorageHistory<Address =>
ShardedKey => BlockNumberList> +} + +table! { + /// Records account states before transactions, keyed by (address, block number). + StorageChangeSets B256 => U256> is 32 +} diff --git a/crates/storage/src/tables/macros.rs b/crates/storage/src/tables/macros.rs new file mode 100644 index 0000000..77cf8ce --- /dev/null +++ b/crates/storage/src/tables/macros.rs @@ -0,0 +1,74 @@ +macro_rules! table { + ( + @implement + #[doc = $doc:expr] + $name:ident, $key:ty, $value:ty, $dual:expr, $fixed:expr + ) => { + #[doc = $doc] + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] + pub struct $name; + + impl crate::tables::Table for $name { + const NAME: &'static str = stringify!($name); + const DUAL_KEY: bool = $dual; + const FIXED_VAL_SIZE: Option = $fixed; + + type Key = $key; + type Value = $value; + } + + }; + + ( + #[doc = $doc:expr] + $name:ident<$key:ty => $value:ty> + ) => { + table!(@implement + #[doc = $doc] + $name, + $key, + $value, + false, + None + ); + + impl crate::tables::SingleKey for $name {} + }; + + + ( + #[doc = $doc:expr] + $name:ident<$key:ty => $subkey:ty => $value:ty> + ) => { + table!(@implement + #[doc = $doc] + $name, + $key, + $value, + true, + None + ); + + impl crate::tables::DualKeyed for $name { + type Key2 = $subkey; + } + }; + + ( + #[doc = $doc:expr] + $name:ident<$key:ty => $subkey:ty => $value:ty> is $fixed:expr + ) => { + table!(@implement + #[doc = $doc] + $name, + $key, + $value, + true, + Some($fixed) + ); + + impl crate::tables::DualKeyed for $name { + type Key2 = $subkey; + } + }; +} diff --git a/crates/storage/src/tables/mod.rs b/crates/storage/src/tables/mod.rs new file mode 100644 index 0000000..f93dad5 --- /dev/null +++ b/crates/storage/src/tables/mod.rs @@ -0,0 +1,133 @@ +#[macro_use] +mod macros; + +/// Tables that are not hot. +pub mod cold; + +/// Tables that are hot, or conditionally hot. +pub mod hot; + +use crate::{ + hot::model::{DualKeyValue, KeyValue}, + ser::{DeserError, KeySer, ValSer}, +}; + +/// The maximum size of a dual key (in bytes). +pub const MAX_FIXED_VAL_SIZE: usize = 64; + +/// Trait for table definitions. +pub trait Table { + /// A short, human-readable name for the table. + const NAME: &'static str; + + /// Indicates that this table uses dual keys. + const DUAL_KEY: bool = false; + + /// True if the table is guaranteed to have fixed-size values, false + /// otherwise. + const FIXED_VAL_SIZE: Option = None; + + /// Indicates that this table has fixed-size values. + const IS_FIXED_VAL: bool = Self::FIXED_VAL_SIZE.is_some(); + + /// Compile-time assertions for the table. + #[doc(hidden)] + const ASSERT: () = { + // Ensure that fixed-size values do not exceed the maximum allowed size. + if let Some(size) = Self::FIXED_VAL_SIZE { + assert!(size <= MAX_FIXED_VAL_SIZE, "Fixed value size exceeds maximum allowed size"); + } + }; + + /// The key type. + type Key: KeySer; + /// The value type. + type Value: ValSer; + + /// Shortcut to decode a key. + fn decode_key(data: impl AsRef<[u8]>) -> Result { + ::decode_key(data.as_ref()) + } + + /// Shortcut to decode a value. + fn decode_value(data: impl AsRef<[u8]>) -> Result { + ::decode_value(data.as_ref()) + } + + /// Shortcut to decode a key-value pair. + fn decode_kv( + key_data: impl AsRef<[u8]>, + value_data: impl AsRef<[u8]>, + ) -> Result, DeserError> { + let key = Self::decode_key(key_data)?; + let value = Self::decode_value(value_data)?; + Ok((key, value)) + } + + /// Shortcut to decode a key-value tuple. 
+ fn decode_kv_tuple( + data: (impl AsRef<[u8]>, impl AsRef<[u8]>), + ) -> Result, DeserError> { + Self::decode_kv(data.0, data.1) + } +} + +/// Trait for tables with a single key. +pub trait SingleKey: Table { + /// Compile-time assertions for the single-keyed table. + #[doc(hidden)] + const ASSERT: () = { + assert!(!Self::DUAL_KEY, "SingleKey tables must have DUAL_KEY = false"); + }; +} + +/// Trait for tables with two keys. +/// +/// This trait aims to capture tables that use a composite key made up of two +/// distinct parts. This is useful for representing (e.g.) dupsort or other +/// nested map optimizations. +pub trait DualKeyed: Table { + /// The second key type. + type Key2: KeySer; + + /// Compile-time assertions for the dual-keyed table. + #[doc(hidden)] + const ASSERT: () = { + assert!(Self::DUAL_KEY, "DualKeyed tables must have DUAL_KEY = true"); + }; + + /// Shortcut to decode the second key. + fn decode_key2(data: impl AsRef<[u8]>) -> Result { + ::decode_key(data.as_ref()) + } + + /// Shortcut to decode a prepended value. This is useful for some table + /// implementations. + fn decode_prepended_value( + data: impl AsRef<[u8]>, + ) -> Result<(Self::Key2, Self::Value), DeserError> { + let data = data.as_ref(); + let key = Self::decode_key2(&data[..Self::Key2::SIZE])?; + let value = Self::decode_value(&data[Self::Key2::SIZE..])?; + Ok((key, value)) + } + + /// Shortcut to decode a dual key-value triplet. + fn decode_kkv( + key1_data: impl AsRef<[u8]>, + key2_data: impl AsRef<[u8]>, + value_data: impl AsRef<[u8]>, + ) -> Result, DeserError> { + let key1 = Self::decode_key(key1_data)?; + let key2 = Self::decode_key2(key2_data)?; + let value = Self::decode_value(value_data)?; + Ok((key1, key2, value)) + } + + /// Shortcut to decode a dual key-value tuple. + fn decode_kkv_tuple( + data: (impl AsRef<[u8]>, impl AsRef<[u8]>, impl AsRef<[u8]>), + ) -> Result, DeserError> { + Self::decode_kkv(data.0, data.1, data.2) + } +}
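
The `ValSer` contract above (accurate `encoded_size`, self-describing framing) is easiest to see as a round trip. The sketch below is illustrative only and not part of the diff; it assumes the crate is consumed as `signet_storage` with the `ser` module public, and it uses the `Account` impl defined earlier.

use alloy::primitives::U256;
use reth::primitives::Account;
use signet_storage::ser::{DeserError, ValSer};

// Illustrative sketch, not part of the change: round-trips an `Account` value.
fn main() -> Result<(), DeserError> {
    let account = Account { nonce: 7, balance: U256::from(1_000u64), bytecode_hash: None };

    // `encoded` sizes its buffer from `encoded_size` and serializes into it.
    let bytes = account.encoded();
    assert_eq!(bytes.len(), account.encoded_size());

    // Values are self-describing, so decoding consumes only what it needs;
    // `decode_value_exact` additionally rejects trailing bytes.
    let decoded = Account::decode_value_exact(&bytes)?;
    assert_eq!(decoded, account);
    Ok(())
}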
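The `sharded_key!` impls lean on the ordering requirement of `KeySer`: the big-endian `highest_block_number` is appended after the fixed-size prefix, so byte-wise comparison of encoded keys matches `(key, block)` order. A minimal check of that invariant, under the same crate-layout assumptions as above:

use alloy::primitives::Address;
use reth_db_api::models::ShardedKey;
use signet_storage::ser::{KeySer, MAX_KEY_SIZE};

// Illustrative sketch, not part of the change.
fn main() {
    // Same address, increasing shard boundary: encoded bytes must sort the same way.
    let low = ShardedKey { key: Address::repeat_byte(0x01), highest_block_number: 5 };
    let high = ShardedKey { key: Address::repeat_byte(0x01), highest_block_number: 6 };

    let (mut a, mut b) = ([0u8; MAX_KEY_SIZE], [0u8; MAX_KEY_SIZE]);
    assert!(low.encode_key(&mut a) < high.encode_key(&mut b));
}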
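Table definitions tie the two serialization traits together: `decode_kv` applies `KeySer::decode_key` and `ValSer::decode_value` for the table's declared types. A usage sketch against `PlainAccountState`, assuming it is the `Address => Account` table declared in `tables/hot.rs` and that the `tables` module is public:

use alloy::primitives::{Address, U256};
use reth::primitives::Account;
use signet_storage::{
    ser::{DeserError, KeySer, ValSer, MAX_KEY_SIZE},
    tables::{Table, hot::PlainAccountState},
};

// Illustrative sketch, not part of the change: decodes a raw (key, value) pair via the table.
fn main() -> Result<(), DeserError> {
    let addr = Address::repeat_byte(0x11);
    let account = Account { nonce: 1, balance: U256::from(42u64), bytecode_hash: None };

    // Keys are fixed-size; `encode_key` may borrow from the scratch buffer or the key itself.
    let mut buf = [0u8; MAX_KEY_SIZE];
    let key_bytes = addr.encode_key(&mut buf);
    let value_bytes = account.encoded();

    let (key, value) = PlainAccountState::decode_kv(key_bytes, &value_bytes)?;
    assert_eq!(key, addr);
    assert_eq!(value, account);
    Ok(())
}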
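For dual-keyed tables, `decode_prepended_value` splits an entry stored as `key2 ++ value`, using `Key2::SIZE` to find the boundary. A sketch against `PlainStorageState`, assuming it is the `Address => B256 => U256` table and that `B256: KeySer` and `U256: ValSer` hold as the diff implies:

use alloy::primitives::{B256, U256};
use signet_storage::{
    ser::{DeserError, KeySer, ValSer, MAX_KEY_SIZE},
    tables::{DualKeyed, hot::PlainStorageState},
};

// Illustrative sketch, not part of the change: rebuilds and splits a `key2 ++ value` entry.
fn main() -> Result<(), DeserError> {
    let slot = B256::repeat_byte(0x22);
    let value = U256::from(7u64);

    let mut entry = Vec::new();
    let mut buf = [0u8; MAX_KEY_SIZE];
    entry.extend_from_slice(slot.encode_key(&mut buf));
    value.encode_value_to(&mut entry);

    let (key2, val) = PlainStorageState::decode_prepended_value(&entry)?;
    assert_eq!(key2, slot);
    assert_eq!(val, value);
    Ok(())
}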