From b2dff1b013e2178a6a296ec81d5fe07fd703a60e Mon Sep 17 00:00:00 2001 From: James Date: Wed, 11 Feb 2026 16:34:26 -0500 Subject: [PATCH 01/31] feat: add signet-rpc-storage crate scaffolding Add the foundational scaffolding for the signet-rpc-storage crate, which provides an Ethereum JSON-RPC server backed by signet-storage's unified storage backend, independent of reth's FullNodeComponents. This includes: - Workspace dependency additions (signet-storage, signet-cold, signet-hot, signet-storage-types) - StorageRpcCtx context struct with Arc pattern - BlockTags atomic block tag tracker for Latest/Safe/Finalized - Block ID and block tag resolution utilities - Stub eth module (endpoints to be added in follow-up) Co-Authored-By: Claude Opus 4.6 --- Cargo.toml | 5 ++ crates/rpc-storage/Cargo.toml | 29 +++++++ crates/rpc-storage/README.md | 16 ++++ crates/rpc-storage/src/ctx.rs | 125 +++++++++++++++++++++++++++++ crates/rpc-storage/src/eth.rs | 26 ++++++ crates/rpc-storage/src/lib.rs | 31 +++++++ crates/rpc-storage/src/resolve.rs | 129 ++++++++++++++++++++++++++++++ 7 files changed, 361 insertions(+) create mode 100644 crates/rpc-storage/Cargo.toml create mode 100644 crates/rpc-storage/README.md create mode 100644 crates/rpc-storage/src/ctx.rs create mode 100644 crates/rpc-storage/src/eth.rs create mode 100644 crates/rpc-storage/src/lib.rs create mode 100644 crates/rpc-storage/src/resolve.rs diff --git a/Cargo.toml b/Cargo.toml index e98d70b..272cd74 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,6 +43,7 @@ signet-node-config = { version = "0.16.0-rc.7", path = "crates/node-config" } signet-node-tests = { version = "0.16.0-rc.7", path = "crates/node-tests" } signet-node-types = { version = "0.16.0-rc.7", path = "crates/node-types" } signet-rpc = { version = "0.16.0-rc.7", path = "crates/rpc" } +signet-rpc-storage = { version = "0.16.0-rc.7", path = "crates/rpc-storage" } init4-bin-base = { version = "0.18.0-rc.8", features = ["alloy"] } @@ -55,6 +56,10 @@ 
signet-tx-cache = "0.16.0-rc.8" signet-types = "0.16.0-rc.8" signet-zenith = "0.16.0-rc.8" signet-journal = "0.16.0-rc.8" +signet-storage = "0.1.0" +signet-cold = "0.1.0" +signet-hot = "0.1.0" +signet-storage-types = "0.1.0" # ajj ajj = { version = "0.3.4" } diff --git a/crates/rpc-storage/Cargo.toml b/crates/rpc-storage/Cargo.toml new file mode 100644 index 0000000..5c7431e --- /dev/null +++ b/crates/rpc-storage/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "signet-rpc-storage" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +authors.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "Ethereum JSON-RPC server backed by signet-storage" + +[dependencies] +signet-storage.workspace = true +signet-cold.workspace = true +signet-hot.workspace = true +signet-storage-types.workspace = true +signet-evm.workspace = true +trevm = { workspace = true, features = ["call", "estimate_gas"] } +signet-types.workspace = true +signet-tx-cache.workspace = true +alloy.workspace = true +ajj.workspace = true +tokio.workspace = true +tracing.workspace = true +thiserror.workspace = true +serde.workspace = true + +[dev-dependencies] +tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } diff --git a/crates/rpc-storage/README.md b/crates/rpc-storage/README.md new file mode 100644 index 0000000..ec3247e --- /dev/null +++ b/crates/rpc-storage/README.md @@ -0,0 +1,16 @@ +# signet-rpc-storage + +Ethereum JSON-RPC server backed by `signet-storage`'s unified storage backend. + +This crate provides a standalone ETH RPC implementation that uses hot storage +for state queries and cold storage for block, transaction, and receipt data. +Unlike `signet-rpc`, it does not depend on reth's `FullNodeComponents`. + +## Supported Methods + +- Block queries: `eth_blockNumber`, `eth_getBlockByHash`, `eth_getBlockByNumber`, etc. 
+- Transaction queries: `eth_getTransactionByHash`, `eth_getTransactionReceipt`, etc. +- Account state: `eth_getBalance`, `eth_getStorageAt`, `eth_getCode`, `eth_getTransactionCount` +- EVM execution: `eth_call`, `eth_estimateGas` +- Logs: `eth_getLogs` +- Transaction submission: `eth_sendRawTransaction` (optional, via `TxCache`) diff --git a/crates/rpc-storage/src/ctx.rs b/crates/rpc-storage/src/ctx.rs new file mode 100644 index 0000000..c4db8f4 --- /dev/null +++ b/crates/rpc-storage/src/ctx.rs @@ -0,0 +1,125 @@ +//! RPC context wrapping [`UnifiedStorage`]. + +use crate::resolve::BlockTags; +use signet_cold::ColdStorageReadHandle; +use signet_hot::HotKv; +use signet_hot::model::{HotKvRead, RevmRead}; +use signet_storage::UnifiedStorage; +use signet_tx_cache::TxCache; +use signet_types::constants::SignetSystemConstants; +use std::sync::Arc; +use trevm::revm::database::DBErrorMarker; +use trevm::revm::database::StateBuilder; + +/// RPC context backed by [`UnifiedStorage`]. +/// +/// Provides access to hot storage (state), cold storage (blocks/txs/receipts), +/// block tag resolution, and optional transaction forwarding. +/// +/// # Construction +/// +/// ```ignore +/// let ctx = StorageRpcCtx::new(storage, constants, tags, Some(tx_cache), 30_000_000); +/// ``` +#[derive(Debug)] +pub struct StorageRpcCtx { + inner: Arc>, +} + +impl Clone for StorageRpcCtx { + fn clone(&self) -> Self { + Self { inner: Arc::clone(&self.inner) } + } +} + +#[derive(Debug)] +struct StorageRpcCtxInner { + storage: UnifiedStorage, + constants: SignetSystemConstants, + tags: BlockTags, + tx_cache: Option, + rpc_gas_cap: u64, +} + +impl StorageRpcCtx { + /// Create a new storage-backed RPC context. + pub fn new( + storage: UnifiedStorage, + constants: SignetSystemConstants, + tags: BlockTags, + tx_cache: Option, + rpc_gas_cap: u64, + ) -> Self { + Self { + inner: Arc::new(StorageRpcCtxInner { storage, constants, tags, tx_cache, rpc_gas_cap }), + } + } + + /// Access the unified storage. 
+ pub fn storage(&self) -> &UnifiedStorage { + &self.inner.storage + } + + /// Get a cold storage read handle. + pub fn cold(&self) -> ColdStorageReadHandle { + self.inner.storage.cold_reader() + } + + /// Get a hot storage read transaction. + pub fn hot_reader(&self) -> signet_storage::StorageResult { + self.inner.storage.reader() + } + + /// Access the block tags. + pub fn tags(&self) -> &BlockTags { + &self.inner.tags + } + + /// Access the system constants. + pub fn constants(&self) -> &SignetSystemConstants { + &self.inner.constants + } + + /// Get the chain ID. + pub fn chain_id(&self) -> u64 { + self.inner.constants.ru_chain_id() + } + + /// Get the RPC gas cap. + pub fn rpc_gas_cap(&self) -> u64 { + self.inner.rpc_gas_cap + } + + /// Access the optional tx cache. + pub fn tx_cache(&self) -> Option<&TxCache> { + self.inner.tx_cache.as_ref() + } + + /// Create a revm-compatible database at a specific block height. + /// + /// The returned `State>` implements both `Database` and + /// `DatabaseCommit`, making it suitable for use with `signet_evm`. + pub fn revm_state_at_height( + &self, + height: u64, + ) -> signet_storage::StorageResult>> + where + H::RoTx: Send + Sync, + ::Error: DBErrorMarker, + { + let revm_read = self.inner.storage.revm_reader_at_height(height)?; + Ok(StateBuilder::new_with_database(revm_read).build()) + } + + /// Create a revm-compatible database at the current tip. + pub fn revm_state( + &self, + ) -> signet_storage::StorageResult>> + where + H::RoTx: Send + Sync, + ::Error: DBErrorMarker, + { + let revm_read = self.inner.storage.revm_reader()?; + Ok(StateBuilder::new_with_database(revm_read).build()) + } +} diff --git a/crates/rpc-storage/src/eth.rs b/crates/rpc-storage/src/eth.rs new file mode 100644 index 0000000..5910063 --- /dev/null +++ b/crates/rpc-storage/src/eth.rs @@ -0,0 +1,26 @@ +//! Eth RPC namespace endpoints. +//! +//! Endpoint implementations are provided in Plan 3. 
+ +use crate::ctx::StorageRpcCtx; +use signet_hot::HotKv; +use signet_hot::model::HotKvRead; +use trevm::revm::database::DBErrorMarker; + +/// Errors returned by `eth_*` RPC methods. +#[derive(Debug, Clone, Copy, thiserror::Error)] +pub enum EthError { + /// Placeholder — additional variants added in Plan 3. + #[error("not yet implemented")] + NotImplemented, +} + +/// Instantiate the `eth` API router. +pub(crate) fn eth() -> ajj::Router> +where + H: HotKv + Send + Sync + 'static, + H::RoTx: Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + ajj::Router::new() +} diff --git a/crates/rpc-storage/src/lib.rs b/crates/rpc-storage/src/lib.rs new file mode 100644 index 0000000..ac3908c --- /dev/null +++ b/crates/rpc-storage/src/lib.rs @@ -0,0 +1,31 @@ +#![doc = include_str!("../README.md")] +#![warn( + missing_copy_implementations, + missing_debug_implementations, + missing_docs, + unreachable_pub, + clippy::missing_const_for_fn, + rustdoc::all +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![deny(unused_must_use, rust_2018_idioms)] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +mod ctx; +pub use ctx::StorageRpcCtx; + +mod resolve; +pub use resolve::BlockTags; + +mod eth; +pub use eth::EthError; + +/// Instantiate the `eth` API router. +pub fn eth() -> ajj::Router> +where + H: signet_hot::HotKv + Send + Sync + 'static, + H::RoTx: Send + Sync + 'static, + ::Error: trevm::revm::database::DBErrorMarker, +{ + eth::eth() +} diff --git a/crates/rpc-storage/src/resolve.rs b/crates/rpc-storage/src/resolve.rs new file mode 100644 index 0000000..7ea5ff0 --- /dev/null +++ b/crates/rpc-storage/src/resolve.rs @@ -0,0 +1,129 @@ +//! Block tag tracking and BlockId resolution. +//! +//! [`BlockTags`] holds externally-updated atomic values for Latest, Safe, +//! and Finalized block numbers. The RPC context owner is responsible for +//! updating these as the chain progresses. 
+ +use alloy::{ + eips::{BlockId, BlockNumberOrTag}, + primitives::B256, +}; +use signet_cold::ColdStorageReadHandle; +use std::sync::{ + Arc, + atomic::{AtomicU64, Ordering}, +}; + +/// Externally-updated block tag tracker. +/// +/// Each tag is an `Arc` that the caller updates as the chain +/// progresses. The RPC layer reads these atomically for tag resolution. +/// +/// # Example +/// +/// ``` +/// use signet_rpc_storage::BlockTags; +/// +/// let tags = BlockTags::new(100, 95, 90); +/// assert_eq!(tags.latest(), 100); +/// +/// tags.set_latest(101); +/// assert_eq!(tags.latest(), 101); +/// ``` +#[derive(Debug, Clone)] +pub struct BlockTags { + latest: Arc, + safe: Arc, + finalized: Arc, +} + +impl BlockTags { + /// Create new block tags with initial values. + pub fn new(latest: u64, safe: u64, finalized: u64) -> Self { + Self { + latest: Arc::new(AtomicU64::new(latest)), + safe: Arc::new(AtomicU64::new(safe)), + finalized: Arc::new(AtomicU64::new(finalized)), + } + } + + /// Get the latest block number. + pub fn latest(&self) -> u64 { + self.latest.load(Ordering::Acquire) + } + + /// Get the safe block number. + pub fn safe(&self) -> u64 { + self.safe.load(Ordering::Acquire) + } + + /// Get the finalized block number. + pub fn finalized(&self) -> u64 { + self.finalized.load(Ordering::Acquire) + } + + /// Set the latest block number. + pub fn set_latest(&self, n: u64) { + self.latest.store(n, Ordering::Release); + } + + /// Set the safe block number. + pub fn set_safe(&self, n: u64) { + self.safe.store(n, Ordering::Release); + } + + /// Set the finalized block number. + pub fn set_finalized(&self, n: u64) { + self.finalized.store(n, Ordering::Release); + } +} + +/// Error resolving a block identifier. +#[derive(Debug, thiserror::Error)] +pub(crate) enum ResolveError { + /// Cold storage error. + #[error(transparent)] + Cold(#[from] signet_cold::ColdStorageError), + /// Block hash not found. 
+ #[error("block hash not found: {0}")] + HashNotFound(B256), +} + +/// Resolve a [`BlockId`] to a block number. +/// +/// - `Latest` / `Pending` → `tags.latest()` +/// - `Safe` → `tags.safe()` +/// - `Finalized` → `tags.finalized()` +/// - `Earliest` → `0` +/// - `Number(n)` → `n` +/// - `Hash(h)` → cold storage header lookup → `header.number` +pub(crate) async fn resolve_block_id( + id: BlockId, + tags: &BlockTags, + cold: &ColdStorageReadHandle, +) -> Result { + match id { + BlockId::Number(tag) => resolve_block_number_or_tag(tag, tags), + BlockId::Hash(h) => { + let header = cold + .get_header_by_hash(h.block_hash) + .await? + .ok_or(ResolveError::HashNotFound(h.block_hash))?; + Ok(header.number) + } + } +} + +/// Resolve a [`BlockNumberOrTag`] to a block number (sync, no cold lookup needed). +pub(crate) fn resolve_block_number_or_tag( + tag: BlockNumberOrTag, + tags: &BlockTags, +) -> Result { + Ok(match tag { + BlockNumberOrTag::Latest | BlockNumberOrTag::Pending => tags.latest(), + BlockNumberOrTag::Safe => tags.safe(), + BlockNumberOrTag::Finalized => tags.finalized(), + BlockNumberOrTag::Earliest => 0, + BlockNumberOrTag::Number(n) => n, + }) +} From 802824be5f64c1bbbf0a61235c5dcdf1aeeb1959 Mon Sep 17 00:00:00 2001 From: James Date: Wed, 11 Feb 2026 20:45:38 -0500 Subject: [PATCH 02/31] feat: implement ETH RPC endpoints for signet-rpc-storage Implement all ETH namespace JSON-RPC endpoints backed by cold/hot storage instead of reth. 
Converts eth.rs placeholder into eth/ directory module with error types, helpers, and 24 supported endpoint handlers: - Simple queries: blockNumber, chainId - Block queries: getBlockByHash/Number, getBlockReceipts, headers - Transaction queries: getTransactionByHash, getTransactionReceipt, raw txs - Account state (hot storage): getBalance, getStorageAt, getCode, getTransactionCount - EVM execution: call, estimateGas (via signet-evm/trevm) - Transaction submission: sendRawTransaction (via TxCache) - Logs: getLogs with bloom filter matching Co-Authored-By: Claude Opus 4.6 --- Cargo.toml | 8 +- crates/rpc-storage/src/eth.rs | 26 - crates/rpc-storage/src/eth/endpoints.rs | 800 ++++++++++++++++++++++++ crates/rpc-storage/src/eth/error.rs | 58 ++ crates/rpc-storage/src/eth/helpers.rs | 277 ++++++++ crates/rpc-storage/src/eth/mod.rs | 86 +++ crates/rpc-storage/src/resolve.rs | 2 +- 7 files changed, 1226 insertions(+), 31 deletions(-) delete mode 100644 crates/rpc-storage/src/eth.rs create mode 100644 crates/rpc-storage/src/eth/endpoints.rs create mode 100644 crates/rpc-storage/src/eth/error.rs create mode 100644 crates/rpc-storage/src/eth/helpers.rs create mode 100644 crates/rpc-storage/src/eth/mod.rs diff --git a/Cargo.toml b/Cargo.toml index 272cd74..5a8c908 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,10 +56,10 @@ signet-tx-cache = "0.16.0-rc.8" signet-types = "0.16.0-rc.8" signet-zenith = "0.16.0-rc.8" signet-journal = "0.16.0-rc.8" -signet-storage = "0.1.0" -signet-cold = "0.1.0" -signet-hot = "0.1.0" -signet-storage-types = "0.1.0" +signet-storage = "0.2.0" +signet-cold = "0.2.0" +signet-hot = "0.2.0" +signet-storage-types = "0.2.0" # ajj ajj = { version = "0.3.4" } diff --git a/crates/rpc-storage/src/eth.rs b/crates/rpc-storage/src/eth.rs deleted file mode 100644 index 5910063..0000000 --- a/crates/rpc-storage/src/eth.rs +++ /dev/null @@ -1,26 +0,0 @@ -//! Eth RPC namespace endpoints. -//! -//! Endpoint implementations are provided in Plan 3. 
- -use crate::ctx::StorageRpcCtx; -use signet_hot::HotKv; -use signet_hot::model::HotKvRead; -use trevm::revm::database::DBErrorMarker; - -/// Errors returned by `eth_*` RPC methods. -#[derive(Debug, Clone, Copy, thiserror::Error)] -pub enum EthError { - /// Placeholder — additional variants added in Plan 3. - #[error("not yet implemented")] - NotImplemented, -} - -/// Instantiate the `eth` API router. -pub(crate) fn eth() -> ajj::Router> -where - H: HotKv + Send + Sync + 'static, - H::RoTx: Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - ajj::Router::new() -} diff --git a/crates/rpc-storage/src/eth/endpoints.rs b/crates/rpc-storage/src/eth/endpoints.rs new file mode 100644 index 0000000..fc24792 --- /dev/null +++ b/crates/rpc-storage/src/eth/endpoints.rs @@ -0,0 +1,800 @@ +//! ETH namespace RPC endpoint implementations. + +use crate::{ + ctx::StorageRpcCtx, + eth::helpers::{ + AddrWithBlock, BlockParams, BlockRangeInclusiveIter, CfgFiller, StorageAtArgs, TxParams, + await_handler, build_receipt, build_receipt_from_parts, build_rpc_transaction, + normalize_gas_stateless, response_tri, + }, + resolve::{resolve_block_id, resolve_block_number_or_tag}, +}; +use ajj::{HandlerCtx, ResponsePayload}; +use alloy::{ + consensus::{BlockHeader, TxReceipt}, + eips::{ + BlockId, + eip2718::{Decodable2718, Encodable2718}, + }, + primitives::{B256, U64, U256}, + rpc::types::{Block, BlockTransactions, Filter, FilteredParams, Log}, +}; +use signet_cold::{HeaderSpecifier, ReceiptSpecifier}; +use signet_hot::model::HotKvRead; +use signet_hot::{HistoryRead, HotKv, db::HotDbRead}; +use std::borrow::Cow; +use tracing::{Instrument, debug, trace_span}; +use trevm::{EstimationResult, revm::database::DBErrorMarker}; + +use super::error::CallErrorData; + +// --------------------------------------------------------------------------- +// Not Supported +// --------------------------------------------------------------------------- + +pub(crate) async fn not_supported() -> 
ResponsePayload<(), ()> { + ResponsePayload::internal_error_message(Cow::Borrowed("Method not supported.")) +} + +// --------------------------------------------------------------------------- +// Simple Queries +// --------------------------------------------------------------------------- + +pub(crate) async fn block_number(ctx: StorageRpcCtx) -> Result { + Ok(U64::from(ctx.tags().latest())) +} + +pub(crate) async fn chain_id(ctx: StorageRpcCtx) -> Result { + Ok(U64::from(ctx.chain_id())) +} + +// --------------------------------------------------------------------------- +// Block Queries +// --------------------------------------------------------------------------- + +pub(crate) async fn block( + hctx: HandlerCtx, + BlockParams(t, full): BlockParams, + ctx: StorageRpcCtx, +) -> Result, String> +where + T: Into, + H: HotKv + Send + Sync + 'static, + H::RoTx: Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let id = t.into(); + let full = full.unwrap_or(false); + + let task = async move { + let cold = ctx.cold(); + let block_num = resolve_block_id(id, ctx.tags(), &cold).await.map_err(|e| e.to_string())?; + + let (header, txs) = tokio::try_join!( + cold.get_header_by_number(block_num), + cold.get_transactions_in_block(block_num), + ) + .map_err(|e| e.to_string())?; + + let Some(header) = header else { + return Ok(None); + }; + + let block_hash = header.hash_slow(); + + let transactions = if full { + let base_fee = header.base_fee_per_gas(); + let rpc_txs: Vec<_> = txs + .into_iter() + .enumerate() + .map(|(i, tx)| { + let meta = signet_storage_types::ConfirmationMeta::new( + block_num, block_hash, i as u64, + ); + build_rpc_transaction(tx, &meta, base_fee).map_err(|e| e.to_string()) + }) + .collect::>()?; + BlockTransactions::Full(rpc_txs) + } else { + let hashes: Vec = txs.iter().map(|tx| *tx.tx_hash()).collect(); + BlockTransactions::Hashes(hashes) + }; + + Ok(Some(Block { + header: alloy::rpc::types::Header::new(header), + transactions, + uncles: vec![], 
+ withdrawals: None, + })) + }; + + await_handler!(@option hctx.spawn_blocking(task)) +} + +pub(crate) async fn block_tx_count( + hctx: HandlerCtx, + (t,): (T,), + ctx: StorageRpcCtx, +) -> Result, String> +where + T: Into, + H: HotKv + Send + Sync + 'static, + H::RoTx: Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let id = t.into(); + + let task = async move { + let cold = ctx.cold(); + let block_num = resolve_block_id(id, ctx.tags(), &cold).await.map_err(|e| e.to_string())?; + + cold.get_transaction_count(block_num) + .await + .map(|c| Some(U64::from(c))) + .map_err(|e| e.to_string()) + }; + + await_handler!(@option hctx.spawn_blocking(task)) +} + +pub(crate) async fn block_receipts( + hctx: HandlerCtx, + (id,): (BlockId,), + ctx: StorageRpcCtx, +) -> Result>, String> +where + H: HotKv + Send + Sync + 'static, + H::RoTx: Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let task = async move { + let cold = ctx.cold(); + let block_num = resolve_block_id(id, ctx.tags(), &cold).await.map_err(|e| e.to_string())?; + + let (header, txs, receipts) = tokio::try_join!( + cold.get_header_by_number(block_num), + cold.get_transactions_in_block(block_num), + cold.get_receipts_in_block(block_num), + ) + .map_err(|e| e.to_string())?; + + let Some(header) = header else { + return Ok(None); + }; + + let block_hash = header.hash_slow(); + let mut log_index: u64 = 0; + + let rpc_receipts = txs + .into_iter() + .zip(receipts.iter()) + .enumerate() + .map(|(idx, (tx, receipt))| { + let prev_cumulative = idx + .checked_sub(1) + .and_then(|i| receipts.get(i)) + .map(|r| r.inner.cumulative_gas_used()) + .unwrap_or_default(); + + let gas_used = receipt.inner.cumulative_gas_used() - prev_cumulative; + let offset = log_index; + log_index += receipt.inner.logs().len() as u64; + + build_receipt_from_parts( + tx, + &header, + block_hash, + idx as u64, + receipt.clone(), + gas_used, + offset, + ) + .map_err(|e| e.to_string()) + }) + .collect::, _>>()?; + + Ok(Some(rpc_receipts)) + 
}; + + await_handler!(@option hctx.spawn_blocking(task)) +} + +pub(crate) async fn header_by( + hctx: HandlerCtx, + (t,): (T,), + ctx: StorageRpcCtx, +) -> Result, String> +where + T: Into, + H: HotKv + Send + Sync + 'static, + H::RoTx: Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let id = t.into(); + + let task = async move { + let cold = ctx.cold(); + let block_num = resolve_block_id(id, ctx.tags(), &cold).await.map_err(|e| e.to_string())?; + + cold.get_header_by_number(block_num) + .await + .map(|h| h.map(alloy::rpc::types::Header::new)) + .map_err(|e| e.to_string()) + }; + + await_handler!(@option hctx.spawn_blocking(task)) +} + +// --------------------------------------------------------------------------- +// Transaction Queries +// --------------------------------------------------------------------------- + +pub(crate) async fn transaction_by_hash( + hctx: HandlerCtx, + (hash,): (B256,), + ctx: StorageRpcCtx, +) -> Result, String> +where + H: HotKv + Send + Sync + 'static, + H::RoTx: Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let task = async move { + let cold = ctx.cold(); + let Some(confirmed) = cold.get_tx_by_hash(hash).await.map_err(|e| e.to_string())? 
else { + return Ok(None); + }; + + let (tx, meta) = confirmed.into_parts(); + + // Fetch header for base_fee + let header = + cold.get_header_by_number(meta.block_number()).await.map_err(|e| e.to_string())?; + let base_fee = header.and_then(|h| h.base_fee_per_gas()); + + build_rpc_transaction(tx, &meta, base_fee).map(Some).map_err(|e| e.to_string()) + }; + + await_handler!(@option hctx.spawn_blocking(task)) +} + +pub(crate) async fn raw_transaction_by_hash( + hctx: HandlerCtx, + (hash,): (B256,), + ctx: StorageRpcCtx, +) -> Result, String> +where + H: HotKv + Send + Sync + 'static, + H::RoTx: Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let task = async move { + ctx.cold() + .get_tx_by_hash(hash) + .await + .map(|opt| opt.map(|c| c.into_inner().encoded_2718().into())) + .map_err(|e| e.to_string()) + }; + + await_handler!(@option hctx.spawn_blocking(task)) +} + +pub(crate) async fn tx_by_block_and_index( + hctx: HandlerCtx, + (t, index): (T, U64), + ctx: StorageRpcCtx, +) -> Result, String> +where + T: Into, + H: HotKv + Send + Sync + 'static, + H::RoTx: Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let id = t.into(); + + let task = async move { + let cold = ctx.cold(); + let block_num = resolve_block_id(id, ctx.tags(), &cold).await.map_err(|e| e.to_string())?; + + let Some(confirmed) = cold + .get_tx_by_block_and_index(block_num, index.to::()) + .await + .map_err(|e| e.to_string())? 
+ else { + return Ok(None); + }; + + let (tx, meta) = confirmed.into_parts(); + let header = + cold.get_header_by_number(meta.block_number()).await.map_err(|e| e.to_string())?; + let base_fee = header.and_then(|h| h.base_fee_per_gas()); + + build_rpc_transaction(tx, &meta, base_fee).map(Some).map_err(|e| e.to_string()) + }; + + await_handler!(@option hctx.spawn_blocking(task)) +} + +pub(crate) async fn raw_tx_by_block_and_index( + hctx: HandlerCtx, + (t, index): (T, U64), + ctx: StorageRpcCtx, +) -> Result, String> +where + T: Into, + H: HotKv + Send + Sync + 'static, + H::RoTx: Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let id = t.into(); + + let task = async move { + let cold = ctx.cold(); + let block_num = resolve_block_id(id, ctx.tags(), &cold).await.map_err(|e| e.to_string())?; + + cold.get_tx_by_block_and_index(block_num, index.to::()) + .await + .map(|opt| opt.map(|c| c.into_inner().encoded_2718().into())) + .map_err(|e| e.to_string()) + }; + + await_handler!(@option hctx.spawn_blocking(task)) +} + +pub(crate) async fn transaction_receipt( + hctx: HandlerCtx, + (hash,): (B256,), + ctx: StorageRpcCtx, +) -> Result, String> +where + H: HotKv + Send + Sync + 'static, + H::RoTx: Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let task = async move { + let Some(receipt_ctx) = ctx + .cold() + .get_receipt_with_context(ReceiptSpecifier::TxHash(hash)) + .await + .map_err(|e| e.to_string())? 
+ else { + return Ok(None); + }; + + build_receipt(receipt_ctx).map(Some).map_err(|e| e.to_string()) + }; + + await_handler!(@option hctx.spawn_blocking(task)) +} + +// --------------------------------------------------------------------------- +// Account State (Hot Storage) +// --------------------------------------------------------------------------- + +pub(crate) async fn balance( + hctx: HandlerCtx, + AddrWithBlock(address, block): AddrWithBlock, + ctx: StorageRpcCtx, +) -> Result +where + H: HotKv + Send + Sync + 'static, + H::RoTx: Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let block = block.unwrap_or(BlockId::latest()); + + let task = async move { + let cold = ctx.cold(); + let height = resolve_block_id(block, ctx.tags(), &cold).await.map_err(|e| e.to_string())?; + + let reader = ctx.hot_reader().map_err(|e| e.to_string())?; + let acct = + reader.get_account_at_height(&address, Some(height)).map_err(|e| e.to_string())?; + + Ok(acct.map(|a| a.balance).unwrap_or_default()) + }; + + await_handler!(@option hctx.spawn_blocking(task)) +} + +pub(crate) async fn storage_at( + hctx: HandlerCtx, + StorageAtArgs(address, key, block): StorageAtArgs, + ctx: StorageRpcCtx, +) -> Result +where + H: HotKv + Send + Sync + 'static, + H::RoTx: Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let block = block.unwrap_or(BlockId::latest()); + + let task = async move { + let cold = ctx.cold(); + let height = resolve_block_id(block, ctx.tags(), &cold).await.map_err(|e| e.to_string())?; + + let reader = ctx.hot_reader().map_err(|e| e.to_string())?; + let val = reader + .get_storage_at_height(&address, &key, Some(height)) + .map_err(|e| e.to_string())?; + + Ok(val.unwrap_or_default().to_be_bytes().into()) + }; + + await_handler!(@option hctx.spawn_blocking(task)) +} + +pub(crate) async fn addr_tx_count( + hctx: HandlerCtx, + AddrWithBlock(address, block): AddrWithBlock, + ctx: StorageRpcCtx, +) -> Result +where + H: HotKv + Send + Sync + 'static, + H::RoTx: Send + 
Sync + 'static, + ::Error: DBErrorMarker, +{ + let block = block.unwrap_or(BlockId::latest()); + + let task = async move { + let cold = ctx.cold(); + let height = resolve_block_id(block, ctx.tags(), &cold).await.map_err(|e| e.to_string())?; + + let reader = ctx.hot_reader().map_err(|e| e.to_string())?; + let acct = + reader.get_account_at_height(&address, Some(height)).map_err(|e| e.to_string())?; + + Ok(U64::from(acct.map(|a| a.nonce).unwrap_or_default())) + }; + + await_handler!(@option hctx.spawn_blocking(task)) +} + +pub(crate) async fn code_at( + hctx: HandlerCtx, + AddrWithBlock(address, block): AddrWithBlock, + ctx: StorageRpcCtx, +) -> Result +where + H: HotKv + Send + Sync + 'static, + H::RoTx: Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let block = block.unwrap_or(BlockId::latest()); + + let task = async move { + let cold = ctx.cold(); + let height = resolve_block_id(block, ctx.tags(), &cold).await.map_err(|e| e.to_string())?; + + let reader = ctx.hot_reader().map_err(|e| e.to_string())?; + let acct = + reader.get_account_at_height(&address, Some(height)).map_err(|e| e.to_string())?; + + let Some(acct) = acct else { + return Ok(alloy::primitives::Bytes::new()); + }; + + let Some(code_hash) = acct.bytecode_hash else { + return Ok(alloy::primitives::Bytes::new()); + }; + + let code = reader.get_bytecode(&code_hash).map_err(|e| e.to_string())?; + + Ok(code.map(|c| c.original_bytes()).unwrap_or_default()) + }; + + await_handler!(@option hctx.spawn_blocking(task)) +} + +// --------------------------------------------------------------------------- +// EVM Execution +// --------------------------------------------------------------------------- + +pub(crate) async fn call( + hctx: HandlerCtx, + TxParams(mut request, block, state_overrides, block_overrides): TxParams, + ctx: StorageRpcCtx, +) -> ResponsePayload +where + H: HotKv + Send + Sync + 'static, + H::RoTx: Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let max_gas = ctx.rpc_gas_cap(); + 
normalize_gas_stateless(&mut request, max_gas); + + let id = block.unwrap_or(BlockId::latest()); + let span = trace_span!("eth_call", block_id = %id); + + let task = async move { + let cold = ctx.cold(); + let height = response_tri!(resolve_block_id(id, ctx.tags(), &cold).await); + + let header = response_tri!(cold.get_header_by_number(height).await); + let Some(header) = header else { + return ResponsePayload::internal_error_with_message_and_obj( + "block not found".into(), + id.to_string().into(), + ); + }; + + let db = response_tri!(ctx.revm_state_at_height(height)); + + let trevm = signet_evm::signet_evm(db, ctx.constants().clone()) + .fill_cfg(&CfgFiller(ctx.chain_id())) + .fill_block(&header); + + let trevm = response_tri!(trevm.maybe_apply_state_overrides(state_overrides.as_ref())) + .maybe_apply_block_overrides(block_overrides.as_deref()) + .fill_tx(&request); + + let mut trevm = trevm; + let new_gas = response_tri!(trevm.cap_tx_gas()); + if Some(new_gas) != request.gas { + debug!(req_gas = ?request.gas, new_gas, "capping gas for call"); + } + + let result = response_tri!(trevm.call().map_err(signet_evm::EvmErrored::into_error)); + + match result.0 { + trevm::revm::context::result::ExecutionResult::Success { output, .. } => { + ResponsePayload::Success(output.data().clone()) + } + trevm::revm::context::result::ExecutionResult::Revert { output, .. } => { + ResponsePayload::internal_error_with_message_and_obj( + "execution reverted".into(), + output.clone().into(), + ) + } + trevm::revm::context::result::ExecutionResult::Halt { reason, .. 
} => { + ResponsePayload::internal_error_with_message_and_obj( + "execution halted".into(), + format!("{reason:?}").into(), + ) + } + } + } + .instrument(span); + + await_handler!(@response_option hctx.spawn_blocking(task)) +} + +pub(crate) async fn estimate_gas( + hctx: HandlerCtx, + TxParams(mut request, block, state_overrides, block_overrides): TxParams, + ctx: StorageRpcCtx, +) -> ResponsePayload +where + H: HotKv + Send + Sync + 'static, + H::RoTx: Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let max_gas = ctx.rpc_gas_cap(); + normalize_gas_stateless(&mut request, max_gas); + + let id = block.unwrap_or(BlockId::pending()); + let span = trace_span!("eth_estimateGas", block_id = %id); + + let task = async move { + let cold = ctx.cold(); + let height = response_tri!(resolve_block_id(id, ctx.tags(), &cold).await); + + let header = response_tri!(cold.get_header_by_number(height).await); + let Some(header) = header else { + return ResponsePayload::internal_error_with_message_and_obj( + "block not found".into(), + id.to_string().into(), + ); + }; + + let db = response_tri!(ctx.revm_state_at_height(height)); + + let trevm = signet_evm::signet_evm(db, ctx.constants().clone()) + .fill_cfg(&CfgFiller(ctx.chain_id())) + .fill_block(&header); + + let trevm = response_tri!(trevm.maybe_apply_state_overrides(state_overrides.as_ref())) + .maybe_apply_block_overrides(block_overrides.as_deref()) + .fill_tx(&request); + + let (estimate, _) = + response_tri!(trevm.estimate_gas().map_err(signet_evm::EvmErrored::into_error)); + + match estimate { + EstimationResult::Success { limit, .. } => ResponsePayload::Success(U64::from(limit)), + EstimationResult::Revert { reason, .. } => { + ResponsePayload::internal_error_with_message_and_obj( + "execution reverted".into(), + reason.clone().into(), + ) + } + EstimationResult::Halt { reason, .. 
} => { + ResponsePayload::internal_error_with_message_and_obj( + "execution halted".into(), + format!("{reason:?}").into(), + ) + } + } + } + .instrument(span); + + await_handler!(@response_option hctx.spawn_blocking(task)) +} + +// --------------------------------------------------------------------------- +// Transaction Submission +// --------------------------------------------------------------------------- + +pub(crate) async fn send_raw_transaction( + hctx: HandlerCtx, + (tx,): (alloy::primitives::Bytes,), + ctx: StorageRpcCtx, +) -> Result +where + H: HotKv + Send + Sync + 'static, + H::RoTx: Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let Some(tx_cache) = ctx.tx_cache().cloned() else { + return Err("tx-cache URL not provided".to_string()); + }; + + let task = |hctx: HandlerCtx| async move { + let envelope = alloy::consensus::TxEnvelope::decode_2718(&mut tx.as_ref()) + .map_err(|e| e.to_string())?; + + let hash = *envelope.tx_hash(); + hctx.spawn(async move { + tx_cache.forward_raw_transaction(envelope).await.map_err(|e| e.to_string()) + }); + + Ok(hash) + }; + + await_handler!(@option hctx.spawn_blocking_with_ctx(task)) +} + +// --------------------------------------------------------------------------- +// Logs +// --------------------------------------------------------------------------- + +/// Maximum number of blocks per `eth_getLogs` range query. +const MAX_BLOCKS_PER_FILTER: u64 = 10_000; + +/// Maximum headers fetched per batch when scanning bloom filters. +const MAX_HEADERS_RANGE: u64 = 1_000; + +pub(crate) async fn get_logs( + hctx: HandlerCtx, + (filter,): (Filter,), + ctx: StorageRpcCtx, +) -> Result, String> +where + H: HotKv + Send + Sync + 'static, + H::RoTx: Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let task = async move { + let cold = ctx.cold(); + + // Build bloom filters for efficient block-level filtering. 
+ let address_filter = FilteredParams::address_filter(&filter.address); + let topics_filter = FilteredParams::topics_filter(&filter.topics); + + match filter.block_option { + alloy::rpc::types::FilterBlockOption::AtBlockHash(hash) => { + let header = cold + .get_header_by_hash(hash) + .await + .map_err(|e| e.to_string())? + .ok_or_else(|| format!("block not found: {hash}"))?; + + if !FilteredParams::matches_address(header.logs_bloom, &address_filter) + || !FilteredParams::matches_topics(header.logs_bloom, &topics_filter) + { + return Ok(vec![]); + } + + let block_num = header.number; + let (txs, receipts) = tokio::try_join!( + cold.get_transactions_in_block(block_num), + cold.get_receipts_in_block(block_num), + ) + .map_err(|e| e.to_string())?; + + Ok(collect_matching_logs(&header, hash, &txs, &receipts, &filter)) + } + + alloy::rpc::types::FilterBlockOption::Range { from_block, to_block } => { + let from = from_block + .map(|b| resolve_block_number_or_tag(b, ctx.tags())) + .transpose() + .map_err(|e| e.to_string())? + .unwrap_or(0); + let to = to_block + .map(|b| resolve_block_number_or_tag(b, ctx.tags())) + .transpose() + .map_err(|e| e.to_string())? 
+ .unwrap_or_else(|| ctx.tags().latest()); + + if to.saturating_sub(from) > MAX_BLOCKS_PER_FILTER { + return Err(format!("query exceeds max block range ({MAX_BLOCKS_PER_FILTER})")); + } + + let mut all_logs = Vec::new(); + + for (chunk_start, chunk_end) in + BlockRangeInclusiveIter::new(from..=to, MAX_HEADERS_RANGE) + { + let specs: Vec<_> = + (chunk_start..=chunk_end).map(HeaderSpecifier::Number).collect(); + + let headers = cold.get_headers(specs).await.map_err(|e| e.to_string())?; + + for (offset, maybe_header) in headers.into_iter().enumerate() { + let Some(header) = maybe_header else { + continue; + }; + + if !FilteredParams::matches_address(header.logs_bloom, &address_filter) + || !FilteredParams::matches_topics(header.logs_bloom, &topics_filter) + { + continue; + } + + let block_num = chunk_start + offset as u64; + let block_hash = header.hash_slow(); + + let (txs, receipts) = tokio::try_join!( + cold.get_transactions_in_block(block_num), + cold.get_receipts_in_block(block_num), + ) + .map_err(|e| e.to_string())?; + + let logs = + collect_matching_logs(&header, block_hash, &txs, &receipts, &filter); + all_logs.extend(logs); + } + } + + Ok(all_logs) + } + } + }; + + await_handler!(@option hctx.spawn_blocking(task)) +} + +/// Extract logs from a block's receipts that match the filter's address and topic criteria. 
+fn collect_matching_logs( + header: &alloy::consensus::Header, + block_hash: B256, + txs: &[signet_storage_types::TransactionSigned], + receipts: &[signet_storage_types::Receipt], + filter: &Filter, +) -> Vec { + let mut logs = Vec::new(); + let mut log_index: u64 = 0; + + for (tx_idx, (tx, receipt)) in txs.iter().zip(receipts.iter()).enumerate() { + let tx_hash = *tx.tx_hash(); + + for log in &receipt.inner.logs { + if filter.matches_address(log.address) && filter.matches_topics(log.topics()) { + logs.push(Log { + inner: log.clone(), + block_hash: Some(block_hash), + block_number: Some(header.number), + block_timestamp: Some(header.timestamp), + transaction_hash: Some(tx_hash), + transaction_index: Some(tx_idx as u64), + log_index: Some(log_index), + removed: false, + }); + } + log_index += 1; + } + } + + logs +} diff --git a/crates/rpc-storage/src/eth/error.rs b/crates/rpc-storage/src/eth/error.rs new file mode 100644 index 0000000..93322e8 --- /dev/null +++ b/crates/rpc-storage/src/eth/error.rs @@ -0,0 +1,58 @@ +//! Error types for the storage-backed ETH RPC. + +use alloy::{eips::BlockId, primitives::Bytes}; +use serde::Serialize; + +/// Errors from the storage-backed ETH RPC. +#[derive(Debug, thiserror::Error)] +pub enum EthError { + /// Cold storage error. + #[error("cold storage: {0}")] + Cold(#[from] signet_cold::ColdStorageError), + /// Hot storage error. + #[error("hot storage: {0}")] + Hot(#[from] signet_storage::StorageError), + /// Block resolution error. + #[error("resolve: {0}")] + Resolve(#[from] crate::resolve::ResolveError), + /// Invalid transaction signature. + #[error("invalid transaction signature")] + InvalidSignature, + /// Block not found. + #[error("block not found: {0}")] + BlockNotFound(BlockId), + /// EVM execution error. + #[error("evm: {0}")] + Evm(String), +} + +impl EthError { + /// Convert the error to a string for JSON-RPC responses. 
+ pub fn into_string(self) -> String { + self.to_string() + } +} + +/// Error data for `eth_call` and `eth_estimateGas` responses. +/// +/// Serialized as JSON in the error response `data` field. +#[derive(Debug, Clone, Serialize)] +#[serde(untagged)] +pub(crate) enum CallErrorData { + /// Revert data bytes. + Bytes(Bytes), + /// Error message string. + String(String), +} + +impl From for CallErrorData { + fn from(b: Bytes) -> Self { + Self::Bytes(b) + } +} + +impl From for CallErrorData { + fn from(s: String) -> Self { + Self::String(s) + } +} diff --git a/crates/rpc-storage/src/eth/helpers.rs b/crates/rpc-storage/src/eth/helpers.rs new file mode 100644 index 0000000..553a120 --- /dev/null +++ b/crates/rpc-storage/src/eth/helpers.rs @@ -0,0 +1,277 @@ +//! Parameter types, macros, and utility helpers for ETH RPC endpoints. + +use crate::eth::error::EthError; +use alloy::{ + consensus::{ + BlockHeader, ReceiptEnvelope, ReceiptWithBloom, Transaction, TxReceipt, + transaction::{Recovered, SignerRecoverable}, + }, + eips::BlockId, + primitives::{Address, TxKind, U256}, + rpc::types::{ + BlockOverrides, Log, TransactionReceipt, TransactionRequest, state::StateOverride, + }, +}; +use serde::Deserialize; +use signet_cold::ReceiptContext; +use signet_storage_types::ConfirmationMeta; +use trevm::MIN_TRANSACTION_GAS; + +/// Args for `eth_call` and `eth_estimateGas`. +#[derive(Debug, Deserialize)] +pub(crate) struct TxParams( + pub TransactionRequest, + #[serde(default)] pub Option, + #[serde(default)] pub Option, + #[serde(default)] pub Option>, +); + +/// Args for `eth_getBlockByHash` and `eth_getBlockByNumber`. +#[derive(Debug, Deserialize)] +pub(crate) struct BlockParams(pub T, #[serde(default)] pub Option); + +/// Args for `eth_getStorageAt`. +#[derive(Debug, Deserialize)] +pub(crate) struct StorageAtArgs(pub Address, pub U256, #[serde(default)] pub Option); + +/// Args for `eth_getBalance`, `eth_getTransactionCount`, and `eth_getCode`. 
+#[derive(Debug, Deserialize)] +pub(crate) struct AddrWithBlock(pub Address, #[serde(default)] pub Option); + +/// Normalize transaction request gas without making DB reads. +/// +/// - If the gas is below `MIN_TRANSACTION_GAS`, set it to `None` +/// - If the gas is above the `rpc_gas_cap`, set it to the `rpc_gas_cap` +pub(crate) const fn normalize_gas_stateless(request: &mut TransactionRequest, max_gas: u64) { + match request.gas { + Some(..MIN_TRANSACTION_GAS) => request.gas = None, + Some(val) if val > max_gas => request.gas = Some(max_gas), + _ => {} + } +} + +/// Await a handler task, returning an error string on panic/cancel. +macro_rules! await_handler { + ($h:expr) => { + match $h.await { + Ok(res) => res, + Err(_) => return Err("task panicked or cancelled".to_string()), + } + }; + + (@option $h:expr) => { + match $h.await { + Ok(Some(res)) => res, + _ => return Err("task panicked or cancelled".to_string()), + } + }; + + (@response_option $h:expr) => { + match $h.await { + Ok(Some(res)) => res, + _ => { + return ajj::ResponsePayload::internal_error_message(std::borrow::Cow::Borrowed( + "task panicked or cancelled", + )) + } + } + }; +} +pub(crate) use await_handler; + +/// Try-operator for `ResponsePayload`. +macro_rules! response_tri { + ($h:expr) => { + match $h { + Ok(res) => res, + Err(err) => return ajj::ResponsePayload::internal_error_message(err.to_string().into()), + } + }; +} +pub(crate) use response_tri; + +/// An iterator that yields inclusive block ranges of a given step size. 
+#[derive(Debug)] +pub(crate) struct BlockRangeInclusiveIter { + iter: std::iter::StepBy>, + step: u64, + end: u64, +} + +impl BlockRangeInclusiveIter { + pub(crate) fn new(range: std::ops::RangeInclusive, step: u64) -> Self { + Self { end: *range.end(), iter: range.step_by(step as usize + 1), step } + } +} + +impl Iterator for BlockRangeInclusiveIter { + type Item = (u64, u64); + + fn next(&mut self) -> Option { + let start = self.iter.next()?; + let end = (start + self.step).min(self.end); + if start > end { + return None; + } + Some((start, end)) + } +} + +/// Small wrapper implementing [`trevm::Cfg`] to set the chain ID. +pub(crate) struct CfgFiller(pub u64); + +impl trevm::Cfg for CfgFiller { + fn fill_cfg_env(&self, cfg: &mut trevm::revm::context::CfgEnv) { + cfg.chain_id = self.0; + } +} + +/// Recover the sender of a transaction, falling back to [`MagicSig`]. +/// +/// [`MagicSig`]: signet_types::MagicSig +pub(crate) fn recover_sender( + tx: &signet_storage_types::TransactionSigned, +) -> Result { + signet_types::MagicSig::try_from_signature(tx.signature()) + .map(|s| s.rollup_sender()) + .or_else(|| SignerRecoverable::recover_signer_unchecked(tx).ok()) + .ok_or(EthError::InvalidSignature) +} + +/// Build an [`alloy::rpc::types::Transaction`] from cold storage types. 
+pub(crate) fn build_rpc_transaction( + tx: signet_storage_types::TransactionSigned, + meta: &ConfirmationMeta, + base_fee: Option, +) -> Result { + let sender = recover_sender(&tx)?; + + // Convert EthereumTxEnvelope → TxEnvelope (EthereumTxEnvelope) + let tx_envelope: alloy::consensus::TxEnvelope = tx.into(); + let inner = Recovered::new_unchecked(tx_envelope, sender); + + let egp = base_fee + .map(|bf| inner.effective_tip_per_gas(bf).unwrap_or_default() as u64 + bf) + .unwrap_or_else(|| inner.max_fee_per_gas() as u64); + + Ok(alloy::rpc::types::Transaction { + inner, + block_hash: Some(meta.block_hash()), + block_number: Some(meta.block_number()), + transaction_index: Some(meta.transaction_index()), + effective_gas_price: Some(egp as u128), + }) +} + +/// Build a [`TransactionReceipt`] from a [`ReceiptContext`]. +pub(crate) fn build_receipt( + ctx: ReceiptContext, +) -> Result>, EthError> { + let (receipt, meta) = ctx.receipt.into_parts(); + let gas_used = receipt.inner.cumulative_gas_used() - ctx.prior_cumulative_gas; + + build_receipt_inner( + ctx.transaction, + &ctx.header, + &meta, + receipt, + gas_used, + 0, // log_index_offset: single receipt, no prior logs + ) +} + +/// Build a [`TransactionReceipt`] from individual components. +/// +/// Used by `eth_getBlockReceipts` where all receipts in the block are available. +pub(crate) fn build_receipt_from_parts( + tx: signet_storage_types::TransactionSigned, + header: &alloy::consensus::Header, + block_hash: alloy::primitives::B256, + tx_index: u64, + receipt: signet_storage_types::Receipt, + gas_used: u64, + log_index_offset: u64, +) -> Result>, EthError> { + let meta = ConfirmationMeta::new(header.number, block_hash, tx_index); + build_receipt_inner(tx, header, &meta, receipt, gas_used, log_index_offset) +} + +/// Shared receipt builder. 
+fn build_receipt_inner( + tx: signet_storage_types::TransactionSigned, + header: &alloy::consensus::Header, + meta: &ConfirmationMeta, + receipt: signet_storage_types::Receipt, + gas_used: u64, + log_index_offset: u64, +) -> Result>, EthError> { + let sender = recover_sender(&tx)?; + let tx_hash = *tx.tx_hash(); + + let logs_bloom = receipt.inner.bloom(); + let status = receipt.inner.status_or_post_state(); + let cumulative_gas_used = receipt.inner.cumulative_gas_used(); + + let logs: Vec = receipt + .inner + .logs + .into_iter() + .enumerate() + .map(|(i, log)| Log { + inner: log, + block_hash: Some(meta.block_hash()), + block_number: Some(meta.block_number()), + block_timestamp: Some(header.timestamp), + transaction_hash: Some(tx_hash), + transaction_index: Some(meta.transaction_index()), + log_index: Some(log_index_offset + i as u64), + removed: false, + }) + .collect(); + + let rpc_receipt = alloy::rpc::types::eth::Receipt { status, cumulative_gas_used, logs }; + + let (contract_address, to) = match tx.kind() { + TxKind::Create => (Some(sender.create(tx.nonce())), None), + TxKind::Call(addr) => (None, Some(Address(*addr))), + }; + + let base_fee = header.base_fee_per_gas(); + let egp = base_fee + .map(|bf| tx.effective_tip_per_gas(bf).unwrap_or_default() as u64 + bf) + .unwrap_or_else(|| tx.max_fee_per_gas() as u64); + + Ok(TransactionReceipt { + inner: build_receipt_envelope( + ReceiptWithBloom { receipt: rpc_receipt, logs_bloom }, + receipt.tx_type, + ), + transaction_hash: tx_hash, + transaction_index: Some(meta.transaction_index()), + block_hash: Some(meta.block_hash()), + block_number: Some(meta.block_number()), + from: sender, + to, + gas_used, + contract_address, + effective_gas_price: egp as u128, + blob_gas_price: None, + blob_gas_used: None, + }) +} + +/// Wrap a receipt in the appropriate [`ReceiptEnvelope`] variant. 
+fn build_receipt_envelope( + receipt: ReceiptWithBloom>, + tx_type: alloy::consensus::TxType, +) -> ReceiptEnvelope { + match tx_type { + alloy::consensus::TxType::Legacy => ReceiptEnvelope::Legacy(receipt), + alloy::consensus::TxType::Eip2930 => ReceiptEnvelope::Eip2930(receipt), + alloy::consensus::TxType::Eip1559 => ReceiptEnvelope::Eip1559(receipt), + alloy::consensus::TxType::Eip4844 => ReceiptEnvelope::Eip4844(receipt), + alloy::consensus::TxType::Eip7702 => ReceiptEnvelope::Eip7702(receipt), + #[allow(unreachable_patterns)] + _ => unreachable!(), + } +} diff --git a/crates/rpc-storage/src/eth/mod.rs b/crates/rpc-storage/src/eth/mod.rs new file mode 100644 index 0000000..6806064 --- /dev/null +++ b/crates/rpc-storage/src/eth/mod.rs @@ -0,0 +1,86 @@ +//! ETH namespace RPC router backed by storage. + +mod endpoints; +use endpoints::*; + +mod error; +pub use error::EthError; + +mod helpers; + +use crate::StorageRpcCtx; +use alloy::{eips::BlockNumberOrTag, primitives::B256}; +use signet_hot::HotKv; +use signet_hot::model::HotKvRead; +use trevm::revm::database::DBErrorMarker; + +/// Instantiate the `eth` API router backed by storage. 
+pub(crate) fn eth() -> ajj::Router> +where + H: HotKv + Send + Sync + 'static, + H::RoTx: Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + ajj::Router::new() + .route("blockNumber", block_number::) + .route("chainId", chain_id::) + .route("getBlockByHash", block::) + .route("getBlockByNumber", block::) + .route("getBlockTransactionCountByHash", block_tx_count::) + .route("getBlockTransactionCountByNumber", block_tx_count::) + .route("getBlockReceipts", block_receipts::) + .route("getRawTransactionByHash", raw_transaction_by_hash::) + .route("getTransactionByHash", transaction_by_hash::) + .route("getRawTransactionByBlockHashAndIndex", raw_tx_by_block_and_index::) + .route( + "getRawTransactionByBlockNumberAndIndex", + raw_tx_by_block_and_index::, + ) + .route("getTransactionByBlockHashAndIndex", tx_by_block_and_index::) + .route("getTransactionByBlockNumberAndIndex", tx_by_block_and_index::) + .route("getTransactionReceipt", transaction_receipt::) + .route("getBlockHeaderByHash", header_by::) + .route("getBlockHeaderByNumber", header_by::) + .route("getBalance", balance::) + .route("getStorageAt", storage_at::) + .route("getTransactionCount", addr_tx_count::) + .route("getCode", code_at::) + .route("call", call::) + .route("estimateGas", estimate_gas::) + .route("sendRawTransaction", send_raw_transaction::) + .route("getLogs", get_logs::) + // --- + // Unsupported methods + // --- + .route("protocolVersion", not_supported) + .route("syncing", not_supported) + .route("gasPrice", not_supported) + .route("maxPriorityFeePerGas", not_supported) + .route("feeHistory", not_supported) + .route("coinbase", not_supported) + .route("accounts", not_supported) + .route("blobBaseFee", not_supported) + .route("getUncleCountByBlockHash", not_supported) + .route("getUncleCountByBlockNumber", not_supported) + .route("getUncleByBlockHashAndIndex", not_supported) + .route("getUncleByBlockNumberAndIndex", not_supported) + .route("getWork", not_supported) + .route("hashrate", 
not_supported) + .route("mining", not_supported) + .route("submitHashrate", not_supported) + .route("submitWork", not_supported) + .route("sendTransaction", not_supported) + .route("sign", not_supported) + .route("signTransaction", not_supported) + .route("signTypedData", not_supported) + .route("getProof", not_supported) + .route("createAccessList", not_supported) + .route("newFilter", not_supported) + .route("newBlockFilter", not_supported) + .route("newPendingTransactionFilter", not_supported) + .route("uninstallFilter", not_supported) + .route("getFilterChanges", not_supported) + .route("getFilterLogs", not_supported) + .route("subscribe", not_supported) + .route("unsubscribe", not_supported) +} diff --git a/crates/rpc-storage/src/resolve.rs b/crates/rpc-storage/src/resolve.rs index 7ea5ff0..8c12154 100644 --- a/crates/rpc-storage/src/resolve.rs +++ b/crates/rpc-storage/src/resolve.rs @@ -80,7 +80,7 @@ impl BlockTags { /// Error resolving a block identifier. #[derive(Debug, thiserror::Error)] -pub(crate) enum ResolveError { +pub enum ResolveError { /// Cold storage error. #[error(transparent)] Cold(#[from] signet_cold::ColdStorageError), From 477a7322d5dc8eb18ce65d880738e7212445b7b2 Mon Sep 17 00:00:00 2001 From: James Date: Thu, 12 Feb 2026 07:29:53 -0500 Subject: [PATCH 03/31] test: add integration tests for ETH RPC endpoints Add 23 integration tests covering all endpoint categories: simple queries, block/transaction lookups, account state, logs, and error cases. Tests exercise the router through the axum service layer using tower's oneshot(). 
Co-Authored-By: Claude Opus 4.6 --- crates/rpc-storage/Cargo.toml | 12 + crates/rpc-storage/tests/eth_rpc.rs | 578 ++++++++++++++++++++++++++++ 2 files changed, 590 insertions(+) create mode 100644 crates/rpc-storage/tests/eth_rpc.rs diff --git a/crates/rpc-storage/Cargo.toml b/crates/rpc-storage/Cargo.toml index 5c7431e..ad301ae 100644 --- a/crates/rpc-storage/Cargo.toml +++ b/crates/rpc-storage/Cargo.toml @@ -27,3 +27,15 @@ serde.workspace = true [dev-dependencies] tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } +tokio-util = "0.7" +signet-cold = { workspace = true, features = ["test-utils"] } +signet-hot = { workspace = true, features = ["test-utils"] } +signet-storage.workspace = true +signet-storage-types.workspace = true +signet-constants.workspace = true +alloy.workspace = true +serde_json.workspace = true +axum = "0.8" +tower = { version = "0.5", features = ["util"] } +http = "1" +trevm.workspace = true diff --git a/crates/rpc-storage/tests/eth_rpc.rs b/crates/rpc-storage/tests/eth_rpc.rs new file mode 100644 index 0000000..3a34732 --- /dev/null +++ b/crates/rpc-storage/tests/eth_rpc.rs @@ -0,0 +1,578 @@ +//! Integration tests for the `signet-rpc-storage` ETH RPC endpoints. +//! +//! Tests exercise the public router API via the axum service layer, using +//! in-memory storage backends (`MemKv` + `MemColdBackend`). 
+ +use alloy::{ + consensus::{ + EthereumTxEnvelope, Header, Receipt as AlloyReceipt, SignableTransaction, Signed, TxLegacy, + TxType, + }, + primitives::{Address, B256, Log as PrimitiveLog, LogData, TxKind, U256, address, logs_bloom}, +}; +use axum::body::Body; +use http::Request; +use serde_json::{Value, json}; +use signet_cold::{BlockData, ColdStorageHandle, ColdStorageTask, mem::MemColdBackend}; +use signet_constants::SignetSystemConstants; +use signet_hot::{HotKv, db::UnsafeDbWrite, mem::MemKv}; +use signet_rpc_storage::{BlockTags, StorageRpcCtx}; +use signet_storage::UnifiedStorage; +use signet_storage_types::Receipt; +use tokio_util::sync::CancellationToken; +use tower::ServiceExt; + +// --------------------------------------------------------------------------- +// Test helpers +// --------------------------------------------------------------------------- + +/// Everything needed to make RPC calls against the storage-backed router. +struct TestHarness { + app: axum::Router, + cold: ColdStorageHandle, + hot: MemKv, + tags: BlockTags, + _cancel: CancellationToken, +} + +impl TestHarness { + /// Create a minimal harness with empty storage. + async fn new(latest: u64) -> Self { + let cancel = CancellationToken::new(); + let hot = MemKv::new(); + let cold = ColdStorageTask::spawn(MemColdBackend::new(), cancel.clone()); + let storage = UnifiedStorage::new(hot.clone(), cold.clone()); + let constants = SignetSystemConstants::test(); + let tags = BlockTags::new(latest, latest.saturating_sub(2), 0); + let ctx = StorageRpcCtx::new(storage, constants, tags.clone(), None, 30_000_000); + let app = signet_rpc_storage::eth::().into_axum("/").with_state(ctx); + + Self { app, cold, hot, tags, _cancel: cancel } + } +} + +/// Make a JSON-RPC call and return the `"result"` field. +/// +/// The `method` parameter is the short name (e.g. `"blockNumber"`), without +/// the `eth_` prefix. The router registers methods without namespace prefix. 
+/// +/// Panics if the response contains an `"error"` field. +async fn rpc_call(app: &axum::Router, method: &str, params: Value) -> Value { + let resp = rpc_call_raw(app, method, params).await; + if let Some(error) = resp.get("error") { + panic!("RPC error for {method}: {error}"); + } + resp["result"].clone() +} + +/// Make a JSON-RPC call and return the full response (including any error). +async fn rpc_call_raw(app: &axum::Router, method: &str, params: Value) -> Value { + let body = json!({ + "jsonrpc": "2.0", + "id": 1, + "method": method, + "params": params, + }); + + let req = Request::builder() + .method("POST") + .uri("/") + .header("content-type", "application/json") + .body(Body::from(serde_json::to_vec(&body).unwrap())) + .unwrap(); + + let response = app.clone().oneshot(req).await.unwrap(); + let bytes = axum::body::to_bytes(response.into_body(), 1024 * 1024).await.unwrap(); + serde_json::from_slice(&bytes).unwrap() +} + +// --------------------------------------------------------------------------- +// Test data builders +// --------------------------------------------------------------------------- + +/// Test address used for account state queries. +const TEST_ADDR: Address = address!("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); + +/// Test log-emitting contract address. +const LOG_ADDR: Address = address!("0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"); + +/// Test log topic. +const LOG_TOPIC: B256 = B256::repeat_byte(0xcc); + +/// Create a legacy transaction signed with a deterministic key. +/// +/// Uses alloy's signer to produce a valid ECDSA signature so that +/// `recover_sender` succeeds during RPC response building. 
+fn make_signed_tx(nonce: u64) -> (signet_storage_types::TransactionSigned, Address) { + use alloy::signers::{SignerSync, local::PrivateKeySigner}; + + let signer = PrivateKeySigner::from_signing_key( + alloy::signers::k256::ecdsa::SigningKey::from_slice( + &B256::repeat_byte((nonce as u8).wrapping_add(1)).0, + ) + .unwrap(), + ); + let sender = signer.address(); + + let tx = TxLegacy { + nonce, + gas_price: 1_000_000_000, + gas_limit: 21_000, + to: TxKind::Call(Address::ZERO), + value: U256::from(1000), + ..Default::default() + }; + + let sig_hash = tx.signature_hash(); + let sig = signer.sign_hash_sync(&sig_hash).unwrap(); + let signed: signet_storage_types::TransactionSigned = + EthereumTxEnvelope::Legacy(Signed::new_unhashed(tx, sig)); + + (signed, sender) +} + +/// Build a [`BlockData`] from pre-signed transactions. +/// +/// Creates receipts with incrementing `cumulative_gas_used` and optionally +/// attaches logs to each receipt. +fn make_block( + block_num: u64, + txs: Vec, + logs_per_receipt: usize, +) -> BlockData { + let receipts: Vec = txs + .iter() + .enumerate() + .map(|(i, _)| { + let logs: Vec = (0..logs_per_receipt) + .map(|l| PrimitiveLog { + address: LOG_ADDR, + data: LogData::new_unchecked( + vec![LOG_TOPIC], + alloy::primitives::Bytes::from(vec![l as u8]), + ), + }) + .collect(); + + Receipt { + tx_type: TxType::Legacy, + inner: AlloyReceipt { + status: true.into(), + cumulative_gas_used: 21_000 * (i as u64 + 1), + logs, + }, + } + }) + .collect(); + + // Compute the logs bloom from all receipt logs so getLogs bloom check passes. 
+ let all_logs: Vec<_> = receipts.iter().flat_map(|r| r.inner.logs.iter()).collect(); + let bloom = logs_bloom(all_logs); + + let header = Header { + number: block_num, + timestamp: 1_700_000_000 + block_num, + base_fee_per_gas: Some(1_000_000_000), + logs_bloom: bloom, + ..Default::default() + }; + + BlockData::new(header, txs, receipts, vec![], None) +} + +// --------------------------------------------------------------------------- +// Group 1: Simple queries +// --------------------------------------------------------------------------- + +#[tokio::test] +async fn test_block_number() { + let h = TestHarness::new(42).await; + let result = rpc_call(&h.app, "blockNumber", json!([])).await; + assert_eq!(result, json!("0x2a")); +} + +#[tokio::test] +async fn test_chain_id() { + let h = TestHarness::new(0).await; + let result = rpc_call(&h.app, "chainId", json!([])).await; + let expected = format!("0x{:x}", SignetSystemConstants::test().ru_chain_id()); + assert_eq!(result, json!(expected)); +} + +// --------------------------------------------------------------------------- +// Group 2: Cold storage — block queries +// --------------------------------------------------------------------------- + +/// Shared setup: append a block with 2 signed transactions to cold storage. +async fn setup_cold_block(h: &TestHarness) -> (Vec, Vec
) { + let (tx0, sender0) = make_signed_tx(0); + let (tx1, sender1) = make_signed_tx(1); + + let hash0 = *tx0.tx_hash(); + let hash1 = *tx1.tx_hash(); + + let block = make_block(1, vec![tx0, tx1], 1); + h.cold.append_block(block).await.unwrap(); + h.tags.set_latest(1); + + (vec![hash0, hash1], vec![sender0, sender1]) +} + +#[tokio::test] +async fn test_get_block_by_number_hashes() { + let h = TestHarness::new(0).await; + let (tx_hashes, _) = setup_cold_block(&h).await; + + let result = rpc_call(&h.app, "getBlockByNumber", json!(["0x1", false])).await; + + assert_eq!(result["number"], json!("0x1")); + let txs = result["transactions"].as_array().unwrap(); + assert_eq!(txs.len(), 2); + // When full=false, transactions are hashes (strings) + assert!(txs[0].is_string()); + assert_eq!(txs[0].as_str().unwrap(), format!("{:?}", tx_hashes[0])); +} + +#[tokio::test] +async fn test_get_block_by_number_full() { + let h = TestHarness::new(0).await; + let (tx_hashes, senders) = setup_cold_block(&h).await; + + let result = rpc_call(&h.app, "getBlockByNumber", json!(["0x1", true])).await; + + assert_eq!(result["number"], json!("0x1")); + let txs = result["transactions"].as_array().unwrap(); + assert_eq!(txs.len(), 2); + // When full=true, transactions are objects + assert!(txs[0].is_object()); + assert_eq!(txs[0]["hash"], json!(format!("{:?}", tx_hashes[0]))); + assert_eq!(txs[0]["from"], json!(format!("{:?}", senders[0]))); + assert_eq!(txs[0]["blockNumber"], json!("0x1")); + assert_eq!(txs[0]["transactionIndex"], json!("0x0")); + assert_eq!(txs[1]["transactionIndex"], json!("0x1")); +} + +#[tokio::test] +async fn test_get_block_by_hash() { + let h = TestHarness::new(0).await; + setup_cold_block(&h).await; + + // Get the block to learn its hash + let block = rpc_call(&h.app, "getBlockByNumber", json!(["0x1", false])).await; + let block_hash = block["hash"].as_str().unwrap().to_string(); + + let result = rpc_call(&h.app, "getBlockByHash", json!([block_hash, false])).await; + 
assert_eq!(result["number"], json!("0x1")); + assert_eq!(result["hash"], json!(block_hash)); +} + +#[tokio::test] +async fn test_get_block_tx_count() { + let h = TestHarness::new(0).await; + setup_cold_block(&h).await; + + let result = rpc_call(&h.app, "getBlockTransactionCountByNumber", json!(["0x1"])).await; + assert_eq!(result, json!("0x2")); +} + +#[tokio::test] +async fn test_get_block_header() { + let h = TestHarness::new(0).await; + setup_cold_block(&h).await; + + let result = rpc_call(&h.app, "getBlockHeaderByNumber", json!(["0x1"])).await; + assert_eq!(result["number"], json!("0x1")); + assert!(result["baseFeePerGas"].is_string()); +} + +#[tokio::test] +async fn test_get_block_not_found() { + let h = TestHarness::new(255).await; + let result = rpc_call(&h.app, "getBlockByNumber", json!(["0xff", false])).await; + assert!(result.is_null()); +} + +// --------------------------------------------------------------------------- +// Group 3: Cold storage — transaction queries +// --------------------------------------------------------------------------- + +#[tokio::test] +async fn test_get_transaction_by_hash() { + let h = TestHarness::new(0).await; + let (tx_hashes, senders) = setup_cold_block(&h).await; + + let result = + rpc_call(&h.app, "getTransactionByHash", json!([format!("{:?}", tx_hashes[0])])).await; + + assert_eq!(result["hash"], json!(format!("{:?}", tx_hashes[0]))); + assert_eq!(result["from"], json!(format!("{:?}", senders[0]))); + assert_eq!(result["blockNumber"], json!("0x1")); + assert_eq!(result["transactionIndex"], json!("0x0")); +} + +#[tokio::test] +async fn test_get_raw_transaction_by_hash() { + let h = TestHarness::new(0).await; + let (tx_hashes, _) = setup_cold_block(&h).await; + + let result = + rpc_call(&h.app, "getRawTransactionByHash", json!([format!("{:?}", tx_hashes[0])])).await; + + // Raw transaction is a hex string + let hex = result.as_str().unwrap(); + assert!(hex.starts_with("0x")); + assert!(hex.len() > 4); +} + 
+#[tokio::test] +async fn test_get_tx_by_block_and_index() { + let h = TestHarness::new(0).await; + let (tx_hashes, senders) = setup_cold_block(&h).await; + + let result = + rpc_call(&h.app, "getTransactionByBlockNumberAndIndex", json!(["0x1", "0x0"])).await; + + assert_eq!(result["hash"], json!(format!("{:?}", tx_hashes[0]))); + assert_eq!(result["from"], json!(format!("{:?}", senders[0]))); +} + +#[tokio::test] +async fn test_get_transaction_receipt() { + let h = TestHarness::new(0).await; + let (tx_hashes, senders) = setup_cold_block(&h).await; + + let result = + rpc_call(&h.app, "getTransactionReceipt", json!([format!("{:?}", tx_hashes[0])])).await; + + assert_eq!(result["transactionHash"], json!(format!("{:?}", tx_hashes[0]))); + assert_eq!(result["from"], json!(format!("{:?}", senders[0]))); + assert_eq!(result["blockNumber"], json!("0x1")); + assert_eq!(result["status"], json!("0x1")); + assert_eq!(result["gasUsed"], json!("0x5208")); // 21000 +} + +#[tokio::test] +async fn test_get_block_receipts() { + let h = TestHarness::new(0).await; + setup_cold_block(&h).await; + + let result = rpc_call(&h.app, "getBlockReceipts", json!(["0x1"])).await; + + let receipts = result.as_array().unwrap(); + assert_eq!(receipts.len(), 2); + assert_eq!(receipts[0]["transactionIndex"], json!("0x0")); + assert_eq!(receipts[1]["transactionIndex"], json!("0x1")); + assert_eq!(receipts[0]["status"], json!("0x1")); + assert_eq!(receipts[1]["status"], json!("0x1")); +} + +// --------------------------------------------------------------------------- +// Group 4: Hot storage — account state +// --------------------------------------------------------------------------- + +/// Populate hot storage with a test account. 
+fn setup_hot_account(hot: &MemKv) { + use signet_storage_types::Account; + use trevm::revm::bytecode::Bytecode; + + let writer = hot.writer().unwrap(); + + let code = alloy::primitives::Bytes::from_static(&[0x60, 0x00, 0x60, 0x00, 0xf3]); + let bytecode = Bytecode::new_raw(code); + let code_hash = bytecode.hash_slow(); + + writer + .put_account( + &TEST_ADDR, + &Account { + nonce: 5, + balance: U256::from(1_000_000_000_000_000_000u128), + bytecode_hash: Some(code_hash), + }, + ) + .unwrap(); + + writer.put_storage(&TEST_ADDR, &U256::from(42), &U256::from(999)).unwrap(); + + writer.put_bytecode(&code_hash, &bytecode).unwrap(); + + writer.commit().unwrap(); +} + +#[tokio::test] +async fn test_get_balance() { + let h = TestHarness::new(1).await; + setup_hot_account(&h.hot); + + // Append a dummy block so tag resolution succeeds + let block = make_block(1, vec![], 0); + h.cold.append_block(block).await.unwrap(); + + let result = + rpc_call(&h.app, "getBalance", json!([format!("{:?}", TEST_ADDR), "latest"])).await; + + // 1 ETH = 10^18 + assert_eq!(result, json!("0xde0b6b3a7640000")); +} + +#[tokio::test] +async fn test_get_transaction_count() { + let h = TestHarness::new(1).await; + setup_hot_account(&h.hot); + + let block = make_block(1, vec![], 0); + h.cold.append_block(block).await.unwrap(); + + let result = + rpc_call(&h.app, "getTransactionCount", json!([format!("{:?}", TEST_ADDR), "latest"])) + .await; + + assert_eq!(result, json!("0x5")); +} + +#[tokio::test] +async fn test_get_storage_at() { + let h = TestHarness::new(1).await; + setup_hot_account(&h.hot); + + let block = make_block(1, vec![], 0); + h.cold.append_block(block).await.unwrap(); + + let slot = format!("{:#066x}", 42u64); + let result = + rpc_call(&h.app, "getStorageAt", json!([format!("{:?}", TEST_ADDR), slot, "latest"])).await; + + // 999 = 0x3e7, padded to 32 bytes + let expected = format!("{:#066x}", 999u64); + assert_eq!(result, json!(expected)); +} + +#[tokio::test] +async fn test_get_code() 
{ + let h = TestHarness::new(1).await; + setup_hot_account(&h.hot); + + let block = make_block(1, vec![], 0); + h.cold.append_block(block).await.unwrap(); + + let result = rpc_call(&h.app, "getCode", json!([format!("{:?}", TEST_ADDR), "latest"])).await; + + assert_eq!(result, json!("0x60006000f3")); +} + +#[tokio::test] +async fn test_get_balance_unknown_account() { + let h = TestHarness::new(1).await; + + let block = make_block(1, vec![], 0); + h.cold.append_block(block).await.unwrap(); + + let unknown = Address::repeat_byte(0xff); + let result = rpc_call(&h.app, "getBalance", json!([format!("{:?}", unknown), "latest"])).await; + + assert_eq!(result, json!("0x0")); +} + +// --------------------------------------------------------------------------- +// Group 5: Logs +// --------------------------------------------------------------------------- + +#[tokio::test] +async fn test_get_logs_by_block_hash() { + let h = TestHarness::new(0).await; + + // Create block with transactions that have logs + let (tx0, _) = make_signed_tx(0); + let block = make_block(1, vec![tx0], 2); // 2 logs per receipt + h.cold.append_block(block).await.unwrap(); + h.tags.set_latest(1); + + // Get the block hash + let block_result = rpc_call(&h.app, "getBlockByNumber", json!(["0x1", false])).await; + let block_hash = block_result["hash"].as_str().unwrap().to_string(); + + let result = rpc_call( + &h.app, + "getLogs", + json!([{ + "blockHash": block_hash, + "address": format!("{:?}", LOG_ADDR), + }]), + ) + .await; + + let logs = result.as_array().unwrap(); + assert_eq!(logs.len(), 2); + assert_eq!(logs[0]["address"], json!(format!("{:?}", LOG_ADDR))); + assert_eq!(logs[0]["blockNumber"], json!("0x1")); + assert_eq!(logs[0]["logIndex"], json!("0x0")); + assert_eq!(logs[1]["logIndex"], json!("0x1")); +} + +#[tokio::test] +async fn test_get_logs_by_range() { + let h = TestHarness::new(0).await; + + let (tx0, _) = make_signed_tx(0); + let block = make_block(1, vec![tx0], 1); + 
h.cold.append_block(block).await.unwrap(); + h.tags.set_latest(1); + + let result = rpc_call( + &h.app, + "getLogs", + json!([{ + "fromBlock": "0x1", + "toBlock": "0x1", + "topics": [format!("{:?}", LOG_TOPIC)], + }]), + ) + .await; + + let logs = result.as_array().unwrap(); + assert_eq!(logs.len(), 1); + assert!(logs[0]["topics"].as_array().unwrap().contains(&json!(format!("{:?}", LOG_TOPIC)))); +} + +#[tokio::test] +async fn test_get_logs_empty() { + let h = TestHarness::new(0).await; + + let (tx0, _) = make_signed_tx(0); + let block = make_block(1, vec![tx0], 0); // no logs + h.cold.append_block(block).await.unwrap(); + h.tags.set_latest(1); + + let result = rpc_call( + &h.app, + "getLogs", + json!([{ + "fromBlock": "0x1", + "toBlock": "0x1", + "address": format!("{:?}", LOG_ADDR), + }]), + ) + .await; + + assert_eq!(result.as_array().unwrap().len(), 0); +} + +// --------------------------------------------------------------------------- +// Group 6: Edge cases & errors +// --------------------------------------------------------------------------- + +#[tokio::test] +async fn test_not_supported() { + let h = TestHarness::new(0).await; + let resp = rpc_call_raw(&h.app, "gasPrice", json!([])).await; + assert!(resp.get("error").is_some()); + let msg = resp["error"]["message"].as_str().unwrap(); + assert!(msg.contains("not supported"), "unexpected error: {msg}"); +} + +#[tokio::test] +async fn test_send_raw_tx_no_cache() { + let h = TestHarness::new(0).await; + let resp = rpc_call_raw(&h.app, "sendRawTransaction", json!(["0x00"])).await; + assert!(resp.get("error").is_some()); +} From a435afa7b6e904f896fd99192c85c3e393bdc19c Mon Sep 17 00:00:00 2001 From: James Date: Thu, 12 Feb 2026 07:48:21 -0500 Subject: [PATCH 04/31] fix: resolve review issues in endpoints and helpers - send_raw_transaction: log warning on forwarding failure instead of silently discarding the error - get_logs: reject reversed block ranges (from > to) with an explicit error instead of silently 
returning empty results - build_receipt_envelope: remove catch-all arm so new TxType variants from alloy produce a compile error instead of a runtime panic Co-Authored-By: Claude Opus 4.6 --- crates/rpc-storage/src/eth/endpoints.rs | 9 +++++++-- crates/rpc-storage/src/eth/helpers.rs | 4 +--- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/crates/rpc-storage/src/eth/endpoints.rs b/crates/rpc-storage/src/eth/endpoints.rs index fc24792..d7eb366 100644 --- a/crates/rpc-storage/src/eth/endpoints.rs +++ b/crates/rpc-storage/src/eth/endpoints.rs @@ -645,7 +645,9 @@ where let hash = *envelope.tx_hash(); hctx.spawn(async move { - tx_cache.forward_raw_transaction(envelope).await.map_err(|e| e.to_string()) + if let Err(e) = tx_cache.forward_raw_transaction(envelope).await { + tracing::warn!(%hash, err = %e, "failed to forward raw transaction"); + } }); Ok(hash) @@ -717,7 +719,10 @@ where .map_err(|e| e.to_string())? .unwrap_or_else(|| ctx.tags().latest()); - if to.saturating_sub(from) > MAX_BLOCKS_PER_FILTER { + if from > to { + return Err("fromBlock must not exceed toBlock".to_string()); + } + if to - from > MAX_BLOCKS_PER_FILTER { return Err(format!("query exceeds max block range ({MAX_BLOCKS_PER_FILTER})")); } diff --git a/crates/rpc-storage/src/eth/helpers.rs b/crates/rpc-storage/src/eth/helpers.rs index 553a120..39354ba 100644 --- a/crates/rpc-storage/src/eth/helpers.rs +++ b/crates/rpc-storage/src/eth/helpers.rs @@ -261,7 +261,7 @@ fn build_receipt_inner( } /// Wrap a receipt in the appropriate [`ReceiptEnvelope`] variant. 
-fn build_receipt_envelope( +const fn build_receipt_envelope( receipt: ReceiptWithBloom>, tx_type: alloy::consensus::TxType, ) -> ReceiptEnvelope { @@ -271,7 +271,5 @@ fn build_receipt_envelope( alloy::consensus::TxType::Eip1559 => ReceiptEnvelope::Eip1559(receipt), alloy::consensus::TxType::Eip4844 => ReceiptEnvelope::Eip4844(receipt), alloy::consensus::TxType::Eip7702 => ReceiptEnvelope::Eip7702(receipt), - #[allow(unreachable_patterns)] - _ => unreachable!(), } } From 601c1565a14e5ba0d4f47cc45a9b3070138428e7 Mon Sep 17 00:00:00 2001 From: James Date: Thu, 12 Feb 2026 07:51:25 -0500 Subject: [PATCH 05/31] fix: correct log ordering comments in rpc test Regular txs execute before system txs, not the other way around. Drive-by from https://github.com/init4tech/node-components/pull/74#discussion_r2798396844 Co-Authored-By: Claude Opus 4.6 --- crates/node-tests/tests/rpc.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/node-tests/tests/rpc.rs b/crates/node-tests/tests/rpc.rs index 9c183e1..a293665 100644 --- a/crates/node-tests/tests/rpc.rs +++ b/crates/node-tests/tests/rpc.rs @@ -223,13 +223,13 @@ async fn getLogs_post(ctx: &SignetTestContext, contract: &TestCounterInstance) { .await .unwrap(); - // Two logs: one from the host transact, one from the alloy tx + // Two logs: one from the alloy tx, one from the host transact assert_eq!(logs.len(), 2); let log_inner = &logs[0].inner; assert_eq!(log_inner.address, *contract.address()); - // First increment is from the host transact (system tx runs first) + // First increment is from the alloy tx (regular txs execute before system txs) assert_eq!(log_inner.topics(), &[Counter::Count::SIGNATURE_HASH, B256::with_last_byte(1)]); - // Second increment is from the alloy tx + // Second increment is from the host transact (system tx) let log_inner = &logs[1].inner; assert_eq!(log_inner.address, *contract.address()); assert_eq!(log_inner.topics(), &[Counter::Count::SIGNATURE_HASH, 
B256::with_last_byte(2)]); From 35ed5cb2564d5dc3a773b0432e99bb1b261409c8 Mon Sep 17 00:00:00 2001 From: James Date: Thu, 12 Feb 2026 08:07:57 -0500 Subject: [PATCH 06/31] refactor: DRY up EVM setup, fix glob import, remove dead code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Extract `resolve_evm_block` method on `StorageRpcCtx` to deduplicate the block resolution + header fetch + revm db creation shared by `call()` and `estimate_gas()`. Resolves headers directly (by hash or by tag→number) to avoid redundant cold storage lookups. - Replace glob import `use endpoints::*` with explicit imports. - Remove unused `revm_state()` method from `StorageRpcCtx`. Co-Authored-By: Claude Opus 4.6 --- crates/rpc-storage/src/ctx.rs | 44 +++++++++++++++++++++---- crates/rpc-storage/src/eth/endpoints.rs | 28 ++-------------- crates/rpc-storage/src/eth/mod.rs | 7 +++- 3 files changed, 47 insertions(+), 32 deletions(-) diff --git a/crates/rpc-storage/src/ctx.rs b/crates/rpc-storage/src/ctx.rs index c4db8f4..f8623d8 100644 --- a/crates/rpc-storage/src/ctx.rs +++ b/crates/rpc-storage/src/ctx.rs @@ -1,6 +1,10 @@ //! RPC context wrapping [`UnifiedStorage`]. -use crate::resolve::BlockTags; +use crate::{ + EthError, + resolve::{BlockTags, resolve_block_number_or_tag}, +}; +use alloy::eips::BlockId; use signet_cold::ColdStorageReadHandle; use signet_hot::HotKv; use signet_hot::model::{HotKvRead, RevmRead}; @@ -11,6 +15,18 @@ use std::sync::Arc; use trevm::revm::database::DBErrorMarker; use trevm::revm::database::StateBuilder; +/// Resolved block context for EVM execution. +/// +/// Contains the header and a revm-compatible database snapshot at the +/// resolved block height, ready for use with `signet_evm`. +#[derive(Debug)] +pub(crate) struct EvmBlockContext { + /// The resolved block header. + pub header: alloy::consensus::Header, + /// The revm database at the resolved height. 
+ pub db: trevm::revm::database::State, +} + /// RPC context backed by [`UnifiedStorage`]. /// /// Provides access to hot storage (state), cold storage (blocks/txs/receipts), @@ -111,15 +127,31 @@ impl StorageRpcCtx { Ok(StateBuilder::new_with_database(revm_read).build()) } - /// Create a revm-compatible database at the current tip. - pub fn revm_state( + /// Resolve a [`BlockId`] to a header and revm database in one pass. + /// + /// For hash-based IDs, fetches the header directly by hash. For + /// tag/number-based IDs, resolves the tag then fetches the header by + /// number. This avoids a redundant header lookup that would occur if + /// resolving to a block number first. + pub(crate) async fn resolve_evm_block( &self, - ) -> signet_storage::StorageResult>> + id: BlockId, + ) -> Result>, EthError> where H::RoTx: Send + Sync, ::Error: DBErrorMarker, { - let revm_read = self.inner.storage.revm_reader()?; - Ok(StateBuilder::new_with_database(revm_read).build()) + let cold = self.cold(); + let header = match id { + BlockId::Hash(h) => cold.get_header_by_hash(h.block_hash).await?, + BlockId::Number(tag) => { + let height = resolve_block_number_or_tag(tag, self.tags())?; + cold.get_header_by_number(height).await? + } + } + .ok_or(EthError::BlockNotFound(id))?; + + let db = self.revm_state_at_height(header.number)?; + Ok(EvmBlockContext { header, db }) } } diff --git a/crates/rpc-storage/src/eth/endpoints.rs b/crates/rpc-storage/src/eth/endpoints.rs index d7eb366..5659bf4 100644 --- a/crates/rpc-storage/src/eth/endpoints.rs +++ b/crates/rpc-storage/src/eth/endpoints.rs @@ -1,7 +1,7 @@ //! ETH namespace RPC endpoint implementations. 
use crate::{ - ctx::StorageRpcCtx, + ctx::{EvmBlockContext, StorageRpcCtx}, eth::helpers::{ AddrWithBlock, BlockParams, BlockRangeInclusiveIter, CfgFiller, StorageAtArgs, TxParams, await_handler, build_receipt, build_receipt_from_parts, build_rpc_transaction, @@ -507,18 +507,7 @@ where let span = trace_span!("eth_call", block_id = %id); let task = async move { - let cold = ctx.cold(); - let height = response_tri!(resolve_block_id(id, ctx.tags(), &cold).await); - - let header = response_tri!(cold.get_header_by_number(height).await); - let Some(header) = header else { - return ResponsePayload::internal_error_with_message_and_obj( - "block not found".into(), - id.to_string().into(), - ); - }; - - let db = response_tri!(ctx.revm_state_at_height(height)); + let EvmBlockContext { header, db } = response_tri!(ctx.resolve_evm_block(id).await); let trevm = signet_evm::signet_evm(db, ctx.constants().clone()) .fill_cfg(&CfgFiller(ctx.chain_id())) @@ -576,18 +565,7 @@ where let span = trace_span!("eth_estimateGas", block_id = %id); let task = async move { - let cold = ctx.cold(); - let height = response_tri!(resolve_block_id(id, ctx.tags(), &cold).await); - - let header = response_tri!(cold.get_header_by_number(height).await); - let Some(header) = header else { - return ResponsePayload::internal_error_with_message_and_obj( - "block not found".into(), - id.to_string().into(), - ); - }; - - let db = response_tri!(ctx.revm_state_at_height(height)); + let EvmBlockContext { header, db } = response_tri!(ctx.resolve_evm_block(id).await); let trevm = signet_evm::signet_evm(db, ctx.constants().clone()) .fill_cfg(&CfgFiller(ctx.chain_id())) diff --git a/crates/rpc-storage/src/eth/mod.rs b/crates/rpc-storage/src/eth/mod.rs index 6806064..e5dece7 100644 --- a/crates/rpc-storage/src/eth/mod.rs +++ b/crates/rpc-storage/src/eth/mod.rs @@ -1,7 +1,12 @@ //! ETH namespace RPC router backed by storage. 
mod endpoints; -use endpoints::*; +use endpoints::{ + addr_tx_count, balance, block, block_number, block_receipts, block_tx_count, call, chain_id, + code_at, estimate_gas, get_logs, header_by, not_supported, raw_transaction_by_hash, + raw_tx_by_block_and_index, send_raw_transaction, storage_at, transaction_by_hash, + transaction_receipt, tx_by_block_and_index, +}; mod error; pub use error::EthError; From 49ffbb60348355911e950ec678a5fc81a37188c5 Mon Sep 17 00:00:00 2001 From: James Date: Thu, 12 Feb 2026 08:15:07 -0500 Subject: [PATCH 07/31] refactor: move block resolution to StorageRpcCtx methods, drop RoTx bounds MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Move `resolve_block_id` and `resolve_block_number_or_tag` from free functions in resolve.rs to `resolve_block_id` and `resolve_block_tag` methods on `StorageRpcCtx`. This eliminates repeated `ctx.tags()` and `ctx.cold()` threading at every call site. - `resolve_block_tag` returns `u64` directly (infallible) instead of `Result`, simplifying callers like `get_logs`. - Remove `H::RoTx: Send + Sync + 'static` bounds from all endpoint functions, router, and ctx methods — the trait already provides these. 
Co-Authored-By: Claude Opus 4.6 --- crates/rpc-storage/src/ctx.rs | 46 +++++++++++++++++++--- crates/rpc-storage/src/eth/endpoints.rs | 52 ++++++------------------- crates/rpc-storage/src/eth/mod.rs | 1 - crates/rpc-storage/src/lib.rs | 1 - crates/rpc-storage/src/resolve.rs | 45 +-------------------- 5 files changed, 54 insertions(+), 91 deletions(-) diff --git a/crates/rpc-storage/src/ctx.rs b/crates/rpc-storage/src/ctx.rs index f8623d8..754e7c5 100644 --- a/crates/rpc-storage/src/ctx.rs +++ b/crates/rpc-storage/src/ctx.rs @@ -2,9 +2,9 @@ use crate::{ EthError, - resolve::{BlockTags, resolve_block_number_or_tag}, + resolve::{BlockTags, ResolveError}, }; -use alloy::eips::BlockId; +use alloy::eips::{BlockId, BlockNumberOrTag}; use signet_cold::ColdStorageReadHandle; use signet_hot::HotKv; use signet_hot::model::{HotKvRead, RevmRead}; @@ -111,6 +111,44 @@ impl StorageRpcCtx { self.inner.tx_cache.as_ref() } + /// Resolve a [`BlockNumberOrTag`] to a block number. + /// + /// This is synchronous — no cold storage lookup is needed. + /// + /// - `Latest` / `Pending` → latest tag + /// - `Safe` → safe tag + /// - `Finalized` → finalized tag + /// - `Earliest` → `0` + /// - `Number(n)` → `n` + pub(crate) fn resolve_block_tag(&self, tag: BlockNumberOrTag) -> u64 { + match tag { + BlockNumberOrTag::Latest | BlockNumberOrTag::Pending => self.tags().latest(), + BlockNumberOrTag::Safe => self.tags().safe(), + BlockNumberOrTag::Finalized => self.tags().finalized(), + BlockNumberOrTag::Earliest => 0, + BlockNumberOrTag::Number(n) => n, + } + } + + /// Resolve a [`BlockId`] to a block number. + /// + /// For tag/number-based IDs, resolves synchronously via + /// [`resolve_block_tag`](Self::resolve_block_tag). For hash-based IDs, + /// fetches the header from cold storage to obtain the block number. 
+ pub(crate) async fn resolve_block_id(&self, id: BlockId) -> Result { + match id { + BlockId::Number(tag) => Ok(self.resolve_block_tag(tag)), + BlockId::Hash(h) => { + let header = self + .cold() + .get_header_by_hash(h.block_hash) + .await? + .ok_or(ResolveError::HashNotFound(h.block_hash))?; + Ok(header.number) + } + } + } + /// Create a revm-compatible database at a specific block height. /// /// The returned `State>` implements both `Database` and @@ -120,7 +158,6 @@ impl StorageRpcCtx { height: u64, ) -> signet_storage::StorageResult>> where - H::RoTx: Send + Sync, ::Error: DBErrorMarker, { let revm_read = self.inner.storage.revm_reader_at_height(height)?; @@ -138,14 +175,13 @@ impl StorageRpcCtx { id: BlockId, ) -> Result>, EthError> where - H::RoTx: Send + Sync, ::Error: DBErrorMarker, { let cold = self.cold(); let header = match id { BlockId::Hash(h) => cold.get_header_by_hash(h.block_hash).await?, BlockId::Number(tag) => { - let height = resolve_block_number_or_tag(tag, self.tags())?; + let height = self.resolve_block_tag(tag); cold.get_header_by_number(height).await? 
} } diff --git a/crates/rpc-storage/src/eth/endpoints.rs b/crates/rpc-storage/src/eth/endpoints.rs index 5659bf4..8868696 100644 --- a/crates/rpc-storage/src/eth/endpoints.rs +++ b/crates/rpc-storage/src/eth/endpoints.rs @@ -7,7 +7,6 @@ use crate::{ await_handler, build_receipt, build_receipt_from_parts, build_rpc_transaction, normalize_gas_stateless, response_tri, }, - resolve::{resolve_block_id, resolve_block_number_or_tag}, }; use ajj::{HandlerCtx, ResponsePayload}; use alloy::{ @@ -60,7 +59,6 @@ pub(crate) async fn block( where T: Into, H: HotKv + Send + Sync + 'static, - H::RoTx: Send + Sync + 'static, ::Error: DBErrorMarker, { let id = t.into(); @@ -68,7 +66,7 @@ where let task = async move { let cold = ctx.cold(); - let block_num = resolve_block_id(id, ctx.tags(), &cold).await.map_err(|e| e.to_string())?; + let block_num = ctx.resolve_block_id(id).await.map_err(|e| e.to_string())?; let (header, txs) = tokio::try_join!( cold.get_header_by_number(block_num), @@ -119,14 +117,13 @@ pub(crate) async fn block_tx_count( where T: Into, H: HotKv + Send + Sync + 'static, - H::RoTx: Send + Sync + 'static, ::Error: DBErrorMarker, { let id = t.into(); let task = async move { let cold = ctx.cold(); - let block_num = resolve_block_id(id, ctx.tags(), &cold).await.map_err(|e| e.to_string())?; + let block_num = ctx.resolve_block_id(id).await.map_err(|e| e.to_string())?; cold.get_transaction_count(block_num) .await @@ -144,12 +141,11 @@ pub(crate) async fn block_receipts( ) -> Result>, String> where H: HotKv + Send + Sync + 'static, - H::RoTx: Send + Sync + 'static, ::Error: DBErrorMarker, { let task = async move { let cold = ctx.cold(); - let block_num = resolve_block_id(id, ctx.tags(), &cold).await.map_err(|e| e.to_string())?; + let block_num = ctx.resolve_block_id(id).await.map_err(|e| e.to_string())?; let (header, txs, receipts) = tokio::try_join!( cold.get_header_by_number(block_num), @@ -207,14 +203,13 @@ pub(crate) async fn header_by( where T: Into, H: HotKv + Send + 
Sync + 'static, - H::RoTx: Send + Sync + 'static, ::Error: DBErrorMarker, { let id = t.into(); let task = async move { let cold = ctx.cold(); - let block_num = resolve_block_id(id, ctx.tags(), &cold).await.map_err(|e| e.to_string())?; + let block_num = ctx.resolve_block_id(id).await.map_err(|e| e.to_string())?; cold.get_header_by_number(block_num) .await @@ -236,7 +231,6 @@ pub(crate) async fn transaction_by_hash( ) -> Result, String> where H: HotKv + Send + Sync + 'static, - H::RoTx: Send + Sync + 'static, ::Error: DBErrorMarker, { let task = async move { @@ -265,7 +259,6 @@ pub(crate) async fn raw_transaction_by_hash( ) -> Result, String> where H: HotKv + Send + Sync + 'static, - H::RoTx: Send + Sync + 'static, ::Error: DBErrorMarker, { let task = async move { @@ -287,14 +280,13 @@ pub(crate) async fn tx_by_block_and_index( where T: Into, H: HotKv + Send + Sync + 'static, - H::RoTx: Send + Sync + 'static, ::Error: DBErrorMarker, { let id = t.into(); let task = async move { let cold = ctx.cold(); - let block_num = resolve_block_id(id, ctx.tags(), &cold).await.map_err(|e| e.to_string())?; + let block_num = ctx.resolve_block_id(id).await.map_err(|e| e.to_string())?; let Some(confirmed) = cold .get_tx_by_block_and_index(block_num, index.to::()) @@ -323,14 +315,13 @@ pub(crate) async fn raw_tx_by_block_and_index( where T: Into, H: HotKv + Send + Sync + 'static, - H::RoTx: Send + Sync + 'static, ::Error: DBErrorMarker, { let id = t.into(); let task = async move { let cold = ctx.cold(); - let block_num = resolve_block_id(id, ctx.tags(), &cold).await.map_err(|e| e.to_string())?; + let block_num = ctx.resolve_block_id(id).await.map_err(|e| e.to_string())?; cold.get_tx_by_block_and_index(block_num, index.to::()) .await @@ -348,7 +339,6 @@ pub(crate) async fn transaction_receipt( ) -> Result, String> where H: HotKv + Send + Sync + 'static, - H::RoTx: Send + Sync + 'static, ::Error: DBErrorMarker, { let task = async move { @@ -378,14 +368,12 @@ pub(crate) async fn balance( ) 
-> Result where H: HotKv + Send + Sync + 'static, - H::RoTx: Send + Sync + 'static, ::Error: DBErrorMarker, { let block = block.unwrap_or(BlockId::latest()); let task = async move { - let cold = ctx.cold(); - let height = resolve_block_id(block, ctx.tags(), &cold).await.map_err(|e| e.to_string())?; + let height = ctx.resolve_block_id(block).await.map_err(|e| e.to_string())?; let reader = ctx.hot_reader().map_err(|e| e.to_string())?; let acct = @@ -404,14 +392,12 @@ pub(crate) async fn storage_at( ) -> Result where H: HotKv + Send + Sync + 'static, - H::RoTx: Send + Sync + 'static, ::Error: DBErrorMarker, { let block = block.unwrap_or(BlockId::latest()); let task = async move { - let cold = ctx.cold(); - let height = resolve_block_id(block, ctx.tags(), &cold).await.map_err(|e| e.to_string())?; + let height = ctx.resolve_block_id(block).await.map_err(|e| e.to_string())?; let reader = ctx.hot_reader().map_err(|e| e.to_string())?; let val = reader @@ -431,14 +417,12 @@ pub(crate) async fn addr_tx_count( ) -> Result where H: HotKv + Send + Sync + 'static, - H::RoTx: Send + Sync + 'static, ::Error: DBErrorMarker, { let block = block.unwrap_or(BlockId::latest()); let task = async move { - let cold = ctx.cold(); - let height = resolve_block_id(block, ctx.tags(), &cold).await.map_err(|e| e.to_string())?; + let height = ctx.resolve_block_id(block).await.map_err(|e| e.to_string())?; let reader = ctx.hot_reader().map_err(|e| e.to_string())?; let acct = @@ -457,14 +441,12 @@ pub(crate) async fn code_at( ) -> Result where H: HotKv + Send + Sync + 'static, - H::RoTx: Send + Sync + 'static, ::Error: DBErrorMarker, { let block = block.unwrap_or(BlockId::latest()); let task = async move { - let cold = ctx.cold(); - let height = resolve_block_id(block, ctx.tags(), &cold).await.map_err(|e| e.to_string())?; + let height = ctx.resolve_block_id(block).await.map_err(|e| e.to_string())?; let reader = ctx.hot_reader().map_err(|e| e.to_string())?; let acct = @@ -497,7 +479,6 @@ pub(crate) 
async fn call( ) -> ResponsePayload where H: HotKv + Send + Sync + 'static, - H::RoTx: Send + Sync + 'static, ::Error: DBErrorMarker, { let max_gas = ctx.rpc_gas_cap(); @@ -555,7 +536,6 @@ pub(crate) async fn estimate_gas( ) -> ResponsePayload where H: HotKv + Send + Sync + 'static, - H::RoTx: Send + Sync + 'static, ::Error: DBErrorMarker, { let max_gas = ctx.rpc_gas_cap(); @@ -610,7 +590,6 @@ pub(crate) async fn send_raw_transaction( ) -> Result where H: HotKv + Send + Sync + 'static, - H::RoTx: Send + Sync + 'static, ::Error: DBErrorMarker, { let Some(tx_cache) = ctx.tx_cache().cloned() else { @@ -651,7 +630,6 @@ pub(crate) async fn get_logs( ) -> Result, String> where H: HotKv + Send + Sync + 'static, - H::RoTx: Send + Sync + 'static, ::Error: DBErrorMarker, { let task = async move { @@ -686,15 +664,9 @@ where } alloy::rpc::types::FilterBlockOption::Range { from_block, to_block } => { - let from = from_block - .map(|b| resolve_block_number_or_tag(b, ctx.tags())) - .transpose() - .map_err(|e| e.to_string())? - .unwrap_or(0); + let from = from_block.map(|b| ctx.resolve_block_tag(b)).unwrap_or(0); let to = to_block - .map(|b| resolve_block_number_or_tag(b, ctx.tags())) - .transpose() - .map_err(|e| e.to_string())? 
+ .map(|b| ctx.resolve_block_tag(b)) .unwrap_or_else(|| ctx.tags().latest()); if from > to { diff --git a/crates/rpc-storage/src/eth/mod.rs b/crates/rpc-storage/src/eth/mod.rs index e5dece7..f8f70b1 100644 --- a/crates/rpc-storage/src/eth/mod.rs +++ b/crates/rpc-storage/src/eth/mod.rs @@ -23,7 +23,6 @@ use trevm::revm::database::DBErrorMarker; pub(crate) fn eth() -> ajj::Router> where H: HotKv + Send + Sync + 'static, - H::RoTx: Send + Sync + 'static, ::Error: DBErrorMarker, { ajj::Router::new() diff --git a/crates/rpc-storage/src/lib.rs b/crates/rpc-storage/src/lib.rs index ac3908c..795e972 100644 --- a/crates/rpc-storage/src/lib.rs +++ b/crates/rpc-storage/src/lib.rs @@ -24,7 +24,6 @@ pub use eth::EthError; pub fn eth() -> ajj::Router> where H: signet_hot::HotKv + Send + Sync + 'static, - H::RoTx: Send + Sync + 'static, ::Error: trevm::revm::database::DBErrorMarker, { eth::eth() diff --git a/crates/rpc-storage/src/resolve.rs b/crates/rpc-storage/src/resolve.rs index 8c12154..f5f6ebe 100644 --- a/crates/rpc-storage/src/resolve.rs +++ b/crates/rpc-storage/src/resolve.rs @@ -4,11 +4,7 @@ //! and Finalized block numbers. The RPC context owner is responsible for //! updating these as the chain progresses. -use alloy::{ - eips::{BlockId, BlockNumberOrTag}, - primitives::B256, -}; -use signet_cold::ColdStorageReadHandle; +use alloy::primitives::B256; use std::sync::{ Arc, atomic::{AtomicU64, Ordering}, @@ -88,42 +84,3 @@ pub enum ResolveError { #[error("block hash not found: {0}")] HashNotFound(B256), } - -/// Resolve a [`BlockId`] to a block number. 
-/// -/// - `Latest` / `Pending` → `tags.latest()` -/// - `Safe` → `tags.safe()` -/// - `Finalized` → `tags.finalized()` -/// - `Earliest` → `0` -/// - `Number(n)` → `n` -/// - `Hash(h)` → cold storage header lookup → `header.number` -pub(crate) async fn resolve_block_id( - id: BlockId, - tags: &BlockTags, - cold: &ColdStorageReadHandle, -) -> Result { - match id { - BlockId::Number(tag) => resolve_block_number_or_tag(tag, tags), - BlockId::Hash(h) => { - let header = cold - .get_header_by_hash(h.block_hash) - .await? - .ok_or(ResolveError::HashNotFound(h.block_hash))?; - Ok(header.number) - } - } -} - -/// Resolve a [`BlockNumberOrTag`] to a block number (sync, no cold lookup needed). -pub(crate) fn resolve_block_number_or_tag( - tag: BlockNumberOrTag, - tags: &BlockTags, -) -> Result { - Ok(match tag { - BlockNumberOrTag::Latest | BlockNumberOrTag::Pending => tags.latest(), - BlockNumberOrTag::Safe => tags.safe(), - BlockNumberOrTag::Finalized => tags.finalized(), - BlockNumberOrTag::Earliest => 0, - BlockNumberOrTag::Number(n) => n, - }) -} From e7a543b4ebc0d8fb2290fc5048776d4bfc991373 Mon Sep 17 00:00:00 2001 From: James Date: Thu, 12 Feb 2026 08:33:07 -0500 Subject: [PATCH 08/31] feat: add StorageRpcConfig and integrate RPC configuration values Replace the bare `rpc_gas_cap` constructor parameter with a `StorageRpcConfig` struct that bundles all RPC configuration. This moves `max_blocks_per_filter` from a hard-coded constant to a configurable value, adds `max_logs_per_response` enforcement in `eth_getLogs`, and pre-creates a tracing semaphore for future debug endpoint concurrency limiting. 
Co-Authored-By: Claude Opus 4.6 --- crates/rpc-storage/src/config.rs | 62 +++++++++++++++++++++++++ crates/rpc-storage/src/ctx.rs | 37 +++++++++++---- crates/rpc-storage/src/eth/endpoints.rs | 19 +++++--- crates/rpc-storage/src/lib.rs | 3 ++ crates/rpc-storage/tests/eth_rpc.rs | 5 +- 5 files changed, 109 insertions(+), 17 deletions(-) create mode 100644 crates/rpc-storage/src/config.rs diff --git a/crates/rpc-storage/src/config.rs b/crates/rpc-storage/src/config.rs new file mode 100644 index 0000000..e8edfc6 --- /dev/null +++ b/crates/rpc-storage/src/config.rs @@ -0,0 +1,62 @@ +//! Configuration for the storage-backed RPC server. + +use std::time::Duration; + +/// Configuration for the storage-backed ETH RPC server. +/// +/// Mirrors the subset of reth's `EthConfig` that applies to +/// storage-backed RPC. Fields for subsystems not yet implemented +/// (gas oracle, fee history) will be added when those features land. +/// +/// # Example +/// +/// ``` +/// use signet_rpc_storage::StorageRpcConfig; +/// +/// // Use defaults (matches reth defaults). +/// let config = StorageRpcConfig::default(); +/// assert_eq!(config.rpc_gas_cap, 30_000_000); +/// ``` +#[derive(Debug, Clone, Copy)] +pub struct StorageRpcConfig { + /// Maximum gas for `eth_call` and `eth_estimateGas`. + /// + /// Default: `30_000_000` (30M gas). + pub rpc_gas_cap: u64, + + /// Maximum block range per `eth_getLogs` query. + /// + /// Default: `10_000`. + pub max_blocks_per_filter: u64, + + /// Maximum number of logs returned per `eth_getLogs` response. + /// Set to `0` to disable the limit. + /// + /// Default: `20_000`. + pub max_logs_per_response: usize, + + /// Maximum concurrent tracing/debug requests. + /// + /// Controls the size of the semaphore that gates debug + /// namespace calls. + /// + /// Default: `25`. + pub max_tracing_requests: usize, + + /// Time-to-live for stale filters and subscriptions. + /// + /// Default: `5 minutes`. 
+ pub stale_filter_ttl: Duration, +} + +impl Default for StorageRpcConfig { + fn default() -> Self { + Self { + rpc_gas_cap: 30_000_000, + max_blocks_per_filter: 10_000, + max_logs_per_response: 20_000, + max_tracing_requests: 25, + stale_filter_ttl: Duration::from_secs(5 * 60), + } + } +} diff --git a/crates/rpc-storage/src/ctx.rs b/crates/rpc-storage/src/ctx.rs index 754e7c5..20e05cd 100644 --- a/crates/rpc-storage/src/ctx.rs +++ b/crates/rpc-storage/src/ctx.rs @@ -1,7 +1,7 @@ //! RPC context wrapping [`UnifiedStorage`]. use crate::{ - EthError, + EthError, StorageRpcConfig, resolve::{BlockTags, ResolveError}, }; use alloy::eips::{BlockId, BlockNumberOrTag}; @@ -12,6 +12,7 @@ use signet_storage::UnifiedStorage; use signet_tx_cache::TxCache; use signet_types::constants::SignetSystemConstants; use std::sync::Arc; +use tokio::sync::Semaphore; use trevm::revm::database::DBErrorMarker; use trevm::revm::database::StateBuilder; @@ -35,7 +36,7 @@ pub(crate) struct EvmBlockContext { /// # Construction /// /// ```ignore -/// let ctx = StorageRpcCtx::new(storage, constants, tags, Some(tx_cache), 30_000_000); +/// let ctx = StorageRpcCtx::new(storage, constants, tags, Some(tx_cache), StorageRpcConfig::default()); /// ``` #[derive(Debug)] pub struct StorageRpcCtx { @@ -54,7 +55,8 @@ struct StorageRpcCtxInner { constants: SignetSystemConstants, tags: BlockTags, tx_cache: Option, - rpc_gas_cap: u64, + config: StorageRpcConfig, + tracing_semaphore: Arc, } impl StorageRpcCtx { @@ -64,10 +66,18 @@ impl StorageRpcCtx { constants: SignetSystemConstants, tags: BlockTags, tx_cache: Option, - rpc_gas_cap: u64, + config: StorageRpcConfig, ) -> Self { + let tracing_semaphore = Arc::new(Semaphore::new(config.max_tracing_requests)); Self { - inner: Arc::new(StorageRpcCtxInner { storage, constants, tags, tx_cache, rpc_gas_cap }), + inner: Arc::new(StorageRpcCtxInner { + storage, + constants, + tags, + tx_cache, + config, + tracing_semaphore, + }), } } @@ -101,9 +111,20 @@ impl StorageRpcCtx 
{ self.inner.constants.ru_chain_id() } - /// Get the RPC gas cap. - pub fn rpc_gas_cap(&self) -> u64 { - self.inner.rpc_gas_cap + /// Access the RPC configuration. + pub fn config(&self) -> &StorageRpcConfig { + &self.inner.config + } + + /// Acquire a permit from the tracing semaphore. + /// + /// Limits concurrent tracing/debug requests. Callers should hold + /// the permit for the duration of their tracing operation. + pub async fn acquire_tracing_permit(&self) -> tokio::sync::OwnedSemaphorePermit { + Arc::clone(&self.inner.tracing_semaphore) + .acquire_owned() + .await + .expect("tracing semaphore closed") } /// Access the optional tx cache. diff --git a/crates/rpc-storage/src/eth/endpoints.rs b/crates/rpc-storage/src/eth/endpoints.rs index 8868696..46afd1c 100644 --- a/crates/rpc-storage/src/eth/endpoints.rs +++ b/crates/rpc-storage/src/eth/endpoints.rs @@ -481,7 +481,7 @@ where H: HotKv + Send + Sync + 'static, ::Error: DBErrorMarker, { - let max_gas = ctx.rpc_gas_cap(); + let max_gas = ctx.config().rpc_gas_cap; normalize_gas_stateless(&mut request, max_gas); let id = block.unwrap_or(BlockId::latest()); @@ -538,7 +538,7 @@ where H: HotKv + Send + Sync + 'static, ::Error: DBErrorMarker, { - let max_gas = ctx.rpc_gas_cap(); + let max_gas = ctx.config().rpc_gas_cap; normalize_gas_stateless(&mut request, max_gas); let id = block.unwrap_or(BlockId::pending()); @@ -617,9 +617,6 @@ where // Logs // --------------------------------------------------------------------------- -/// Maximum number of blocks per `eth_getLogs` range query. -const MAX_BLOCKS_PER_FILTER: u64 = 10_000; - /// Maximum headers fetched per batch when scanning bloom filters. 
const MAX_HEADERS_RANGE: u64 = 1_000; @@ -672,8 +669,9 @@ where if from > to { return Err("fromBlock must not exceed toBlock".to_string()); } - if to - from > MAX_BLOCKS_PER_FILTER { - return Err(format!("query exceeds max block range ({MAX_BLOCKS_PER_FILTER})")); + let max_blocks = ctx.config().max_blocks_per_filter; + if to - from > max_blocks { + return Err(format!("query exceeds max block range ({max_blocks})")); } let mut all_logs = Vec::new(); @@ -709,6 +707,13 @@ where let logs = collect_matching_logs(&header, block_hash, &txs, &receipts, &filter); all_logs.extend(logs); + + let max_logs = ctx.config().max_logs_per_response; + if max_logs > 0 && all_logs.len() > max_logs { + return Err(format!( + "query exceeds max logs per response ({max_logs})" + )); + } } } diff --git a/crates/rpc-storage/src/lib.rs b/crates/rpc-storage/src/lib.rs index 795e972..fac92bc 100644 --- a/crates/rpc-storage/src/lib.rs +++ b/crates/rpc-storage/src/lib.rs @@ -11,6 +11,9 @@ #![deny(unused_must_use, rust_2018_idioms)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] +mod config; +pub use config::StorageRpcConfig; + mod ctx; pub use ctx::StorageRpcCtx; diff --git a/crates/rpc-storage/tests/eth_rpc.rs b/crates/rpc-storage/tests/eth_rpc.rs index 3a34732..3f70175 100644 --- a/crates/rpc-storage/tests/eth_rpc.rs +++ b/crates/rpc-storage/tests/eth_rpc.rs @@ -16,7 +16,7 @@ use serde_json::{Value, json}; use signet_cold::{BlockData, ColdStorageHandle, ColdStorageTask, mem::MemColdBackend}; use signet_constants::SignetSystemConstants; use signet_hot::{HotKv, db::UnsafeDbWrite, mem::MemKv}; -use signet_rpc_storage::{BlockTags, StorageRpcCtx}; +use signet_rpc_storage::{BlockTags, StorageRpcConfig, StorageRpcCtx}; use signet_storage::UnifiedStorage; use signet_storage_types::Receipt; use tokio_util::sync::CancellationToken; @@ -44,7 +44,8 @@ impl TestHarness { let storage = UnifiedStorage::new(hot.clone(), cold.clone()); let constants = SignetSystemConstants::test(); let tags = 
BlockTags::new(latest, latest.saturating_sub(2), 0); - let ctx = StorageRpcCtx::new(storage, constants, tags.clone(), None, 30_000_000); + let ctx = + StorageRpcCtx::new(storage, constants, tags.clone(), None, StorageRpcConfig::default()); let app = signet_rpc_storage::eth::().into_axum("/").with_state(ctx); Self { app, cold, hot, tags, _cancel: cancel } From fed52a001dcf5426be71155acacb14c3e809d487 Mon Sep 17 00:00:00 2001 From: James Date: Thu, 12 Feb 2026 09:59:50 -0500 Subject: [PATCH 09/31] feat: port missing RPC endpoints and add integration tests Add gas oracle, filters, subscriptions, debug tracing, and signet namespaces to signet-rpc-storage. Port 15 endpoints from the old reth-backed signet-rpc crate to the storage-backed architecture. New modules: - gas_oracle: cold-storage gas price oracle (suggest_tip_cap) - interest/: filter manager, subscription manager, block notifications - debug/: traceBlockByNumber, traceBlockByHash, traceTransaction - signet/: sendOrder, callBundle Wired eth endpoints: gasPrice, maxPriorityFeePerGas, feeHistory, newFilter, newBlockFilter, uninstallFilter, getFilterChanges, getFilterLogs, subscribe, unsubscribe. Integration tests cover gas/fee queries, filter lifecycle, and debug tracing with noop tracer. 
Co-Authored-By: Claude Opus 4.6 --- crates/rpc-storage/Cargo.toml | 5 + crates/rpc-storage/src/config.rs | 27 +- crates/rpc-storage/src/ctx.rs | 24 +- crates/rpc-storage/src/debug/endpoints.rs | 199 +++++++++++ crates/rpc-storage/src/debug/error.rs | 43 +++ crates/rpc-storage/src/debug/mod.rs | 25 ++ crates/rpc-storage/src/debug/tracer.rs | 224 ++++++++++++ crates/rpc-storage/src/eth/endpoints.rs | 386 ++++++++++++++++++++- crates/rpc-storage/src/eth/helpers.rs | 15 + crates/rpc-storage/src/eth/mod.rs | 29 +- crates/rpc-storage/src/gas_oracle.rs | 55 +++ crates/rpc-storage/src/interest/filters.rs | 326 +++++++++++++++++ crates/rpc-storage/src/interest/kind.rs | 106 ++++++ crates/rpc-storage/src/interest/mod.rs | 22 ++ crates/rpc-storage/src/interest/subs.rs | 333 ++++++++++++++++++ crates/rpc-storage/src/lib.rs | 38 +- crates/rpc-storage/src/signet/endpoints.rs | 107 ++++++ crates/rpc-storage/src/signet/error.rs | 16 + crates/rpc-storage/src/signet/mod.rs | 19 + crates/rpc-storage/tests/eth_rpc.rs | 281 ++++++++++++++- 20 files changed, 2246 insertions(+), 34 deletions(-) create mode 100644 crates/rpc-storage/src/debug/endpoints.rs create mode 100644 crates/rpc-storage/src/debug/error.rs create mode 100644 crates/rpc-storage/src/debug/mod.rs create mode 100644 crates/rpc-storage/src/debug/tracer.rs create mode 100644 crates/rpc-storage/src/gas_oracle.rs create mode 100644 crates/rpc-storage/src/interest/filters.rs create mode 100644 crates/rpc-storage/src/interest/kind.rs create mode 100644 crates/rpc-storage/src/interest/mod.rs create mode 100644 crates/rpc-storage/src/interest/subs.rs create mode 100644 crates/rpc-storage/src/signet/endpoints.rs create mode 100644 crates/rpc-storage/src/signet/error.rs create mode 100644 crates/rpc-storage/src/signet/mod.rs diff --git a/crates/rpc-storage/Cargo.toml b/crates/rpc-storage/Cargo.toml index ad301ae..4a1fe01 100644 --- a/crates/rpc-storage/Cargo.toml +++ b/crates/rpc-storage/Cargo.toml @@ -18,12 +18,17 @@ 
signet-evm.workspace = true trevm = { workspace = true, features = ["call", "estimate_gas"] } signet-types.workspace = true signet-tx-cache.workspace = true +signet-bundle.workspace = true alloy.workspace = true ajj.workspace = true tokio.workspace = true +tokio-util = "0.7" tracing.workspace = true thiserror.workspace = true serde.workspace = true +dashmap = "6.1.0" +revm-inspectors.workspace = true +itertools.workspace = true [dev-dependencies] tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } diff --git a/crates/rpc-storage/src/config.rs b/crates/rpc-storage/src/config.rs index e8edfc6..98c1f26 100644 --- a/crates/rpc-storage/src/config.rs +++ b/crates/rpc-storage/src/config.rs @@ -5,8 +5,7 @@ use std::time::Duration; /// Configuration for the storage-backed ETH RPC server. /// /// Mirrors the subset of reth's `EthConfig` that applies to -/// storage-backed RPC. Fields for subsystems not yet implemented -/// (gas oracle, fee history) will be added when those features land. +/// storage-backed RPC. /// /// # Example /// @@ -47,6 +46,26 @@ pub struct StorageRpcConfig { /// /// Default: `5 minutes`. pub stale_filter_ttl: Duration, + + /// Number of recent blocks to consider for gas price suggestions. + /// + /// Default: `20`. + pub gas_oracle_block_count: u64, + + /// Percentile of effective tips to use as the gas price suggestion. + /// + /// Default: `60.0`. + pub gas_oracle_percentile: f64, + + /// Maximum header history for `eth_feeHistory` without percentiles. + /// + /// Default: `1024`. + pub max_header_history: u64, + + /// Maximum block history for `eth_feeHistory` with percentiles. + /// + /// Default: `1024`. 
+ pub max_block_history: u64, } impl Default for StorageRpcConfig { @@ -57,6 +76,10 @@ impl Default for StorageRpcConfig { max_logs_per_response: 20_000, max_tracing_requests: 25, stale_filter_ttl: Duration::from_secs(5 * 60), + gas_oracle_block_count: 20, + gas_oracle_percentile: 60.0, + max_header_history: 1024, + max_block_history: 1024, } } } diff --git a/crates/rpc-storage/src/ctx.rs b/crates/rpc-storage/src/ctx.rs index 20e05cd..a65f845 100644 --- a/crates/rpc-storage/src/ctx.rs +++ b/crates/rpc-storage/src/ctx.rs @@ -2,6 +2,7 @@ use crate::{ EthError, StorageRpcConfig, + interest::{FilterManager, NewBlockNotification, SubscriptionManager}, resolve::{BlockTags, ResolveError}, }; use alloy::eips::{BlockId, BlockNumberOrTag}; @@ -12,7 +13,7 @@ use signet_storage::UnifiedStorage; use signet_tx_cache::TxCache; use signet_types::constants::SignetSystemConstants; use std::sync::Arc; -use tokio::sync::Semaphore; +use tokio::sync::{Semaphore, broadcast}; use trevm::revm::database::DBErrorMarker; use trevm::revm::database::StateBuilder; @@ -57,18 +58,27 @@ struct StorageRpcCtxInner { tx_cache: Option, config: StorageRpcConfig, tracing_semaphore: Arc, + filter_manager: FilterManager, + sub_manager: SubscriptionManager, } impl StorageRpcCtx { /// Create a new storage-backed RPC context. + /// + /// The `notif_sender` is used by the subscription manager to receive + /// new block notifications. Callers send [`NewBlockNotification`]s on + /// this channel as blocks are appended to storage. 
pub fn new( storage: UnifiedStorage, constants: SignetSystemConstants, tags: BlockTags, tx_cache: Option, config: StorageRpcConfig, + notif_sender: broadcast::Sender, ) -> Self { let tracing_semaphore = Arc::new(Semaphore::new(config.max_tracing_requests)); + let filter_manager = FilterManager::new(config.stale_filter_ttl, config.stale_filter_ttl); + let sub_manager = SubscriptionManager::new(notif_sender, config.stale_filter_ttl); Self { inner: Arc::new(StorageRpcCtxInner { storage, @@ -77,6 +87,8 @@ impl StorageRpcCtx { tx_cache, config, tracing_semaphore, + filter_manager, + sub_manager, }), } } @@ -132,6 +144,16 @@ impl StorageRpcCtx { self.inner.tx_cache.as_ref() } + /// Access the filter manager. + pub(crate) fn filter_manager(&self) -> &FilterManager { + &self.inner.filter_manager + } + + /// Access the subscription manager. + pub(crate) fn sub_manager(&self) -> &SubscriptionManager { + &self.inner.sub_manager + } + /// Resolve a [`BlockNumberOrTag`] to a block number. /// /// This is synchronous — no cold storage lookup is needed. diff --git a/crates/rpc-storage/src/debug/endpoints.rs b/crates/rpc-storage/src/debug/endpoints.rs new file mode 100644 index 0000000..7662429 --- /dev/null +++ b/crates/rpc-storage/src/debug/endpoints.rs @@ -0,0 +1,199 @@ +//! Debug namespace RPC endpoint implementations. + +use crate::{ + ctx::StorageRpcCtx, + debug::DebugError, + eth::helpers::{CfgFiller, await_handler, response_tri}, +}; +use ajj::{HandlerCtx, ResponsePayload}; +use alloy::{ + consensus::BlockHeader, + eips::BlockId, + primitives::B256, + rpc::types::trace::geth::{GethDebugTracingOptions, GethTrace, TraceResult}, +}; +use itertools::Itertools; +use signet_evm::EvmErrored; +use signet_hot::HotKv; +use signet_hot::model::HotKvRead; +use signet_types::MagicSig; +use tracing::Instrument; +use trevm::revm::database::DBErrorMarker; + +/// Params for `debug_traceBlockByNumber` and `debug_traceBlockByHash`. 
+#[derive(Debug, serde::Deserialize)] +pub(super) struct TraceBlockParams(T, #[serde(default)] Option); + +/// Params for `debug_traceTransaction`. +#[derive(Debug, serde::Deserialize)] +pub(super) struct TraceTransactionParams(B256, #[serde(default)] Option); + +/// `debug_traceBlockByNumber` and `debug_traceBlockByHash` handler. +pub(super) async fn trace_block( + hctx: HandlerCtx, + TraceBlockParams(id, opts): TraceBlockParams, + ctx: StorageRpcCtx, +) -> ResponsePayload, DebugError> +where + T: Into, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let opts = response_tri!(opts.ok_or(DebugError::InvalidTracerConfig)); + + let _permit = ctx.acquire_tracing_permit().await; + + let id = id.into(); + let span = tracing::debug_span!("traceBlock", ?id, tracer = ?opts.tracer.as_ref()); + + let fut = async move { + let cold = ctx.cold(); + let block_num = response_tri!( + ctx.resolve_block_id(id) + .await + .map_err(|e| { DebugError::BlockNotFound(e.to_string()) }) + ); + + let (header, txs) = response_tri!( + tokio::try_join!( + cold.get_header_by_number(block_num), + cold.get_transactions_in_block(block_num), + ) + .map_err(|e| DebugError::Cold(e.to_string())) + ); + + let Some(header) = header else { + return ResponsePayload::internal_error_message( + format!("block not found: {id}").into(), + ); + }; + + let block_hash = header.hash_slow(); + + tracing::debug!(number = header.number, "Loaded block"); + + let mut frames = Vec::with_capacity(txs.len()); + + // State BEFORE this block + let db = response_tri!( + ctx.revm_state_at_height(header.number.saturating_sub(1)) + .map_err(|e| DebugError::Hot(e.to_string())) + ); + + let mut trevm = signet_evm::signet_evm(db, ctx.constants().clone()) + .fill_cfg(&CfgFiller(ctx.chain_id())) + .fill_block(&header); + + let mut txns = txs.iter().enumerate().peekable(); + for (idx, tx) in txns + .by_ref() + .peeking_take_while(|(_, t)| MagicSig::try_from_signature(t.signature()).is_none()) + { + let tx_info = 
alloy::rpc::types::TransactionInfo { + hash: Some(*tx.tx_hash()), + index: Some(idx as u64), + block_hash: Some(block_hash), + block_number: Some(header.number), + base_fee: header.base_fee_per_gas(), + }; + + let t = trevm.fill_tx(tx); + let frame; + (frame, trevm) = response_tri!(crate::debug::tracer::trace(t, &opts, tx_info)); + frames.push(TraceResult::Success { result: frame, tx_hash: Some(*tx.tx_hash()) }); + + tracing::debug!(tx_index = idx, tx_hash = ?tx.tx_hash(), "Traced transaction"); + } + + ResponsePayload::Success(frames) + } + .instrument(span); + + await_handler!(@response_option hctx.spawn_blocking(fut)) +} + +/// `debug_traceTransaction` handler. +pub(super) async fn trace_transaction( + hctx: HandlerCtx, + TraceTransactionParams(tx_hash, opts): TraceTransactionParams, + ctx: StorageRpcCtx, +) -> ResponsePayload +where + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let opts = response_tri!(opts.ok_or(DebugError::InvalidTracerConfig)); + + let _permit = ctx.acquire_tracing_permit().await; + + let span = tracing::debug_span!("traceTransaction", %tx_hash, tracer = ?opts.tracer.as_ref()); + + let fut = async move { + let cold = ctx.cold(); + + // Look up the transaction and its containing block + let confirmed = response_tri!( + cold.get_tx_by_hash(tx_hash).await.map_err(|e| DebugError::Cold(e.to_string())) + ); + + let confirmed = response_tri!(confirmed.ok_or(DebugError::TransactionNotFound)); + let (_tx, meta) = confirmed.into_parts(); + + let block_num = meta.block_number(); + let block_hash = meta.block_hash(); + + let (header, txs) = response_tri!( + tokio::try_join!( + cold.get_header_by_number(block_num), + cold.get_transactions_in_block(block_num), + ) + .map_err(|e| DebugError::Cold(e.to_string())) + ); + + let header = + response_tri!(header.ok_or(DebugError::BlockNotFound(format!("block {block_num}")))); + + tracing::debug!(number = block_num, "Loaded containing block"); + + // State BEFORE this block + let db = 
response_tri!( + ctx.revm_state_at_height(block_num.saturating_sub(1)) + .map_err(|e| DebugError::Hot(e.to_string())) + ); + + let mut trevm = signet_evm::signet_evm(db, ctx.constants().clone()) + .fill_cfg(&CfgFiller(ctx.chain_id())) + .fill_block(&header); + + // Replay all transactions up to (but not including) the target + let mut txns = txs.iter().enumerate().peekable(); + for (_idx, tx) in txns.by_ref().peeking_take_while(|(_, t)| t.tx_hash() != &tx_hash) { + if MagicSig::try_from_signature(tx.signature()).is_some() { + return ResponsePayload::internal_error_message( + DebugError::TransactionNotFound.to_string().into(), + ); + } + + trevm = response_tri!(trevm.run_tx(tx).map_err(EvmErrored::into_error)).accept_state(); + } + + let (index, tx) = response_tri!(txns.next().ok_or(DebugError::TransactionNotFound)); + + let trevm = trevm.fill_tx(tx); + + let tx_info = alloy::rpc::types::TransactionInfo { + hash: Some(*tx.tx_hash()), + index: Some(index as u64), + block_hash: Some(block_hash), + block_number: Some(header.number), + base_fee: header.base_fee_per_gas(), + }; + + let res = response_tri!(crate::debug::tracer::trace(trevm, &opts, tx_info)).0; + + ResponsePayload::Success(res) + } + .instrument(span); + + await_handler!(@response_option hctx.spawn_blocking(fut)) +} diff --git a/crates/rpc-storage/src/debug/error.rs b/crates/rpc-storage/src/debug/error.rs new file mode 100644 index 0000000..bc87c42 --- /dev/null +++ b/crates/rpc-storage/src/debug/error.rs @@ -0,0 +1,43 @@ +//! Error types for the debug namespace. + +/// Errors that can occur in the `debug` namespace. +#[derive(Debug, Clone, thiserror::Error)] +pub enum DebugError { + /// Cold storage error. + #[error("cold storage: {0}")] + Cold(String), + /// Hot storage error. + #[error("hot storage: {0}")] + Hot(String), + /// Invalid tracer configuration. + #[error("invalid tracer config")] + InvalidTracerConfig, + /// Unsupported tracer type. 
+ #[error("unsupported: {0}")] + Unsupported(&'static str), + /// EVM execution error. + #[error("evm: {0}")] + Evm(String), + /// Block not found. + #[error("block not found: {0}")] + BlockNotFound(String), + /// Transaction not found. + #[error("transaction not found")] + TransactionNotFound, +} + +impl DebugError { + /// Convert to a string by value. + pub fn into_string(self) -> String { + self.to_string() + } +} + +impl serde::Serialize for DebugError { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.serialize_str(&self.to_string()) + } +} diff --git a/crates/rpc-storage/src/debug/mod.rs b/crates/rpc-storage/src/debug/mod.rs new file mode 100644 index 0000000..172a2fc --- /dev/null +++ b/crates/rpc-storage/src/debug/mod.rs @@ -0,0 +1,25 @@ +//! Debug namespace RPC router backed by storage. + +mod endpoints; +use endpoints::{trace_block, trace_transaction}; +mod error; +pub use error::DebugError; +pub(crate) mod tracer; + +use crate::ctx::StorageRpcCtx; +use alloy::{eips::BlockNumberOrTag, primitives::B256}; +use signet_hot::HotKv; +use signet_hot::model::HotKvRead; +use trevm::revm::database::DBErrorMarker; + +/// Instantiate a `debug` API router backed by storage. +pub(crate) fn debug() -> ajj::Router> +where + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + ajj::Router::new() + .route("traceBlockByNumber", trace_block::) + .route("traceBlockByHash", trace_block::) + .route("traceTransaction", trace_transaction::) +} diff --git a/crates/rpc-storage/src/debug/tracer.rs b/crates/rpc-storage/src/debug/tracer.rs new file mode 100644 index 0000000..52ed546 --- /dev/null +++ b/crates/rpc-storage/src/debug/tracer.rs @@ -0,0 +1,224 @@ +//! Core tracing logic for the debug namespace. +//! +//! Largely adapted from reth: `crates/rpc/rpc/src/debug.rs`. 
+ +use crate::debug::DebugError; +use alloy::rpc::types::{ + TransactionInfo, + trace::geth::{ + FourByteFrame, GethDebugBuiltInTracerType, GethDebugTracerConfig, GethDebugTracerType, + GethDebugTracingOptions, GethTrace, NoopFrame, + }, +}; +use revm_inspectors::tracing::{ + FourByteInspector, MuxInspector, TracingInspector, TracingInspectorConfig, +}; +use signet_evm::{EvmNeedsTx, EvmReady}; +use tracing::instrument; +use trevm::{ + helpers::Ctx, + revm::{Database, DatabaseCommit, DatabaseRef, Inspector, context::ContextTr}, +}; + +/// Trace a transaction using the provided EVM and tracing options. +#[instrument(skip(trevm, config, tx_info), fields(tx_hash = ?tx_info.hash))] +pub(super) fn trace( + trevm: EvmReady, + config: &GethDebugTracingOptions, + tx_info: TransactionInfo, +) -> Result<(GethTrace, EvmNeedsTx), DebugError> +where + Db: Database + DatabaseCommit + DatabaseRef, + Insp: Inspector>, +{ + let Some(tracer) = &config.tracer else { + return Err(DebugError::InvalidTracerConfig); + }; + + let GethDebugTracerType::BuiltInTracer(built_in) = tracer else { + return Err(DebugError::Unsupported("JS tracer")); + }; + + match built_in { + GethDebugBuiltInTracerType::Erc7562Tracer => { + Err(DebugError::Unsupported("ERC-7562 tracing is not yet implemented")) + } + GethDebugBuiltInTracerType::FourByteTracer => trace_four_byte(trevm), + GethDebugBuiltInTracerType::CallTracer => trace_call(&config.tracer_config, trevm), + GethDebugBuiltInTracerType::FlatCallTracer => { + trace_flat_call(&config.tracer_config, trevm, tx_info) + } + GethDebugBuiltInTracerType::PreStateTracer => trace_pre_state(&config.tracer_config, trevm), + GethDebugBuiltInTracerType::NoopTracer => Ok(( + NoopFrame::default().into(), + trevm + .run() + .map_err(|err| DebugError::Evm(err.into_error().to_string()))? 
+ .accept_state(), + )), + GethDebugBuiltInTracerType::MuxTracer => trace_mux(&config.tracer_config, trevm, tx_info), + } +} + +fn trace_four_byte( + trevm: EvmReady, +) -> Result<(GethTrace, EvmNeedsTx), DebugError> +where + Db: Database + DatabaseCommit, + Insp: Inspector>, +{ + let mut four_byte = FourByteInspector::default(); + let trevm = trevm + .try_with_inspector(&mut four_byte, |trevm| trevm.run()) + .map_err(|e| DebugError::Evm(e.into_error().to_string()))?; + Ok((FourByteFrame::from(four_byte).into(), trevm.accept_state())) +} + +fn trace_call( + tracer_config: &GethDebugTracerConfig, + trevm: EvmReady, +) -> Result<(GethTrace, EvmNeedsTx), DebugError> +where + Db: Database + DatabaseCommit, + Insp: Inspector>, +{ + let call_config = + tracer_config.clone().into_call_config().map_err(|_| DebugError::InvalidTracerConfig)?; + + let mut inspector = + TracingInspector::new(TracingInspectorConfig::from_geth_call_config(&call_config)); + + let trevm = trevm + .try_with_inspector(&mut inspector, |trevm| trevm.run()) + .map_err(|e| DebugError::Evm(e.into_error().to_string()))?; + + let frame = inspector + .with_transaction_gas_limit(trevm.gas_limit()) + .into_geth_builder() + .geth_call_traces(call_config, trevm.gas_used()); + + Ok((frame.into(), trevm.accept_state())) +} + +fn trace_pre_state( + tracer_config: &GethDebugTracerConfig, + trevm: EvmReady, +) -> Result<(GethTrace, EvmNeedsTx), DebugError> +where + Db: Database + DatabaseCommit + DatabaseRef, + Insp: Inspector>, +{ + let prestate_config = tracer_config + .clone() + .into_pre_state_config() + .map_err(|_| DebugError::InvalidTracerConfig)?; + + let mut inspector = + TracingInspector::new(TracingInspectorConfig::from_geth_prestate_config(&prestate_config)); + + let trevm = trevm + .try_with_inspector(&mut inspector, |trevm| trevm.run()) + .map_err(|e| DebugError::Evm(e.into_error().to_string()))?; + let gas_limit = trevm.gas_limit(); + + // NB: state must be UNCOMMITTED for prestate diff computation. 
+ let (result, mut trevm) = trevm.take_result_and_state(); + + let frame = inspector + .with_transaction_gas_limit(gas_limit) + .into_geth_builder() + .geth_prestate_traces(&result, &prestate_config, trevm.inner_mut_unchecked().db_mut()) + .map_err(|err| DebugError::Evm(err.to_string()))?; + + // Equivalent to `trevm.accept_state()`. + trevm.inner_mut_unchecked().db_mut().commit(result.state); + + Ok((frame.into(), trevm)) +} + +fn trace_flat_call( + tracer_config: &GethDebugTracerConfig, + trevm: EvmReady, + tx_info: TransactionInfo, +) -> Result<(GethTrace, EvmNeedsTx), DebugError> +where + Db: Database + DatabaseCommit, + Insp: Inspector>, +{ + let flat_call_config = tracer_config + .clone() + .into_flat_call_config() + .map_err(|_| DebugError::InvalidTracerConfig)?; + + let mut inspector = + TracingInspector::new(TracingInspectorConfig::from_flat_call_config(&flat_call_config)); + + let trevm = trevm + .try_with_inspector(&mut inspector, |trevm| trevm.run()) + .map_err(|e| DebugError::Evm(e.into_error().to_string()))?; + + let frame = inspector + .with_transaction_gas_limit(trevm.gas_limit()) + .into_parity_builder() + .into_localized_transaction_traces(tx_info); + + Ok((frame.into(), trevm.accept_state())) +} + +fn trace_mux( + tracer_config: &GethDebugTracerConfig, + trevm: EvmReady, + tx_info: TransactionInfo, +) -> Result<(GethTrace, EvmNeedsTx), DebugError> +where + Db: Database + DatabaseCommit + DatabaseRef, + Insp: Inspector>, +{ + let mux_config = + tracer_config.clone().into_mux_config().map_err(|_| DebugError::InvalidTracerConfig)?; + + let mut inspector = MuxInspector::try_from_config(mux_config) + .map_err(|err| DebugError::Evm(err.to_string()))?; + + let trevm = trevm + .try_with_inspector(&mut inspector, |trevm| trevm.run()) + .map_err(|e| DebugError::Evm(e.into_error().to_string()))?; + + // NB: state must be UNCOMMITTED for prestate diff computation. 
+ let (result, mut trevm) = trevm.take_result_and_state(); + + let frame = inspector + .try_into_mux_frame(&result, trevm.inner_mut_unchecked().db_mut(), tx_info) + .map_err(|err| DebugError::Evm(err.to_string()))?; + + // Equivalent to `trevm.accept_state()`. + trevm.inner_mut_unchecked().db_mut().commit(result.state); + + Ok((frame.into(), trevm)) +} + +// Some code in this file has been copied and modified from reth +// +// The original license is included below: +// +// The MIT License (MIT) +// +// Copyright (c) 2022-2025 Reth Contributors +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +//. +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
diff --git a/crates/rpc-storage/src/eth/endpoints.rs b/crates/rpc-storage/src/eth/endpoints.rs index 46afd1c..4e010f0 100644 --- a/crates/rpc-storage/src/eth/endpoints.rs +++ b/crates/rpc-storage/src/eth/endpoints.rs @@ -3,20 +3,23 @@ use crate::{ ctx::{EvmBlockContext, StorageRpcCtx}, eth::helpers::{ - AddrWithBlock, BlockParams, BlockRangeInclusiveIter, CfgFiller, StorageAtArgs, TxParams, - await_handler, build_receipt, build_receipt_from_parts, build_rpc_transaction, - normalize_gas_stateless, response_tri, + AddrWithBlock, BlockParams, BlockRangeInclusiveIter, CfgFiller, FeeHistoryArgs, + StorageAtArgs, SubscribeArgs, TxParams, await_handler, build_receipt, + build_receipt_from_parts, build_rpc_transaction, normalize_gas_stateless, response_tri, }, + gas_oracle, + interest::{FilterOutput, InterestKind}, }; use ajj::{HandlerCtx, ResponsePayload}; use alloy::{ - consensus::{BlockHeader, TxReceipt}, + consensus::{BlockHeader, Transaction, TxReceipt}, eips::{ - BlockId, + BlockId, BlockNumberOrTag, + eip1559::BaseFeeParams, eip2718::{Decodable2718, Encodable2718}, }, primitives::{B256, U64, U256}, - rpc::types::{Block, BlockTransactions, Filter, FilteredParams, Log}, + rpc::types::{Block, BlockTransactions, FeeHistory, Filter, FilteredParams, Log}, }; use signet_cold::{HeaderSpecifier, ReceiptSpecifier}; use signet_hot::model::HotKvRead; @@ -47,6 +50,215 @@ pub(crate) async fn chain_id(ctx: StorageRpcCtx) -> Result Ok(U64::from(ctx.chain_id())) } +// --------------------------------------------------------------------------- +// Gas & Fee Queries +// --------------------------------------------------------------------------- + +pub(crate) async fn gas_price(hctx: HandlerCtx, ctx: StorageRpcCtx) -> Result +where + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let task = async move { + let latest = ctx.tags().latest(); + let cold = ctx.cold(); + + let tip = gas_oracle::suggest_tip_cap(&cold, latest, ctx.config()) + .await + .map_err(|e| 
e.to_string())?; + + let base_fee = cold + .get_header_by_number(latest) + .await + .map_err(|e| e.to_string())? + .and_then(|h| h.base_fee_per_gas) + .unwrap_or_default(); + + Ok(tip + U256::from(base_fee)) + }; + + await_handler!(@option hctx.spawn_blocking(task)) +} + +pub(crate) async fn max_priority_fee_per_gas( + hctx: HandlerCtx, + ctx: StorageRpcCtx, +) -> Result +where + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let task = async move { + let latest = ctx.tags().latest(); + gas_oracle::suggest_tip_cap(&ctx.cold(), latest, ctx.config()) + .await + .map_err(|e| e.to_string()) + }; + + await_handler!(@option hctx.spawn_blocking(task)) +} + +pub(crate) async fn fee_history( + hctx: HandlerCtx, + FeeHistoryArgs(block_count, newest, reward_percentiles): FeeHistoryArgs, + ctx: StorageRpcCtx, +) -> Result +where + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let task = async move { + let mut block_count = block_count.to::(); + + if block_count == 0 { + return Ok(FeeHistory::default()); + } + + let max_fee_history = if reward_percentiles.is_none() { + ctx.config().max_header_history + } else { + ctx.config().max_block_history + }; + + block_count = block_count.min(max_fee_history); + + let mut newest = newest; + if newest.is_pending() { + newest = BlockNumberOrTag::Latest; + block_count = block_count.saturating_sub(1); + } + + let end_block = ctx.resolve_block_tag(newest); + let end_block_plus = end_block + 1; + + block_count = block_count.min(end_block_plus); + + // Validate percentiles + if let Some(percentiles) = &reward_percentiles + && percentiles.windows(2).any(|w| w[0] > w[1] || w[0] > 100.) 
+ { + return Err("invalid reward percentiles".to_string()); + } + + let start_block = end_block_plus - block_count; + let cold = ctx.cold(); + + let specs: Vec<_> = (start_block..=end_block).map(HeaderSpecifier::Number).collect(); + let headers = cold.get_headers(specs).await.map_err(|e| e.to_string())?; + + let mut base_fee_per_gas: Vec = Vec::with_capacity(headers.len() + 1); + let mut gas_used_ratio: Vec = Vec::with_capacity(headers.len()); + let mut rewards: Vec> = Vec::new(); + + for (offset, maybe_header) in headers.iter().enumerate() { + let Some(header) = maybe_header else { + return Err(format!("missing header at block {}", start_block + offset as u64)); + }; + + base_fee_per_gas.push(header.base_fee_per_gas.unwrap_or_default() as u128); + gas_used_ratio.push(if header.gas_limit > 0 { + header.gas_used as f64 / header.gas_limit as f64 + } else { + 0.0 + }); + + if let Some(percentiles) = &reward_percentiles { + let block_num = start_block + offset as u64; + + let (txs, receipts) = tokio::try_join!( + cold.get_transactions_in_block(block_num), + cold.get_receipts_in_block(block_num), + ) + .map_err(|e| e.to_string())?; + + let block_rewards = calculate_reward_percentiles( + percentiles, + header.gas_used, + header.base_fee_per_gas.unwrap_or_default(), + &txs, + &receipts, + ); + rewards.push(block_rewards); + } + } + + // Next block base fee + if let Some(last_header) = headers.last().and_then(|h| h.as_ref()) { + base_fee_per_gas.push( + last_header.next_block_base_fee(BaseFeeParams::ethereum()).unwrap_or_default() + as u128, + ); + } + + let base_fee_per_blob_gas = vec![0; base_fee_per_gas.len()]; + let blob_gas_used_ratio = vec![0.; gas_used_ratio.len()]; + + Ok(FeeHistory { + base_fee_per_gas, + gas_used_ratio, + base_fee_per_blob_gas, + blob_gas_used_ratio, + oldest_block: start_block, + reward: reward_percentiles.map(|_| rewards), + }) + }; + + await_handler!(@option hctx.spawn_blocking(task)) +} + +/// Calculate reward percentiles for a single block. 
+/// +/// Sorts transactions by effective tip ascending, then walks +/// cumulative gas used to find the tip value at each percentile. +fn calculate_reward_percentiles( + percentiles: &[f64], + gas_used: u64, + base_fee: u64, + txs: &[signet_storage_types::TransactionSigned], + receipts: &[signet_storage_types::Receipt], +) -> Vec { + if gas_used == 0 || txs.is_empty() { + return vec![0; percentiles.len()]; + } + + // Pair each tx's effective tip with its gas used (from receipt cumulative deltas) + let mut tx_gas_and_tip: Vec<(u64, u128)> = txs + .iter() + .zip(receipts.iter()) + .enumerate() + .map(|(i, (tx, receipt))| { + let prev_cumulative = + if i > 0 { receipts[i - 1].inner.cumulative_gas_used() } else { 0 }; + let tx_gas = receipt.inner.cumulative_gas_used() - prev_cumulative; + let tip = tx.effective_tip_per_gas(base_fee).unwrap_or_default(); + (tx_gas, tip) + }) + .collect(); + + // Sort by tip ascending + tx_gas_and_tip.sort_by_key(|&(_, tip)| tip); + + let mut result = Vec::with_capacity(percentiles.len()); + let mut cumulative_gas: u64 = 0; + let mut tx_idx = 0; + + for &percentile in percentiles { + let threshold = (gas_used as f64 * percentile / 100.0) as u64; + + while tx_idx < tx_gas_and_tip.len() { + cumulative_gas += tx_gas_and_tip[tx_idx].0; + if cumulative_gas >= threshold { + break; + } + tx_idx += 1; + } + + result.push(tx_gas_and_tip.get(tx_idx).map(|&(_, tip)| tip).unwrap_or_default()); + } + + result +} + // --------------------------------------------------------------------------- // Block Queries // --------------------------------------------------------------------------- @@ -726,7 +938,7 @@ where } /// Extract logs from a block's receipts that match the filter's address and topic criteria. 
-fn collect_matching_logs( +pub(crate) fn collect_matching_logs( header: &alloy::consensus::Header, block_hash: B256, txs: &[signet_storage_types::TransactionSigned], @@ -758,3 +970,163 @@ fn collect_matching_logs( logs } + +// --------------------------------------------------------------------------- +// Filters +// --------------------------------------------------------------------------- + +pub(crate) async fn new_filter( + hctx: HandlerCtx, + (filter,): (Filter,), + ctx: StorageRpcCtx, +) -> Result +where + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let task = async move { + let latest = ctx.tags().latest(); + Ok(ctx.filter_manager().install_log_filter(latest, filter)) + }; + + await_handler!(@option hctx.spawn_blocking(task)) +} + +pub(crate) async fn new_block_filter( + hctx: HandlerCtx, + ctx: StorageRpcCtx, +) -> Result +where + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let task = async move { + let latest = ctx.tags().latest(); + Ok(ctx.filter_manager().install_block_filter(latest)) + }; + + await_handler!(@option hctx.spawn_blocking(task)) +} + +pub(crate) async fn uninstall_filter( + (id,): (U64,), + ctx: StorageRpcCtx, +) -> Result +where + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + Ok(ctx.filter_manager().uninstall(id).is_some()) +} + +pub(crate) async fn get_filter_changes( + hctx: HandlerCtx, + (id,): (U64,), + ctx: StorageRpcCtx, +) -> Result +where + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let task = async move { + let fm = ctx.filter_manager(); + let mut entry = fm.get_mut(id).ok_or_else(|| format!("filter not found: {id}"))?; + + let latest = ctx.tags().latest(); + let start = entry.next_start_block(); + + if start > latest { + entry.mark_polled(latest); + return Ok(entry.empty_output()); + } + + let cold = ctx.cold(); + + if entry.is_block() { + let specs: Vec<_> = (start..=latest).map(HeaderSpecifier::Number).collect(); + let headers = 
cold.get_headers(specs).await.map_err(|e| e.to_string())?; + let hashes: Vec = headers.into_iter().flatten().map(|h| h.hash_slow()).collect(); + entry.mark_polled(latest); + Ok(FilterOutput::from(hashes)) + } else { + let filter = entry.as_filter().cloned().unwrap(); + let address_filter = FilteredParams::address_filter(&filter.address); + let topics_filter = FilteredParams::topics_filter(&filter.topics); + + let mut all_logs = Vec::new(); + + for (chunk_start, chunk_end) in + BlockRangeInclusiveIter::new(start..=latest, MAX_HEADERS_RANGE) + { + let specs: Vec<_> = + (chunk_start..=chunk_end).map(HeaderSpecifier::Number).collect(); + let headers = cold.get_headers(specs).await.map_err(|e| e.to_string())?; + + for (offset, maybe_header) in headers.into_iter().enumerate() { + let Some(header) = maybe_header else { continue }; + + if !FilteredParams::matches_address(header.logs_bloom, &address_filter) + || !FilteredParams::matches_topics(header.logs_bloom, &topics_filter) + { + continue; + } + + let block_num = chunk_start + offset as u64; + let block_hash = header.hash_slow(); + + let (txs, receipts) = tokio::try_join!( + cold.get_transactions_in_block(block_num), + cold.get_receipts_in_block(block_num), + ) + .map_err(|e| e.to_string())?; + + all_logs.extend(collect_matching_logs( + &header, block_hash, &txs, &receipts, &filter, + )); + } + } + + entry.mark_polled(latest); + Ok(FilterOutput::from(all_logs)) + } + }; + + await_handler!(@option hctx.spawn_blocking(task)) +} + +// --------------------------------------------------------------------------- +// Subscriptions +// --------------------------------------------------------------------------- + +pub(crate) async fn subscribe( + hctx: HandlerCtx, + SubscribeArgs(kind, filter): SubscribeArgs, + ctx: StorageRpcCtx, +) -> Result +where + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let interest = match kind { + alloy::rpc::types::pubsub::SubscriptionKind::NewHeads => InterestKind::Block, + 
alloy::rpc::types::pubsub::SubscriptionKind::Logs => { + let f = filter.unwrap_or_default(); + InterestKind::Log(f) + } + other => { + return Err(format!("unsupported subscription kind: {other:?}")); + } + }; + + ctx.sub_manager() + .subscribe(&hctx, interest) + .ok_or_else(|| "notifications not enabled on this transport".to_string()) +} + +pub(crate) async fn unsubscribe((id,): (U64,), ctx: StorageRpcCtx) -> Result +where + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + Ok(ctx.sub_manager().unsubscribe(id)) +} diff --git a/crates/rpc-storage/src/eth/helpers.rs b/crates/rpc-storage/src/eth/helpers.rs index 39354ba..38ae472 100644 --- a/crates/rpc-storage/src/eth/helpers.rs +++ b/crates/rpc-storage/src/eth/helpers.rs @@ -38,6 +38,21 @@ pub(crate) struct StorageAtArgs(pub Address, pub U256, #[serde(default)] pub Opt #[derive(Debug, Deserialize)] pub(crate) struct AddrWithBlock(pub Address, #[serde(default)] pub Option); +/// Args for `eth_feeHistory`. +#[derive(Debug, Deserialize)] +pub(crate) struct FeeHistoryArgs( + pub alloy::primitives::U64, + pub alloy::eips::BlockNumberOrTag, + #[serde(default)] pub Option>, +); + +/// Args for `eth_subscribe`. +#[derive(Debug, Deserialize)] +pub(crate) struct SubscribeArgs( + pub alloy::rpc::types::pubsub::SubscriptionKind, + #[serde(default)] pub Option>, +); + /// Normalize transaction request gas without making DB reads. 
/// /// - If the gas is below `MIN_TRANSACTION_GAS`, set it to `None` diff --git a/crates/rpc-storage/src/eth/mod.rs b/crates/rpc-storage/src/eth/mod.rs index f8f70b1..463a9aa 100644 --- a/crates/rpc-storage/src/eth/mod.rs +++ b/crates/rpc-storage/src/eth/mod.rs @@ -3,15 +3,16 @@ mod endpoints; use endpoints::{ addr_tx_count, balance, block, block_number, block_receipts, block_tx_count, call, chain_id, - code_at, estimate_gas, get_logs, header_by, not_supported, raw_transaction_by_hash, - raw_tx_by_block_and_index, send_raw_transaction, storage_at, transaction_by_hash, - transaction_receipt, tx_by_block_and_index, + code_at, estimate_gas, fee_history, gas_price, get_filter_changes, get_logs, header_by, + max_priority_fee_per_gas, new_block_filter, new_filter, not_supported, raw_transaction_by_hash, + raw_tx_by_block_and_index, send_raw_transaction, storage_at, subscribe, transaction_by_hash, + transaction_receipt, tx_by_block_and_index, uninstall_filter, unsubscribe, }; mod error; pub use error::EthError; -mod helpers; +pub(crate) mod helpers; use crate::StorageRpcCtx; use alloy::{eips::BlockNumberOrTag, primitives::B256}; @@ -58,9 +59,9 @@ where // --- .route("protocolVersion", not_supported) .route("syncing", not_supported) - .route("gasPrice", not_supported) - .route("maxPriorityFeePerGas", not_supported) - .route("feeHistory", not_supported) + .route("gasPrice", gas_price::) + .route("maxPriorityFeePerGas", max_priority_fee_per_gas::) + .route("feeHistory", fee_history::) .route("coinbase", not_supported) .route("accounts", not_supported) .route("blobBaseFee", not_supported) @@ -79,12 +80,12 @@ where .route("signTypedData", not_supported) .route("getProof", not_supported) .route("createAccessList", not_supported) - .route("newFilter", not_supported) - .route("newBlockFilter", not_supported) + .route("newFilter", new_filter::) + .route("newBlockFilter", new_block_filter::) .route("newPendingTransactionFilter", not_supported) - .route("uninstallFilter", 
not_supported) - .route("getFilterChanges", not_supported) - .route("getFilterLogs", not_supported) - .route("subscribe", not_supported) - .route("unsubscribe", not_supported) + .route("uninstallFilter", uninstall_filter::) + .route("getFilterChanges", get_filter_changes::) + .route("getFilterLogs", get_filter_changes::) + .route("subscribe", subscribe::) + .route("unsubscribe", unsubscribe::) } diff --git a/crates/rpc-storage/src/gas_oracle.rs b/crates/rpc-storage/src/gas_oracle.rs new file mode 100644 index 0000000..47c23c2 --- /dev/null +++ b/crates/rpc-storage/src/gas_oracle.rs @@ -0,0 +1,55 @@ +//! Cold-storage gas oracle for computing gas price suggestions. +//! +//! Reads recent block headers and transactions from cold storage to +//! compute a suggested tip cap based on recent transaction activity. + +use alloy::{consensus::Transaction, primitives::U256}; +use signet_cold::{ColdStorageError, ColdStorageReadHandle, HeaderSpecifier}; + +use crate::StorageRpcConfig; + +/// Suggest a tip cap based on recent transaction tips. +/// +/// Reads the last `gas_oracle_block_count` blocks from cold storage, +/// computes the effective tip per gas for each transaction, sorts all +/// tips, and returns the value at `gas_oracle_percentile`. +/// +/// Returns `U256::ZERO` if no transactions are found in the range. 
+pub(crate) async fn suggest_tip_cap( + cold: &ColdStorageReadHandle, + latest: u64, + config: &StorageRpcConfig, +) -> Result { + let block_count = config.gas_oracle_block_count.min(latest + 1); + let start = latest.saturating_sub(block_count - 1); + + let specs: Vec<_> = (start..=latest).map(HeaderSpecifier::Number).collect(); + let headers = cold.get_headers(specs).await?; + + let mut all_tips: Vec = Vec::new(); + + for (offset, maybe_header) in headers.into_iter().enumerate() { + let Some(header) = maybe_header else { continue }; + let base_fee = header.base_fee_per_gas.unwrap_or_default(); + let block_num = start + offset as u64; + + let txs = cold.get_transactions_in_block(block_num).await?; + + for tx in &txs { + if let Some(tip) = tx.effective_tip_per_gas(base_fee) { + all_tips.push(tip); + } + } + } + + if all_tips.is_empty() { + return Ok(U256::ZERO); + } + + all_tips.sort_unstable(); + + let index = ((config.gas_oracle_percentile / 100.0) * (all_tips.len() - 1) as f64) as usize; + let index = index.min(all_tips.len() - 1); + + Ok(U256::from(all_tips[index])) +} diff --git a/crates/rpc-storage/src/interest/filters.rs b/crates/rpc-storage/src/interest/filters.rs new file mode 100644 index 0000000..6d98fd0 --- /dev/null +++ b/crates/rpc-storage/src/interest/filters.rs @@ -0,0 +1,326 @@ +//! Filter management for `eth_newFilter` / `eth_getFilterChanges`. + +use crate::interest::InterestKind; +use alloy::{ + primitives::{B256, U64}, + rpc::types::{Filter, Log}, +}; +use dashmap::{DashMap, mapref::one::RefMut}; +use std::{ + collections::VecDeque, + sync::{ + Arc, Weak, + atomic::{AtomicU64, Ordering}, + }, + time::{Duration, Instant}, +}; +use tracing::trace; + +type FilterId = U64; + +/// Either type for filter outputs. +#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize)] +#[serde(untagged)] +#[allow(dead_code)] +pub(crate) enum Either { + /// Log + Log(Log), + /// Block hash + Block(B256), +} + +/// The output of a filter. 
+/// +/// This will be either a list of logs or a list of block hashes. Pending tx +/// filters are not supported by Signet. For convenience, there is a special +/// variant for empty results. +#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize)] +#[serde(untagged)] +pub(crate) enum FilterOutput { + /// Empty output. Holds a `[(); 0]` to make sure it serializes as an empty + /// array. + Empty([(); 0]), + /// Logs + Log(VecDeque), + /// Block hashes + Block(VecDeque), +} + +#[allow(dead_code)] +impl FilterOutput { + /// Create an empty filter output. + pub(crate) const fn empty() -> Self { + Self::Empty([]) + } + + /// True if this is an empty filter output. + pub(crate) fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// The length of this filter output. + pub(crate) fn len(&self) -> usize { + match self { + Self::Empty(_) => 0, + Self::Log(logs) => logs.len(), + Self::Block(blocks) => blocks.len(), + } + } + + /// Extend this filter output with another. + /// + /// # Panics + /// + /// If the two filter outputs are of different types. + pub(crate) fn extend(&mut self, other: Self) { + match (self, other) { + (Self::Log(logs), Self::Log(other_logs)) => logs.extend(other_logs), + (Self::Block(blocks), Self::Block(other_blocks)) => blocks.extend(other_blocks), + (_, Self::Empty(_)) => (), + (this @ Self::Empty(_), other) => *this = other, + _ => panic!("attempted to mix log and block outputs"), + } + } + + /// Pop a value from the front of the filter output. 
+ pub(crate) fn pop_front(&mut self) -> Option { + match self { + Self::Log(logs) => logs.pop_front().map(Either::Log), + Self::Block(blocks) => blocks.pop_front().map(Either::Block), + Self::Empty(_) => None, + } + } +} + +impl From> for FilterOutput { + fn from(block_hashes: Vec) -> Self { + Self::Block(block_hashes.into()) + } +} + +impl From> for FilterOutput { + fn from(logs: Vec) -> Self { + Self::Log(logs.into()) + } +} + +impl FromIterator for FilterOutput { + fn from_iter>(iter: T) -> Self { + let inner: VecDeque<_> = iter.into_iter().collect(); + if inner.is_empty() { Self::empty() } else { Self::Log(inner) } + } +} + +impl FromIterator for FilterOutput { + fn from_iter>(iter: T) -> Self { + let inner: VecDeque<_> = iter.into_iter().collect(); + if inner.is_empty() { Self::empty() } else { Self::Block(inner) } + } +} + +/// An active filter. +/// +/// Records the filter details, the [`Instant`] at which the filter was last +/// polled, and the first block whose contents should be considered. +#[derive(Debug, Clone, PartialEq, Eq)] +pub(crate) struct ActiveFilter { + next_start_block: u64, + last_poll_time: Instant, + kind: InterestKind, +} + +impl core::fmt::Display for ActiveFilter { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!( + f, + "ActiveFilter {{ next_start_block: {}, ms_since_last_poll: {}, kind: {:?} }}", + self.next_start_block, + self.last_poll_time.elapsed().as_millis(), + self.kind + ) + } +} + +impl ActiveFilter { + /// True if this is a log filter. + #[allow(dead_code)] + pub(crate) const fn is_filter(&self) -> bool { + self.kind.is_filter() + } + + /// True if this is a block filter. + pub(crate) const fn is_block(&self) -> bool { + self.kind.is_block() + } + + /// Fallible cast to a filter. + pub(crate) const fn as_filter(&self) -> Option<&Filter> { + self.kind.as_filter() + } + + /// Mark the filter as having been polled at the given block. 
+ pub(crate) fn mark_polled(&mut self, current_block: u64) { + self.next_start_block = current_block + 1; + self.last_poll_time = Instant::now(); + } + + /// Get the next start block for the filter. + pub(crate) const fn next_start_block(&self) -> u64 { + self.next_start_block + } + + /// Get the duration since the filter was last polled. + pub(crate) fn time_since_last_poll(&self) -> Duration { + self.last_poll_time.elapsed() + } + + /// Return an empty output of the same kind as this filter. + pub(crate) const fn empty_output(&self) -> FilterOutput { + self.kind.empty_output() + } +} + +/// Inner logic for [`FilterManager`]. +#[derive(Debug)] +pub(crate) struct FilterManagerInner { + current_id: AtomicU64, + filters: DashMap, +} + +impl FilterManagerInner { + /// Create a new filter manager. + fn new() -> Self { + // Start from 1, as 0 is weird in quantity encoding. + Self { current_id: AtomicU64::new(1), filters: DashMap::new() } + } + + /// Get the next filter ID. + fn next_id(&self) -> FilterId { + FilterId::from(self.current_id.fetch_add(1, Ordering::Relaxed)) + } + + /// Get a filter by ID. + pub(crate) fn get_mut(&self, id: FilterId) -> Option> { + self.filters.get_mut(&id) + } + + fn install(&self, current_block: u64, kind: InterestKind) -> FilterId { + let id = self.next_id(); + let next_start_block = current_block + 1; + let _ = self + .filters + .insert(id, ActiveFilter { next_start_block, last_poll_time: Instant::now(), kind }); + id + } + + /// Install a new log filter. + pub(crate) fn install_log_filter(&self, current_block: u64, filter: Filter) -> FilterId { + self.install(current_block, InterestKind::Log(Box::new(filter))) + } + + /// Install a new block filter. + pub(crate) fn install_block_filter(&self, current_block: u64) -> FilterId { + self.install(current_block, InterestKind::Block) + } + + /// Uninstall a filter, returning the kind of filter that was uninstalled. 
+ pub(crate) fn uninstall(&self, id: FilterId) -> Option<(U64, ActiveFilter)> { + self.filters.remove(&id) + } + + /// Clean stale filters that have not been polled in a while. + fn clean_stale(&self, older_than: Duration) { + self.filters.retain(|_, filter| filter.time_since_last_poll() < older_than); + } +} + +/// Manager for filters. +/// +/// The manager tracks active filters, and periodically cleans stale filters. +/// Filters are stored in a [`DashMap`] that maps filter IDs to active filters. +/// Filter IDs are assigned sequentially, starting from 1. +/// +/// Calling [`Self::new`] spawns a task that periodically cleans stale filters. +/// This task runs on a separate thread to avoid [`DashMap::retain`] deadlock. +/// See [`DashMap`] documentation for more information. +#[derive(Debug, Clone)] +pub(crate) struct FilterManager { + inner: Arc, +} + +impl FilterManager { + /// Create a new filter manager. Spawn a task to clean stale filters. + pub(crate) fn new(clean_interval: Duration, age_limit: Duration) -> Self { + let inner = Arc::new(FilterManagerInner::new()); + let manager = Self { inner }; + FilterCleanTask::new(Arc::downgrade(&manager.inner), clean_interval, age_limit).spawn(); + manager + } +} + +impl std::ops::Deref for FilterManager { + type Target = FilterManagerInner; + + fn deref(&self) -> &Self::Target { + self.inner.deref() + } +} + +/// Task to clean up unpolled filters. +/// +/// This task runs on a separate thread to avoid [`DashMap::retain`] deadlocks. +#[derive(Debug)] +struct FilterCleanTask { + manager: Weak, + sleep: Duration, + age_limit: Duration, +} + +impl FilterCleanTask { + /// Create a new filter cleaner task. + const fn new(manager: Weak, sleep: Duration, age_limit: Duration) -> Self { + Self { manager, sleep, age_limit } + } + + /// Run the task. This task runs on a separate thread, which ensures that + /// [`DashMap::retain`]'s deadlock condition is not met. See [`DashMap`] + /// documentation for more information. 
+ fn spawn(self) { + std::thread::spawn(move || { + loop { + std::thread::sleep(self.sleep); + trace!("cleaning stale filters"); + match self.manager.upgrade() { + Some(manager) => manager.clean_stale(self.age_limit), + None => break, + } + } + }); + } +} + +// Some code in this file has been copied and modified from reth +// +// The original license is included below: +// +// The MIT License (MIT) +// +// Copyright (c) 2022-2025 Reth Contributors +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +//. +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. diff --git a/crates/rpc-storage/src/interest/kind.rs b/crates/rpc-storage/src/interest/kind.rs new file mode 100644 index 0000000..ff2392e --- /dev/null +++ b/crates/rpc-storage/src/interest/kind.rs @@ -0,0 +1,106 @@ +//! Filter kinds for subscriptions and polling filters. 
+ +use crate::interest::{NewBlockNotification, filters::FilterOutput, subs::SubscriptionBuffer}; +use alloy::rpc::types::{Filter, Header, Log}; +use std::collections::VecDeque; + +/// The different kinds of filters that can be created. +/// +/// Pending tx filters are not supported by Signet. +#[derive(Debug, Clone, PartialEq, Eq)] +pub(crate) enum InterestKind { + /// Log filter with a user-supplied [`Filter`]. + Log(Box), + /// New-block filter. + Block, +} + +impl InterestKind { + /// True if this is a log filter. + #[allow(dead_code)] + pub(crate) const fn is_filter(&self) -> bool { + matches!(self, Self::Log(_)) + } + + /// True if this is a block filter. + pub(crate) const fn is_block(&self) -> bool { + matches!(self, Self::Block) + } + + /// Fallible cast to a filter. + pub(crate) const fn as_filter(&self) -> Option<&Filter> { + match self { + Self::Log(f) => Some(f), + _ => None, + } + } + + fn apply_block(notif: &NewBlockNotification) -> SubscriptionBuffer { + let header = Header { + hash: notif.header.hash_slow(), + inner: notif.header.clone(), + total_difficulty: None, + size: None, + }; + SubscriptionBuffer::Block(VecDeque::from([header])) + } + + fn apply_filter(&self, notif: &NewBlockNotification) -> SubscriptionBuffer { + let filter = self.as_filter().unwrap(); + let block_hash = notif.header.hash_slow(); + let block_number = notif.header.number; + let block_timestamp = notif.header.timestamp; + + let logs: VecDeque = notif + .receipts + .iter() + .enumerate() + .flat_map(|(tx_idx, receipt)| { + let tx_hash = *notif.transactions[tx_idx].tx_hash(); + receipt.inner.logs.iter().enumerate().filter_map(move |(log_idx, log)| { + if filter.matches(log) { + Some(Log { + inner: log.clone(), + block_hash: Some(block_hash), + block_number: Some(block_number), + block_timestamp: Some(block_timestamp), + transaction_hash: Some(tx_hash), + transaction_index: Some(tx_idx as u64), + log_index: Some(log_idx as u64), + removed: false, + }) + } else { + None + } + }) + 
}) + .collect(); + + SubscriptionBuffer::Log(logs) + } + + /// Apply the filter to a [`NewBlockNotification`], producing a + /// subscription buffer. + pub(crate) fn filter_notification_for_sub( + &self, + notif: &NewBlockNotification, + ) -> SubscriptionBuffer { + if self.is_block() { Self::apply_block(notif) } else { self.apply_filter(notif) } + } + + /// Return an empty output of the same kind as this filter. + pub(crate) const fn empty_output(&self) -> FilterOutput { + match self { + Self::Log(_) => FilterOutput::Log(VecDeque::new()), + Self::Block => FilterOutput::Block(VecDeque::new()), + } + } + + /// Return an empty subscription buffer of the same kind as this filter. + pub(crate) const fn empty_sub_buffer(&self) -> SubscriptionBuffer { + match self { + Self::Log(_) => SubscriptionBuffer::Log(VecDeque::new()), + Self::Block => SubscriptionBuffer::Block(VecDeque::new()), + } + } +} diff --git a/crates/rpc-storage/src/interest/mod.rs b/crates/rpc-storage/src/interest/mod.rs new file mode 100644 index 0000000..3c611d9 --- /dev/null +++ b/crates/rpc-storage/src/interest/mod.rs @@ -0,0 +1,22 @@ +//! Filter and subscription management for block/log notifications. + +mod filters; +pub(crate) use filters::{FilterManager, FilterOutput}; +mod kind; +pub(crate) use kind::InterestKind; +mod subs; +pub(crate) use subs::SubscriptionManager; + +/// Notification sent when a new block is available in storage. +/// +/// The caller constructs and sends these via a +/// [`tokio::sync::broadcast::Sender`]. +#[derive(Debug, Clone)] +pub struct NewBlockNotification { + /// The block header. + pub header: alloy::consensus::Header, + /// Transactions in the block. + pub transactions: Vec, + /// Receipts for the block. + pub receipts: Vec, +} diff --git a/crates/rpc-storage/src/interest/subs.rs b/crates/rpc-storage/src/interest/subs.rs new file mode 100644 index 0000000..8fb7b04 --- /dev/null +++ b/crates/rpc-storage/src/interest/subs.rs @@ -0,0 +1,333 @@ +//! 
Subscription management for `eth_subscribe` / `eth_unsubscribe`. + +use crate::interest::{InterestKind, NewBlockNotification}; +use ajj::{HandlerCtx, serde_json}; +use alloy::{primitives::U64, rpc::types::Log}; +use dashmap::DashMap; +use std::{ + cmp::min, + collections::VecDeque, + future::pending, + sync::{ + Arc, Weak, + atomic::{AtomicU64, Ordering}, + }, + time::Duration, +}; +use tokio::sync::broadcast::{self, error::RecvError}; +use tokio_util::sync::{CancellationToken, WaitForCancellationFutureOwned}; +use tracing::{Instrument, debug, debug_span, enabled, trace}; + +/// Either type for subscription outputs. +#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize)] +#[serde(untagged)] +pub(crate) enum Either { + /// A log entry. + Log(Box), + /// A block header. + Block(Box), +} + +/// Buffer for subscription outputs. +#[derive(Debug, Clone, PartialEq, Eq)] +pub(crate) enum SubscriptionBuffer { + /// Log buffer. + Log(VecDeque), + /// Block header buffer. + Block(VecDeque), +} + +impl SubscriptionBuffer { + /// True if the buffer is empty. + pub(crate) fn is_empty(&self) -> bool { + self.len() == 0 + } + + /// Get the number of items in the buffer. + pub(crate) fn len(&self) -> usize { + match self { + Self::Log(buf) => buf.len(), + Self::Block(buf) => buf.len(), + } + } + + /// Extend this buffer with another buffer. + /// + /// # Panics + /// + /// Panics if the buffers are of different types. + pub(crate) fn extend(&mut self, other: Self) { + match (self, other) { + (Self::Log(buf), Self::Log(other)) => buf.extend(other), + (Self::Block(buf), Self::Block(other)) => buf.extend(other), + _ => panic!("mismatched buffer types"), + } + } + + /// Pop the front of the buffer. 
+ pub(crate) fn pop_front(&mut self) -> Option { + match self { + Self::Log(buf) => buf.pop_front().map(|log| Either::Log(Box::new(log))), + Self::Block(buf) => buf.pop_front().map(|header| Either::Block(Box::new(header))), + } + } +} + +impl From> for SubscriptionBuffer { + fn from(logs: Vec) -> Self { + Self::Log(logs.into()) + } +} + +impl FromIterator for SubscriptionBuffer { + fn from_iter>(iter: T) -> Self { + Self::Log(iter.into_iter().collect()) + } +} + +impl From> for SubscriptionBuffer { + fn from(headers: Vec) -> Self { + Self::Block(headers.into()) + } +} + +impl FromIterator for SubscriptionBuffer { + fn from_iter>(iter: T) -> Self { + Self::Block(iter.into_iter().collect()) + } +} + +/// Tracks ongoing subscription tasks. +/// +/// Performs the following functions: +/// - assigns unique subscription IDs +/// - spawns tasks to manage each subscription +/// - allows cancelling subscriptions by ID +/// +/// Calling [`Self::new`] spawns a task that periodically cleans stale filters. +/// This task runs on a separate thread to avoid [`DashMap::retain`] deadlock. +/// See [`DashMap`] documentation for more information. +#[derive(Clone)] +pub(crate) struct SubscriptionManager { + inner: Arc, +} + +impl SubscriptionManager { + /// Instantiate a new subscription manager, start a task to clean up + /// subscriptions cancelled by user disconnection. 
+ pub(crate) fn new( + notif_sender: broadcast::Sender, + clean_interval: Duration, + ) -> Self { + let inner = Arc::new(SubscriptionManagerInner::new(notif_sender)); + let task = SubCleanerTask::new(Arc::downgrade(&inner), clean_interval); + task.spawn(); + Self { inner } + } +} + +impl core::ops::Deref for SubscriptionManager { + type Target = SubscriptionManagerInner; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +impl core::fmt::Debug for SubscriptionManager { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("SubscriptionManager").finish_non_exhaustive() + } +} + +/// Inner logic for [`SubscriptionManager`]. +#[derive(Debug)] +pub(crate) struct SubscriptionManagerInner { + next_id: AtomicU64, + tasks: DashMap, + notif_sender: broadcast::Sender, +} + +impl SubscriptionManagerInner { + /// Create a new subscription manager. + fn new(notif_sender: broadcast::Sender) -> Self { + Self { next_id: AtomicU64::new(1), tasks: DashMap::new(), notif_sender } + } + + /// Assign a new subscription ID. + fn next_id(&self) -> U64 { + U64::from(self.next_id.fetch_add(1, Ordering::Relaxed)) + } + + /// Cancel a subscription task. + pub(crate) fn unsubscribe(&self, id: U64) -> bool { + if let Some(task) = self.tasks.remove(&id) { + task.1.cancel(); + true + } else { + false + } + } + + /// Subscribe to notifications. Returns `None` if notifications are + /// disabled. + pub(crate) fn subscribe(&self, ajj_ctx: &HandlerCtx, filter: InterestKind) -> Option { + if !ajj_ctx.notifications_enabled() { + return None; + } + + let id = self.next_id(); + let token = CancellationToken::new(); + let task = SubscriptionTask { + id, + filter, + token: token.clone(), + notifs: self.notif_sender.subscribe(), + }; + task.spawn(ajj_ctx); + + debug!(%id, "registered new subscription"); + + Some(id) + } +} + +/// Task to manage a single subscription. 
+#[derive(Debug)] +struct SubscriptionTask { + id: U64, + filter: InterestKind, + token: CancellationToken, + notifs: broadcast::Receiver, +} + +impl SubscriptionTask { + /// Create the task future. + async fn task_future(self, ajj_ctx: HandlerCtx, ajj_cancel: WaitForCancellationFutureOwned) { + let SubscriptionTask { id, filter, token, mut notifs } = self; + + let Some(sender) = ajj_ctx.notifications() else { return }; + + let mut notif_buffer = filter.empty_sub_buffer(); + tokio::pin!(ajj_cancel); + + loop { + let span = debug_span!(parent: None, "SubscriptionTask::task_future", %id, filter = tracing::field::Empty); + if enabled!(tracing::Level::TRACE) { + span.record("filter", format!("{filter:?}")); + } + + let guard = span.enter(); + + let permit_fut = async { + if !notif_buffer.is_empty() { + sender.reserve_many(min(sender.max_capacity() / 2, notif_buffer.len())).await + } else { + pending().await + } + } + .in_current_span(); + drop(guard); + + tokio::select! { + biased; + _ = &mut ajj_cancel => { + let _guard = span.enter(); + trace!("subscription cancelled by client disconnect"); + token.cancel(); + break; + } + _ = token.cancelled() => { + let _guard = span.enter(); + trace!("subscription cancelled by user"); + break; + } + permits = permit_fut => { + let _guard = span.enter(); + let Ok(permits) = permits else { + trace!("channel to client closed"); + break + }; + + for permit in permits { + let Some(item) = notif_buffer.pop_front() else { + break; + }; + let notification = ajj::serde_json::json!{ + { + "jsonrpc": "2.0", + "method": "eth_subscription", + "params": { + "result": &item, + "subscription": id + }, + } + }; + let Ok(brv) = serde_json::value::to_raw_value(¬ification) else { + trace!(?item, "failed to serialize notification"); + continue + }; + permit.send(brv); + } + } + notif_res = notifs.recv() => { + let _guard = span.enter(); + + let notif = match notif_res { + Ok(notif) => notif, + Err(RecvError::Lagged(skipped)) => { + trace!(skipped, 
"missed notifications"); + continue; + }, + Err(e) =>{ + trace!(?e, "notification stream closed"); + break; + } + }; + + let output = filter.filter_notification_for_sub(¬if); + + trace!(count = output.len(), "Filter applied to notification"); + if !output.is_empty() { + notif_buffer.extend(output); + } + } + } + } + } + + /// Spawn on the ajj [`HandlerCtx`]. + fn spawn(self, ctx: &HandlerCtx) { + ctx.spawn_graceful_with_ctx(|ctx, ajj_cancel| self.task_future(ctx, ajj_cancel)); + } +} + +/// Task to clean up cancelled subscriptions. +/// +/// This task runs on a separate thread to avoid [`DashMap::retain`] deadlocks. +#[derive(Debug)] +struct SubCleanerTask { + inner: Weak, + interval: Duration, +} + +impl SubCleanerTask { + /// Create a new subscription cleaner task. + const fn new(inner: Weak, interval: Duration) -> Self { + Self { inner, interval } + } + + /// Run the task. This task runs on a separate thread, which ensures that + /// [`DashMap::retain`]'s deadlock condition is not met. See [`DashMap`] + /// documentation for more information. + fn spawn(self) { + std::thread::spawn(move || { + loop { + std::thread::sleep(self.interval); + if let Some(inner) = self.inner.upgrade() { + inner.tasks.retain(|_, task| !task.is_cancelled()); + } + } + }); + } +} diff --git a/crates/rpc-storage/src/lib.rs b/crates/rpc-storage/src/lib.rs index fac92bc..af4d93a 100644 --- a/crates/rpc-storage/src/lib.rs +++ b/crates/rpc-storage/src/lib.rs @@ -13,15 +13,19 @@ mod config; pub use config::StorageRpcConfig; - mod ctx; pub use ctx::StorageRpcCtx; - mod resolve; pub use resolve::BlockTags; - mod eth; pub use eth::EthError; +mod gas_oracle; +mod interest; +pub use interest::NewBlockNotification; +mod debug; +pub use debug::DebugError; +mod signet; +pub use signet::error::SignetError; /// Instantiate the `eth` API router. pub fn eth() -> ajj::Router> @@ -31,3 +35,31 @@ where { eth::eth() } + +/// Instantiate the `debug` API router. 
+pub fn debug() -> ajj::Router> +where + H: signet_hot::HotKv + Send + Sync + 'static, + ::Error: trevm::revm::database::DBErrorMarker, +{ + debug::debug() +} + +/// Instantiate the `signet` API router. +pub fn signet() -> ajj::Router> +where + H: signet_hot::HotKv + Send + Sync + 'static, + ::Error: trevm::revm::database::DBErrorMarker, +{ + signet::signet() +} + +/// Instantiate a combined router with `eth`, `debug`, and `signet` +/// namespaces. +pub fn router() -> ajj::Router> +where + H: signet_hot::HotKv + Send + Sync + 'static, + ::Error: trevm::revm::database::DBErrorMarker, +{ + ajj::Router::new().merge(eth::eth()).merge(debug::debug()).merge(signet::signet()) +} diff --git a/crates/rpc-storage/src/signet/endpoints.rs b/crates/rpc-storage/src/signet/endpoints.rs new file mode 100644 index 0000000..c1c3f26 --- /dev/null +++ b/crates/rpc-storage/src/signet/endpoints.rs @@ -0,0 +1,107 @@ +//! Signet namespace RPC endpoint implementations. + +use crate::{ + ctx::StorageRpcCtx, + eth::helpers::{CfgFiller, await_handler, response_tri}, + signet::error::SignetError, +}; +use ajj::{HandlerCtx, ResponsePayload}; +use alloy::eips::{BlockId, eip1559::BaseFeeParams}; +use signet_bundle::{SignetBundleDriver, SignetCallBundle, SignetCallBundleResponse}; +use signet_hot::HotKv; +use signet_hot::model::HotKvRead; +use signet_types::SignedOrder; +use std::time::Duration; +use tokio::select; +use trevm::revm::database::DBErrorMarker; + +/// `signet_sendOrder` handler. 
+pub(super) async fn send_order( + hctx: HandlerCtx, + order: SignedOrder, + ctx: StorageRpcCtx, +) -> Result<(), String> +where + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let Some(tx_cache) = ctx.tx_cache().cloned() else { + return Err(SignetError::TxCacheNotProvided.into_string()); + }; + + let task = |hctx: HandlerCtx| async move { + hctx.spawn(async move { tx_cache.forward_order(order).await.map_err(|e| e.to_string()) }); + Ok(()) + }; + + await_handler!(@option hctx.spawn_blocking_with_ctx(task)) +} + +/// `signet_callBundle` handler. +pub(super) async fn call_bundle( + hctx: HandlerCtx, + bundle: SignetCallBundle, + ctx: StorageRpcCtx, +) -> ResponsePayload +where + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let timeout = bundle.bundle.timeout.unwrap_or(1000); + + let task = async move { + let id = bundle.state_block_number(); + let mut block_id: BlockId = id.into(); + + let pending = block_id.is_pending(); + if pending { + block_id = BlockId::latest(); + } + + let cold = ctx.cold(); + let block_num = response_tri!(ctx.resolve_block_id(block_id).await); + + let mut header = + response_tri!(cold.get_header_by_number(block_num).await.map_err(|e| e.to_string())); + + let header = + response_tri!(header.as_mut().ok_or_else(|| format!("block not found: {block_id}"))); + + // For pending blocks, synthesize the next-block header. + if pending { + header.parent_hash = header.hash_slow(); + header.number += 1; + header.timestamp += 12; + header.base_fee_per_gas = header.next_block_base_fee(BaseFeeParams::ethereum()); + header.gas_limit = ctx.config().rpc_gas_cap; + } + + // State at the resolved block number (before any pending header mutation). 
+ let db = response_tri!(ctx.revm_state_at_height(block_num).map_err(|e| e.to_string())); + + let mut driver = SignetBundleDriver::from(&bundle); + + let trevm = signet_evm::signet_evm(db, ctx.constants().clone()) + .fill_cfg(&CfgFiller(ctx.chain_id())) + .fill_block(header); + + response_tri!(trevm.drive_bundle(&mut driver).map_err(|e| e.into_error())); + + ResponsePayload::Success(driver.into_response()) + }; + + let task = async move { + select! { + _ = tokio::time::sleep(Duration::from_millis(timeout)) => { + ResponsePayload::internal_error_message( + "timeout during bundle simulation".into(), + ) + } + result = task => { + result + } + } + }; + + await_handler!(@response_option hctx.spawn_blocking(task)) +} diff --git a/crates/rpc-storage/src/signet/error.rs b/crates/rpc-storage/src/signet/error.rs new file mode 100644 index 0000000..ad019f4 --- /dev/null +++ b/crates/rpc-storage/src/signet/error.rs @@ -0,0 +1,16 @@ +//! Error types for the signet namespace. + +/// Errors that can occur in the `signet` namespace. +#[derive(Debug, Clone, Copy, thiserror::Error)] +pub enum SignetError { + /// The transaction cache was not provided. + #[error("transaction cache not provided")] + TxCacheNotProvided, +} + +impl SignetError { + /// Convert to a string by value. + pub fn into_string(self) -> String { + self.to_string() + } +} diff --git a/crates/rpc-storage/src/signet/mod.rs b/crates/rpc-storage/src/signet/mod.rs new file mode 100644 index 0000000..f9c452e --- /dev/null +++ b/crates/rpc-storage/src/signet/mod.rs @@ -0,0 +1,19 @@ +//! Signet RPC methods and related code. + +mod endpoints; +use endpoints::{call_bundle, send_order}; +pub(crate) mod error; + +use crate::ctx::StorageRpcCtx; +use signet_hot::HotKv; +use signet_hot::model::HotKvRead; +use trevm::revm::database::DBErrorMarker; + +/// Instantiate a `signet` API router backed by storage. 
+pub(crate) fn signet() -> ajj::Router> +where + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + ajj::Router::new().route("sendOrder", send_order::).route("callBundle", call_bundle::) +} diff --git a/crates/rpc-storage/tests/eth_rpc.rs b/crates/rpc-storage/tests/eth_rpc.rs index 3f70175..e6d5655 100644 --- a/crates/rpc-storage/tests/eth_rpc.rs +++ b/crates/rpc-storage/tests/eth_rpc.rs @@ -16,9 +16,10 @@ use serde_json::{Value, json}; use signet_cold::{BlockData, ColdStorageHandle, ColdStorageTask, mem::MemColdBackend}; use signet_constants::SignetSystemConstants; use signet_hot::{HotKv, db::UnsafeDbWrite, mem::MemKv}; -use signet_rpc_storage::{BlockTags, StorageRpcConfig, StorageRpcCtx}; +use signet_rpc_storage::{BlockTags, NewBlockNotification, StorageRpcConfig, StorageRpcCtx}; use signet_storage::UnifiedStorage; use signet_storage_types::Receipt; +use tokio::sync::broadcast; use tokio_util::sync::CancellationToken; use tower::ServiceExt; @@ -32,6 +33,9 @@ struct TestHarness { cold: ColdStorageHandle, hot: MemKv, tags: BlockTags, + #[allow(dead_code)] + notif_tx: broadcast::Sender, + ctx: StorageRpcCtx, _cancel: CancellationToken, } @@ -44,11 +48,29 @@ impl TestHarness { let storage = UnifiedStorage::new(hot.clone(), cold.clone()); let constants = SignetSystemConstants::test(); let tags = BlockTags::new(latest, latest.saturating_sub(2), 0); - let ctx = - StorageRpcCtx::new(storage, constants, tags.clone(), None, StorageRpcConfig::default()); - let app = signet_rpc_storage::eth::().into_axum("/").with_state(ctx); + let (notif_tx, _) = broadcast::channel::(16); + let ctx = StorageRpcCtx::new( + storage, + constants, + tags.clone(), + None, + StorageRpcConfig::default(), + notif_tx.clone(), + ); + let app = signet_rpc_storage::eth::().into_axum("/").with_state(ctx.clone()); + + Self { app, cold, hot, tags, notif_tx, ctx, _cancel: cancel } + } + + /// Build an axum router for the debug namespace. 
+ fn debug_app(&self) -> axum::Router { + signet_rpc_storage::debug::().into_axum("/").with_state(self.ctx.clone()) + } - Self { app, cold, hot, tags, _cancel: cancel } + /// Build an axum router for the signet namespace. + #[allow(dead_code)] + fn signet_app(&self) -> axum::Router { + signet_rpc_storage::signet::().into_axum("/").with_state(self.ctx.clone()) } } @@ -105,6 +127,14 @@ const LOG_TOPIC: B256 = B256::repeat_byte(0xcc); /// Uses alloy's signer to produce a valid ECDSA signature so that /// `recover_sender` succeeds during RPC response building. fn make_signed_tx(nonce: u64) -> (signet_storage_types::TransactionSigned, Address) { + make_signed_tx_with_gas_price(nonce, 1_000_000_000) +} + +/// Create a legacy transaction with a custom gas price. +fn make_signed_tx_with_gas_price( + nonce: u64, + gas_price: u128, +) -> (signet_storage_types::TransactionSigned, Address) { use alloy::signers::{SignerSync, local::PrivateKeySigner}; let signer = PrivateKeySigner::from_signing_key( @@ -117,7 +147,7 @@ fn make_signed_tx(nonce: u64) -> (signet_storage_types::TransactionSigned, Addre let tx = TxLegacy { nonce, - gas_price: 1_000_000_000, + gas_price, gas_limit: 21_000, to: TxKind::Call(Address::ZERO), value: U256::from(1000), @@ -170,10 +200,14 @@ fn make_block( let all_logs: Vec<_> = receipts.iter().flat_map(|r| r.inner.logs.iter()).collect(); let bloom = logs_bloom(all_logs); + let gas_used = receipts.last().map(|r| r.inner.cumulative_gas_used).unwrap_or_default(); + let header = Header { number: block_num, timestamp: 1_700_000_000 + block_num, base_fee_per_gas: Some(1_000_000_000), + gas_limit: 30_000_000, + gas_used, logs_bloom: bloom, ..Default::default() }; @@ -565,7 +599,7 @@ async fn test_get_logs_empty() { #[tokio::test] async fn test_not_supported() { let h = TestHarness::new(0).await; - let resp = rpc_call_raw(&h.app, "gasPrice", json!([])).await; + let resp = rpc_call_raw(&h.app, "syncing", json!([])).await; assert!(resp.get("error").is_some()); let 
msg = resp["error"]["message"].as_str().unwrap(); assert!(msg.contains("not supported"), "unexpected error: {msg}"); @@ -577,3 +611,236 @@ async fn test_send_raw_tx_no_cache() { let resp = rpc_call_raw(&h.app, "sendRawTransaction", json!(["0x00"])).await; assert!(resp.get("error").is_some()); } + +// --------------------------------------------------------------------------- +// Group 7: Gas & Fee Queries +// --------------------------------------------------------------------------- + +#[tokio::test] +async fn test_gas_price() { + let h = TestHarness::new(0).await; + + // Create a block with txs that have gas_price (2 gwei) > base_fee (1 gwei) + let (tx0, _) = make_signed_tx_with_gas_price(0, 2_000_000_000); + let block = make_block(1, vec![tx0], 0); + h.cold.append_block(block).await.unwrap(); + h.tags.set_latest(1); + + let result = rpc_call(&h.app, "gasPrice", json!([])).await; + + // tip = gas_price - base_fee = 2e9 - 1e9 = 1e9 + // gasPrice = tip + base_fee = 1e9 + 1e9 = 2e9 = 0x77359400 + assert_eq!(result, json!("0x77359400")); +} + +#[tokio::test] +async fn test_max_priority_fee_per_gas() { + let h = TestHarness::new(0).await; + + let (tx0, _) = make_signed_tx_with_gas_price(0, 2_000_000_000); + let block = make_block(1, vec![tx0], 0); + h.cold.append_block(block).await.unwrap(); + h.tags.set_latest(1); + + let result = rpc_call(&h.app, "maxPriorityFeePerGas", json!([])).await; + + // tip only = gas_price - base_fee = 1e9 = 0x3b9aca00 + assert_eq!(result, json!("0x3b9aca00")); +} + +#[tokio::test] +async fn test_gas_price_empty_blocks() { + let h = TestHarness::new(0).await; + + let block = make_block(1, vec![], 0); + h.cold.append_block(block).await.unwrap(); + h.tags.set_latest(1); + + let result = rpc_call(&h.app, "gasPrice", json!([])).await; + + // No txs means tip = 0, gasPrice = base_fee = 1e9 = 0x3b9aca00 + assert_eq!(result, json!("0x3b9aca00")); +} + +#[tokio::test] +async fn test_fee_history_basic() { + let h = TestHarness::new(0).await; + + for 
i in 1u64..=3 { + let (tx, _) = make_signed_tx_with_gas_price(i - 1, 2_000_000_000); + let block = make_block(i, vec![tx], 0); + h.cold.append_block(block).await.unwrap(); + } + h.tags.set_latest(3); + + // Request 2 blocks of fee history ending at block 3 + let result = rpc_call(&h.app, "feeHistory", json!(["0x2", "0x3", null])).await; + + // oldest_block = end_block + 1 - block_count = 3 + 1 - 2 = 2 + assert_eq!(result["oldestBlock"], json!("0x2")); + // base_fee_per_gas has block_count + 1 entries (includes next-block prediction) + let base_fees = result["baseFeePerGas"].as_array().unwrap(); + assert_eq!(base_fees.len(), 3); + // gas_used_ratio has block_count entries + let gas_ratios = result["gasUsedRatio"].as_array().unwrap(); + assert_eq!(gas_ratios.len(), 2); + // No reward field when no percentiles requested + assert!(result["reward"].is_null()); +} + +#[tokio::test] +async fn test_fee_history_with_rewards() { + let h = TestHarness::new(0).await; + + let (tx0, _) = make_signed_tx_with_gas_price(0, 2_000_000_000); + let block = make_block(1, vec![tx0], 0); + h.cold.append_block(block).await.unwrap(); + h.tags.set_latest(1); + + let result = rpc_call(&h.app, "feeHistory", json!(["0x1", "0x1", [25.0, 75.0]])).await; + + assert_eq!(result["oldestBlock"], json!("0x1")); + let rewards = result["reward"].as_array().unwrap(); + assert_eq!(rewards.len(), 1); + let block_rewards = rewards[0].as_array().unwrap(); + assert_eq!(block_rewards.len(), 2); +} + +// --------------------------------------------------------------------------- +// Group 8: Filters +// --------------------------------------------------------------------------- + +#[tokio::test] +async fn test_new_block_filter_and_changes() { + let h = TestHarness::new(0).await; + + // Install a block filter at block 0 + let filter_id = rpc_call(&h.app, "newBlockFilter", json!([])).await; + let filter_id_str = filter_id.as_str().unwrap().to_string(); + + // Append a block + let (tx0, _) = make_signed_tx(0); + 
let block = make_block(1, vec![tx0], 0); + h.cold.append_block(block).await.unwrap(); + h.tags.set_latest(1); + + // Poll for changes — should get block hash for block 1 + let changes = rpc_call(&h.app, "getFilterChanges", json!([filter_id_str])).await; + let hashes = changes.as_array().unwrap(); + assert_eq!(hashes.len(), 1); + assert!(hashes[0].is_string()); + + // Poll again with no new blocks — should be empty + let changes = rpc_call(&h.app, "getFilterChanges", json!([filter_id_str])).await; + let hashes = changes.as_array().unwrap(); + assert!(hashes.is_empty()); +} + +#[tokio::test] +async fn test_new_log_filter_and_changes() { + let h = TestHarness::new(0).await; + + // Install a log filter for LOG_ADDR with LOG_TOPIC + let filter_id = rpc_call( + &h.app, + "newFilter", + json!([{ + "address": format!("{:?}", LOG_ADDR), + "topics": [format!("{:?}", LOG_TOPIC)], + }]), + ) + .await; + let filter_id_str = filter_id.as_str().unwrap().to_string(); + + // Append a block with matching logs + let (tx0, _) = make_signed_tx(0); + let block = make_block(1, vec![tx0], 2); + h.cold.append_block(block).await.unwrap(); + h.tags.set_latest(1); + + // Poll for changes — should get matching logs + let changes = rpc_call(&h.app, "getFilterChanges", json!([filter_id_str])).await; + let logs = changes.as_array().unwrap(); + assert_eq!(logs.len(), 2); + assert_eq!(logs[0]["address"], json!(format!("{:?}", LOG_ADDR))); +} + +#[tokio::test] +async fn test_uninstall_filter() { + let h = TestHarness::new(0).await; + + let filter_id = rpc_call(&h.app, "newBlockFilter", json!([])).await; + let filter_id_str = filter_id.as_str().unwrap().to_string(); + + // Uninstall + let result = rpc_call(&h.app, "uninstallFilter", json!([filter_id_str])).await; + assert_eq!(result, json!(true)); + + // Uninstall again — should return false + let result = rpc_call(&h.app, "uninstallFilter", json!([filter_id_str])).await; + assert_eq!(result, json!(false)); +} + +// 
--------------------------------------------------------------------------- +// Group 9: Debug namespace +// --------------------------------------------------------------------------- + +/// Set up hot storage with a genesis header and fund an address. +/// +/// The genesis header at block 0 is required so `revm_reader_at_height` +/// can validate height bounds. Without it, MemKv returns `NoBlocks`. +fn setup_hot_for_evm(hot: &MemKv, addr: Address, balance: U256) { + use signet_storage_types::{Account, SealedHeader}; + + let writer = hot.writer().unwrap(); + + // Write a genesis header so the hot storage tracks block 0. + let genesis = SealedHeader::new(Header::default()); + writer.put_header(&genesis).unwrap(); + + writer.put_account(&addr, &Account { nonce: 0, balance, bytecode_hash: None }).unwrap(); + writer.commit().unwrap(); +} + +#[tokio::test] +async fn test_trace_block_by_number_noop() { + let h = TestHarness::new(0).await; + + let (tx0, sender) = make_signed_tx(0); + setup_hot_for_evm(&h.hot, sender, U256::from(1_000_000_000_000_000_000u128)); + + let block = make_block(1, vec![tx0], 0); + h.cold.append_block(block).await.unwrap(); + h.tags.set_latest(1); + + let debug_app = h.debug_app(); + let result = + rpc_call(&debug_app, "traceBlockByNumber", json!(["0x1", {"tracer": "noopTracer"}])).await; + + let traces = result.as_array().unwrap(); + assert_eq!(traces.len(), 1); +} + +#[tokio::test] +async fn test_trace_transaction_noop() { + let h = TestHarness::new(0).await; + + let (tx0, sender) = make_signed_tx(0); + let tx_hash = *tx0.tx_hash(); + setup_hot_for_evm(&h.hot, sender, U256::from(1_000_000_000_000_000_000u128)); + + let block = make_block(1, vec![tx0], 0); + h.cold.append_block(block).await.unwrap(); + h.tags.set_latest(1); + + let debug_app = h.debug_app(); + let result = rpc_call( + &debug_app, + "traceTransaction", + json!([format!("{:?}", tx_hash), {"tracer": "noopTracer"}]), + ) + .await; + + // NoopFrame result is not null + 
assert!(!result.is_null()); +} From 291ab9fdb780db33a95746f8e2ccb57ed3ffd690 Mon Sep 17 00:00:00 2001 From: James Date: Fri, 13 Feb 2026 14:52:51 -0500 Subject: [PATCH 10/31] chore: use signet-storage 0.3.0 from crates.io, remove patch overrides Co-Authored-By: Claude Opus 4.6 --- Cargo.toml | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 5a8c908..aea7d27 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,10 +56,10 @@ signet-tx-cache = "0.16.0-rc.8" signet-types = "0.16.0-rc.8" signet-zenith = "0.16.0-rc.8" signet-journal = "0.16.0-rc.8" -signet-storage = "0.2.0" -signet-cold = "0.2.0" -signet-hot = "0.2.0" -signet-storage-types = "0.2.0" +signet-storage = "0.3.0" +signet-cold = "0.3.0" +signet-hot = "0.3.0" +signet-storage-types = "0.3.0" # ajj ajj = { version = "0.3.4" } @@ -137,6 +137,11 @@ alloy-rlp = "0.3.11" tempfile = "3.17.0" # [patch.crates-io] +# signet-cold = { path = "../storage/crates/cold" } +# signet-hot = { path = "../storage/crates/hot" } +# signet-storage = { path = "../storage/crates/storage" } +# signet-storage-types = { path = "../storage/crates/types" } + # signet-bundle = { path = "../sdk/crates/bundle"} # signet-constants = { path = "../sdk/crates/constants"} # signet-evm = { path = "../sdk/crates/evm"} From 2163d987116782cf3182711bb79f8981a55d224e Mon Sep 17 00:00:00 2001 From: James Date: Fri, 13 Feb 2026 14:55:24 -0500 Subject: [PATCH 11/31] refactor: adapt rpc-storage to signet-storage 0.3.0 API - Use `SealedHeader` with `.hash()` / `.into_inner()` instead of `header.hash_slow()` - Use `RecoveredTx` (pre-recovered sender) instead of manual `recover_sender` calls - Use `ColdReceipt` with per-tx `gas_used` instead of computing deltas from cumulative gas - Delegate `get_logs` to cold storage instead of manual bloom filtering and block iteration - Remove `BlockRangeInclusiveIter`, `collect_matching_logs`, `build_receipt_from_parts`, and `recover_sender` helpers - Simplify 
`build_rpc_transaction` and `build_receipt` to be infallible Co-Authored-By: Claude Opus 4.6 --- crates/rpc-storage/src/ctx.rs | 2 +- crates/rpc-storage/src/debug/endpoints.rs | 6 +- crates/rpc-storage/src/eth/endpoints.rs | 286 ++++++--------------- crates/rpc-storage/src/eth/helpers.rs | 160 +++--------- crates/rpc-storage/src/signet/endpoints.rs | 13 +- crates/rpc-storage/tests/eth_rpc.rs | 18 +- 6 files changed, 135 insertions(+), 350 deletions(-) diff --git a/crates/rpc-storage/src/ctx.rs b/crates/rpc-storage/src/ctx.rs index a65f845..5c5495d 100644 --- a/crates/rpc-storage/src/ctx.rs +++ b/crates/rpc-storage/src/ctx.rs @@ -231,6 +231,6 @@ impl StorageRpcCtx { .ok_or(EthError::BlockNotFound(id))?; let db = self.revm_state_at_height(header.number)?; - Ok(EvmBlockContext { header, db }) + Ok(EvmBlockContext { header: header.into_inner(), db }) } } diff --git a/crates/rpc-storage/src/debug/endpoints.rs b/crates/rpc-storage/src/debug/endpoints.rs index 7662429..dc0656b 100644 --- a/crates/rpc-storage/src/debug/endpoints.rs +++ b/crates/rpc-storage/src/debug/endpoints.rs @@ -68,7 +68,8 @@ where ); }; - let block_hash = header.hash_slow(); + let block_hash = header.hash(); + let header = header.into_inner(); tracing::debug!(number = header.number, "Loaded block"); @@ -151,7 +152,8 @@ where ); let header = - response_tri!(header.ok_or(DebugError::BlockNotFound(format!("block {block_num}")))); + response_tri!(header.ok_or(DebugError::BlockNotFound(format!("block {block_num}")))) + .into_inner(); tracing::debug!(number = block_num, "Loaded containing block"); diff --git a/crates/rpc-storage/src/eth/endpoints.rs b/crates/rpc-storage/src/eth/endpoints.rs index 4e010f0..8f1b8f0 100644 --- a/crates/rpc-storage/src/eth/endpoints.rs +++ b/crates/rpc-storage/src/eth/endpoints.rs @@ -3,23 +3,23 @@ use crate::{ ctx::{EvmBlockContext, StorageRpcCtx}, eth::helpers::{ - AddrWithBlock, BlockParams, BlockRangeInclusiveIter, CfgFiller, FeeHistoryArgs, - StorageAtArgs, SubscribeArgs, 
TxParams, await_handler, build_receipt, - build_receipt_from_parts, build_rpc_transaction, normalize_gas_stateless, response_tri, + AddrWithBlock, BlockParams, CfgFiller, FeeHistoryArgs, StorageAtArgs, SubscribeArgs, + TxParams, await_handler, build_receipt, build_rpc_transaction, normalize_gas_stateless, + response_tri, }, gas_oracle, interest::{FilterOutput, InterestKind}, }; use ajj::{HandlerCtx, ResponsePayload}; use alloy::{ - consensus::{BlockHeader, Transaction, TxReceipt}, + consensus::Transaction, eips::{ BlockId, BlockNumberOrTag, eip1559::BaseFeeParams, eip2718::{Decodable2718, Encodable2718}, }, primitives::{B256, U64, U256}, - rpc::types::{Block, BlockTransactions, FeeHistory, Filter, FilteredParams, Log}, + rpc::types::{Block, BlockTransactions, FeeHistory, Filter, Log}, }; use signet_cold::{HeaderSpecifier, ReceiptSpecifier}; use signet_hot::model::HotKvRead; @@ -214,24 +214,20 @@ fn calculate_reward_percentiles( percentiles: &[f64], gas_used: u64, base_fee: u64, - txs: &[signet_storage_types::TransactionSigned], - receipts: &[signet_storage_types::Receipt], + txs: &[signet_storage_types::RecoveredTx], + receipts: &[signet_cold::ColdReceipt], ) -> Vec { if gas_used == 0 || txs.is_empty() { return vec![0; percentiles.len()]; } - // Pair each tx's effective tip with its gas used (from receipt cumulative deltas) + // Pair each tx's effective tip with its gas used. 
let mut tx_gas_and_tip: Vec<(u64, u128)> = txs .iter() .zip(receipts.iter()) - .enumerate() - .map(|(i, (tx, receipt))| { - let prev_cumulative = - if i > 0 { receipts[i - 1].inner.cumulative_gas_used() } else { 0 }; - let tx_gas = receipt.inner.cumulative_gas_used() - prev_cumulative; + .map(|(tx, receipt)| { let tip = tx.effective_tip_per_gas(base_fee).unwrap_or_default(); - (tx_gas, tip) + (receipt.gas_used, tip) }) .collect(); @@ -290,10 +286,10 @@ where return Ok(None); }; - let block_hash = header.hash_slow(); + let block_hash = header.hash(); + let base_fee = header.base_fee_per_gas; let transactions = if full { - let base_fee = header.base_fee_per_gas(); let rpc_txs: Vec<_> = txs .into_iter() .enumerate() @@ -301,9 +297,9 @@ where let meta = signet_storage_types::ConfirmationMeta::new( block_num, block_hash, i as u64, ); - build_rpc_transaction(tx, &meta, base_fee).map_err(|e| e.to_string()) + build_rpc_transaction(tx, &meta, base_fee) }) - .collect::>()?; + .collect(); BlockTransactions::Full(rpc_txs) } else { let hashes: Vec = txs.iter().map(|tx| *tx.tx_hash()).collect(); @@ -311,7 +307,12 @@ where }; Ok(Some(Block { - header: alloy::rpc::types::Header::new(header), + header: alloy::rpc::types::Header { + inner: header.into_inner(), + hash: block_hash, + total_difficulty: None, + size: None, + }, transactions, uncles: vec![], withdrawals: None, @@ -370,36 +371,10 @@ where return Ok(None); }; - let block_hash = header.hash_slow(); - let mut log_index: u64 = 0; - - let rpc_receipts = txs - .into_iter() - .zip(receipts.iter()) - .enumerate() - .map(|(idx, (tx, receipt))| { - let prev_cumulative = idx - .checked_sub(1) - .and_then(|i| receipts.get(i)) - .map(|r| r.inner.cumulative_gas_used()) - .unwrap_or_default(); - - let gas_used = receipt.inner.cumulative_gas_used() - prev_cumulative; - let offset = log_index; - log_index += receipt.inner.logs().len() as u64; - - build_receipt_from_parts( - tx, - &header, - block_hash, - idx as u64, - receipt.clone(), - 
gas_used, - offset, - ) - .map_err(|e| e.to_string()) - }) - .collect::, _>>()?; + let base_fee = header.base_fee_per_gas; + + let rpc_receipts = + txs.iter().zip(receipts).map(|(tx, cr)| build_receipt(cr, tx, base_fee)).collect(); Ok(Some(rpc_receipts)) }; @@ -425,7 +400,17 @@ where cold.get_header_by_number(block_num) .await - .map(|h| h.map(alloy::rpc::types::Header::new)) + .map(|h| { + h.map(|sh| { + let hash = sh.hash(); + alloy::rpc::types::Header { + inner: sh.into_inner(), + hash, + total_difficulty: None, + size: None, + } + }) + }) .map_err(|e| e.to_string()) }; @@ -456,9 +441,9 @@ where // Fetch header for base_fee let header = cold.get_header_by_number(meta.block_number()).await.map_err(|e| e.to_string())?; - let base_fee = header.and_then(|h| h.base_fee_per_gas()); + let base_fee = header.and_then(|h| h.base_fee_per_gas); - build_rpc_transaction(tx, &meta, base_fee).map(Some).map_err(|e| e.to_string()) + Ok(Some(build_rpc_transaction(tx, &meta, base_fee))) }; await_handler!(@option hctx.spawn_blocking(task)) @@ -511,9 +496,9 @@ where let (tx, meta) = confirmed.into_parts(); let header = cold.get_header_by_number(meta.block_number()).await.map_err(|e| e.to_string())?; - let base_fee = header.and_then(|h| h.base_fee_per_gas()); + let base_fee = header.and_then(|h| h.base_fee_per_gas); - build_rpc_transaction(tx, &meta, base_fee).map(Some).map_err(|e| e.to_string()) + Ok(Some(build_rpc_transaction(tx, &meta, base_fee))) }; await_handler!(@option hctx.spawn_blocking(task)) @@ -554,16 +539,24 @@ where ::Error: DBErrorMarker, { let task = async move { - let Some(receipt_ctx) = ctx - .cold() - .get_receipt_with_context(ReceiptSpecifier::TxHash(hash)) - .await - .map_err(|e| e.to_string())? + let cold = ctx.cold(); + + let Some(cr) = + cold.get_receipt(ReceiptSpecifier::TxHash(hash)).await.map_err(|e| e.to_string())? 
else { return Ok(None); }; - build_receipt(receipt_ctx).map(Some).map_err(|e| e.to_string()) + let (tx, header) = tokio::try_join!( + cold.get_tx_by_hash(cr.tx_hash), + cold.get_header_by_number(cr.block_number), + ) + .map_err(|e| e.to_string())?; + + let tx = tx.ok_or("receipt found but transaction missing")?.into_inner(); + let base_fee = header.and_then(|h| h.base_fee_per_gas); + + Ok(Some(build_receipt(cr, &tx, base_fee))) }; await_handler!(@option hctx.spawn_blocking(task)) @@ -829,9 +822,6 @@ where // Logs // --------------------------------------------------------------------------- -/// Maximum headers fetched per batch when scanning bloom filters. -const MAX_HEADERS_RANGE: u64 = 1_000; - pub(crate) async fn get_logs( hctx: HandlerCtx, (filter,): (Filter,), @@ -844,34 +834,8 @@ where let task = async move { let cold = ctx.cold(); - // Build bloom filters for efficient block-level filtering. - let address_filter = FilteredParams::address_filter(&filter.address); - let topics_filter = FilteredParams::topics_filter(&filter.topics); - - match filter.block_option { - alloy::rpc::types::FilterBlockOption::AtBlockHash(hash) => { - let header = cold - .get_header_by_hash(hash) - .await - .map_err(|e| e.to_string())? 
- .ok_or_else(|| format!("block not found: {hash}"))?; - - if !FilteredParams::matches_address(header.logs_bloom, &address_filter) - || !FilteredParams::matches_topics(header.logs_bloom, &topics_filter) - { - return Ok(vec![]); - } - - let block_num = header.number; - let (txs, receipts) = tokio::try_join!( - cold.get_transactions_in_block(block_num), - cold.get_receipts_in_block(block_num), - ) - .map_err(|e| e.to_string())?; - - Ok(collect_matching_logs(&header, hash, &txs, &receipts, &filter)) - } - + let resolved_filter = match filter.block_option { + alloy::rpc::types::FilterBlockOption::AtBlockHash(_) => filter, alloy::rpc::types::FilterBlockOption::Range { from_block, to_block } => { let from = from_block.map(|b| ctx.resolve_block_tag(b)).unwrap_or(0); let to = to_block @@ -886,89 +850,27 @@ where return Err(format!("query exceeds max block range ({max_blocks})")); } - let mut all_logs = Vec::new(); - - for (chunk_start, chunk_end) in - BlockRangeInclusiveIter::new(from..=to, MAX_HEADERS_RANGE) - { - let specs: Vec<_> = - (chunk_start..=chunk_end).map(HeaderSpecifier::Number).collect(); - - let headers = cold.get_headers(specs).await.map_err(|e| e.to_string())?; - - for (offset, maybe_header) in headers.into_iter().enumerate() { - let Some(header) = maybe_header else { - continue; - }; - - if !FilteredParams::matches_address(header.logs_bloom, &address_filter) - || !FilteredParams::matches_topics(header.logs_bloom, &topics_filter) - { - continue; - } - - let block_num = chunk_start + offset as u64; - let block_hash = header.hash_slow(); - - let (txs, receipts) = tokio::try_join!( - cold.get_transactions_in_block(block_num), - cold.get_receipts_in_block(block_num), - ) - .map_err(|e| e.to_string())?; - - let logs = - collect_matching_logs(&header, block_hash, &txs, &receipts, &filter); - all_logs.extend(logs); - - let max_logs = ctx.config().max_logs_per_response; - if max_logs > 0 && all_logs.len() > max_logs { - return Err(format!( - "query exceeds max logs 
per response ({max_logs})" - )); - } - } + Filter { + block_option: alloy::rpc::types::FilterBlockOption::Range { + from_block: Some(BlockNumberOrTag::Number(from)), + to_block: Some(BlockNumberOrTag::Number(to)), + }, + ..filter } - - Ok(all_logs) } - } - }; + }; - await_handler!(@option hctx.spawn_blocking(task)) -} + let logs = cold.get_logs(resolved_filter).await.map_err(|e| e.to_string())?; -/// Extract logs from a block's receipts that match the filter's address and topic criteria. -pub(crate) fn collect_matching_logs( - header: &alloy::consensus::Header, - block_hash: B256, - txs: &[signet_storage_types::TransactionSigned], - receipts: &[signet_storage_types::Receipt], - filter: &Filter, -) -> Vec { - let mut logs = Vec::new(); - let mut log_index: u64 = 0; - - for (tx_idx, (tx, receipt)) in txs.iter().zip(receipts.iter()).enumerate() { - let tx_hash = *tx.tx_hash(); - - for log in &receipt.inner.logs { - if filter.matches_address(log.address) && filter.matches_topics(log.topics()) { - logs.push(Log { - inner: log.clone(), - block_hash: Some(block_hash), - block_number: Some(header.number), - block_timestamp: Some(header.timestamp), - transaction_hash: Some(tx_hash), - transaction_index: Some(tx_idx as u64), - log_index: Some(log_index), - removed: false, - }); - } - log_index += 1; + let max_logs = ctx.config().max_logs_per_response; + if max_logs > 0 && logs.len() > max_logs { + return Err(format!("query exceeds max logs per response ({max_logs})")); } - } - logs + Ok(logs) + }; + + await_handler!(@option hctx.spawn_blocking(task)) } // --------------------------------------------------------------------------- @@ -1045,49 +947,23 @@ where if entry.is_block() { let specs: Vec<_> = (start..=latest).map(HeaderSpecifier::Number).collect(); let headers = cold.get_headers(specs).await.map_err(|e| e.to_string())?; - let hashes: Vec = headers.into_iter().flatten().map(|h| h.hash_slow()).collect(); + let hashes: Vec = headers.into_iter().flatten().map(|h| 
h.hash()).collect(); entry.mark_polled(latest); Ok(FilterOutput::from(hashes)) } else { - let filter = entry.as_filter().cloned().unwrap(); - let address_filter = FilteredParams::address_filter(&filter.address); - let topics_filter = FilteredParams::topics_filter(&filter.topics); - - let mut all_logs = Vec::new(); - - for (chunk_start, chunk_end) in - BlockRangeInclusiveIter::new(start..=latest, MAX_HEADERS_RANGE) - { - let specs: Vec<_> = - (chunk_start..=chunk_end).map(HeaderSpecifier::Number).collect(); - let headers = cold.get_headers(specs).await.map_err(|e| e.to_string())?; - - for (offset, maybe_header) in headers.into_iter().enumerate() { - let Some(header) = maybe_header else { continue }; - - if !FilteredParams::matches_address(header.logs_bloom, &address_filter) - || !FilteredParams::matches_topics(header.logs_bloom, &topics_filter) - { - continue; - } - - let block_num = chunk_start + offset as u64; - let block_hash = header.hash_slow(); - - let (txs, receipts) = tokio::try_join!( - cold.get_transactions_in_block(block_num), - cold.get_receipts_in_block(block_num), - ) - .map_err(|e| e.to_string())?; + let stored = entry.as_filter().cloned().unwrap(); + let resolved = Filter { + block_option: alloy::rpc::types::FilterBlockOption::Range { + from_block: Some(BlockNumberOrTag::Number(start)), + to_block: Some(BlockNumberOrTag::Number(latest)), + }, + ..stored + }; - all_logs.extend(collect_matching_logs( - &header, block_hash, &txs, &receipts, &filter, - )); - } - } + let logs = cold.get_logs(resolved).await.map_err(|e| e.to_string())?; entry.mark_polled(latest); - Ok(FilterOutput::from(all_logs)) + Ok(FilterOutput::from(logs)) } }; diff --git a/crates/rpc-storage/src/eth/helpers.rs b/crates/rpc-storage/src/eth/helpers.rs index 38ae472..f387fa6 100644 --- a/crates/rpc-storage/src/eth/helpers.rs +++ b/crates/rpc-storage/src/eth/helpers.rs @@ -1,10 +1,8 @@ //! Parameter types, macros, and utility helpers for ETH RPC endpoints. 
-use crate::eth::error::EthError; use alloy::{ consensus::{ - BlockHeader, ReceiptEnvelope, ReceiptWithBloom, Transaction, TxReceipt, - transaction::{Recovered, SignerRecoverable}, + ReceiptEnvelope, ReceiptWithBloom, Transaction, TxReceipt, transaction::Recovered, }, eips::BlockId, primitives::{Address, TxKind, U256}, @@ -13,7 +11,7 @@ use alloy::{ }, }; use serde::Deserialize; -use signet_cold::ReceiptContext; +use signet_cold::ColdReceipt; use signet_storage_types::ConfirmationMeta; use trevm::MIN_TRANSACTION_GAS; @@ -105,33 +103,6 @@ macro_rules! response_tri { } pub(crate) use response_tri; -/// An iterator that yields inclusive block ranges of a given step size. -#[derive(Debug)] -pub(crate) struct BlockRangeInclusiveIter { - iter: std::iter::StepBy>, - step: u64, - end: u64, -} - -impl BlockRangeInclusiveIter { - pub(crate) fn new(range: std::ops::RangeInclusive, step: u64) -> Self { - Self { end: *range.end(), iter: range.step_by(step as usize + 1), step } - } -} - -impl Iterator for BlockRangeInclusiveIter { - type Item = (u64, u64); - - fn next(&mut self) -> Option { - let start = self.iter.next()?; - let end = (start + self.step).min(self.end); - if start > end { - return None; - } - Some((start, end)) - } -} - /// Small wrapper implementing [`trevm::Cfg`] to set the chain ID. pub(crate) struct CfgFiller(pub u64); @@ -141,138 +112,71 @@ impl trevm::Cfg for CfgFiller { } } -/// Recover the sender of a transaction, falling back to [`MagicSig`]. -/// -/// [`MagicSig`]: signet_types::MagicSig -pub(crate) fn recover_sender( - tx: &signet_storage_types::TransactionSigned, -) -> Result { - signet_types::MagicSig::try_from_signature(tx.signature()) - .map(|s| s.rollup_sender()) - .or_else(|| SignerRecoverable::recover_signer_unchecked(tx).ok()) - .ok_or(EthError::InvalidSignature) -} - /// Build an [`alloy::rpc::types::Transaction`] from cold storage types. 
pub(crate) fn build_rpc_transaction( - tx: signet_storage_types::TransactionSigned, + tx: signet_storage_types::RecoveredTx, meta: &ConfirmationMeta, base_fee: Option, -) -> Result { - let sender = recover_sender(&tx)?; - - // Convert EthereumTxEnvelope → TxEnvelope (EthereumTxEnvelope) - let tx_envelope: alloy::consensus::TxEnvelope = tx.into(); - let inner = Recovered::new_unchecked(tx_envelope, sender); +) -> alloy::rpc::types::Transaction { + let signer = tx.signer(); + let tx_envelope: alloy::consensus::TxEnvelope = tx.into_inner().into(); + let inner = Recovered::new_unchecked(tx_envelope, signer); let egp = base_fee .map(|bf| inner.effective_tip_per_gas(bf).unwrap_or_default() as u64 + bf) .unwrap_or_else(|| inner.max_fee_per_gas() as u64); - Ok(alloy::rpc::types::Transaction { + alloy::rpc::types::Transaction { inner, block_hash: Some(meta.block_hash()), block_number: Some(meta.block_number()), transaction_index: Some(meta.transaction_index()), effective_gas_price: Some(egp as u128), - }) -} - -/// Build a [`TransactionReceipt`] from a [`ReceiptContext`]. -pub(crate) fn build_receipt( - ctx: ReceiptContext, -) -> Result>, EthError> { - let (receipt, meta) = ctx.receipt.into_parts(); - let gas_used = receipt.inner.cumulative_gas_used() - ctx.prior_cumulative_gas; - - build_receipt_inner( - ctx.transaction, - &ctx.header, - &meta, - receipt, - gas_used, - 0, // log_index_offset: single receipt, no prior logs - ) + } } -/// Build a [`TransactionReceipt`] from individual components. +/// Build a [`TransactionReceipt`] from a [`ColdReceipt`] and its transaction. /// -/// Used by `eth_getBlockReceipts` where all receipts in the block are available. 
-pub(crate) fn build_receipt_from_parts( - tx: signet_storage_types::TransactionSigned, - header: &alloy::consensus::Header, - block_hash: alloy::primitives::B256, - tx_index: u64, - receipt: signet_storage_types::Receipt, - gas_used: u64, - log_index_offset: u64, -) -> Result>, EthError> { - let meta = ConfirmationMeta::new(header.number, block_hash, tx_index); - build_receipt_inner(tx, header, &meta, receipt, gas_used, log_index_offset) -} - -/// Shared receipt builder. -fn build_receipt_inner( - tx: signet_storage_types::TransactionSigned, - header: &alloy::consensus::Header, - meta: &ConfirmationMeta, - receipt: signet_storage_types::Receipt, - gas_used: u64, - log_index_offset: u64, -) -> Result>, EthError> { - let sender = recover_sender(&tx)?; - let tx_hash = *tx.tx_hash(); - - let logs_bloom = receipt.inner.bloom(); - let status = receipt.inner.status_or_post_state(); - let cumulative_gas_used = receipt.inner.cumulative_gas_used(); - - let logs: Vec = receipt - .inner - .logs - .into_iter() - .enumerate() - .map(|(i, log)| Log { - inner: log, - block_hash: Some(meta.block_hash()), - block_number: Some(meta.block_number()), - block_timestamp: Some(header.timestamp), - transaction_hash: Some(tx_hash), - transaction_index: Some(meta.transaction_index()), - log_index: Some(log_index_offset + i as u64), - removed: false, - }) - .collect(); +/// The transaction is needed for `to`, `contract_address`, and +/// `effective_gas_price` which are not stored on the receipt. 
+pub(crate) fn build_receipt( + cr: ColdReceipt, + tx: &signet_storage_types::RecoveredTx, + base_fee: Option, +) -> TransactionReceipt> { + let logs_bloom = cr.receipt.bloom(); + let status = cr.receipt.status; + let cumulative_gas_used = cr.receipt.cumulative_gas_used; - let rpc_receipt = alloy::rpc::types::eth::Receipt { status, cumulative_gas_used, logs }; + let rpc_receipt = + alloy::rpc::types::eth::Receipt { status, cumulative_gas_used, logs: cr.receipt.logs }; let (contract_address, to) = match tx.kind() { - TxKind::Create => (Some(sender.create(tx.nonce())), None), + TxKind::Create => (Some(cr.from.create(tx.nonce())), None), TxKind::Call(addr) => (None, Some(Address(*addr))), }; - let base_fee = header.base_fee_per_gas(); let egp = base_fee .map(|bf| tx.effective_tip_per_gas(bf).unwrap_or_default() as u64 + bf) .unwrap_or_else(|| tx.max_fee_per_gas() as u64); - Ok(TransactionReceipt { + TransactionReceipt { inner: build_receipt_envelope( ReceiptWithBloom { receipt: rpc_receipt, logs_bloom }, - receipt.tx_type, + cr.tx_type, ), - transaction_hash: tx_hash, - transaction_index: Some(meta.transaction_index()), - block_hash: Some(meta.block_hash()), - block_number: Some(meta.block_number()), - from: sender, + transaction_hash: cr.tx_hash, + transaction_index: Some(cr.transaction_index), + block_hash: Some(cr.block_hash), + block_number: Some(cr.block_number), + from: cr.from, to, - gas_used, + gas_used: cr.gas_used, contract_address, effective_gas_price: egp as u128, blob_gas_price: None, blob_gas_used: None, - }) + } } /// Wrap a receipt in the appropriate [`ReceiptEnvelope`] variant. 
diff --git a/crates/rpc-storage/src/signet/endpoints.rs b/crates/rpc-storage/src/signet/endpoints.rs index c1c3f26..d537980 100644 --- a/crates/rpc-storage/src/signet/endpoints.rs +++ b/crates/rpc-storage/src/signet/endpoints.rs @@ -61,15 +61,18 @@ where let cold = ctx.cold(); let block_num = response_tri!(ctx.resolve_block_id(block_id).await); - let mut header = + let sealed_header = response_tri!(cold.get_header_by_number(block_num).await.map_err(|e| e.to_string())); - let header = - response_tri!(header.as_mut().ok_or_else(|| format!("block not found: {block_id}"))); + let sealed_header = + response_tri!(sealed_header.ok_or_else(|| format!("block not found: {block_id}"))); + + let parent_hash = sealed_header.hash(); + let mut header = sealed_header.into_inner(); // For pending blocks, synthesize the next-block header. if pending { - header.parent_hash = header.hash_slow(); + header.parent_hash = parent_hash; header.number += 1; header.timestamp += 12; header.base_fee_per_gas = header.next_block_base_fee(BaseFeeParams::ethereum()); @@ -83,7 +86,7 @@ where let trevm = signet_evm::signet_evm(db, ctx.constants().clone()) .fill_cfg(&CfgFiller(ctx.chain_id())) - .fill_block(header); + .fill_block(&header); response_tri!(trevm.drive_bundle(&mut driver).map_err(|e| e.into_error())); diff --git a/crates/rpc-storage/tests/eth_rpc.rs b/crates/rpc-storage/tests/eth_rpc.rs index e6d5655..2a9a15d 100644 --- a/crates/rpc-storage/tests/eth_rpc.rs +++ b/crates/rpc-storage/tests/eth_rpc.rs @@ -6,7 +6,7 @@ use alloy::{ consensus::{ EthereumTxEnvelope, Header, Receipt as AlloyReceipt, SignableTransaction, Signed, TxLegacy, - TxType, + TxType, transaction::Recovered, }, primitives::{Address, B256, Log as PrimitiveLog, LogData, TxKind, U256, address, logs_bloom}, }; @@ -18,7 +18,7 @@ use signet_constants::SignetSystemConstants; use signet_hot::{HotKv, db::UnsafeDbWrite, mem::MemKv}; use signet_rpc_storage::{BlockTags, NewBlockNotification, StorageRpcConfig, StorageRpcCtx}; use 
signet_storage::UnifiedStorage; -use signet_storage_types::Receipt; +use signet_storage_types::{Receipt, SealedHeader}; use tokio::sync::broadcast; use tokio_util::sync::CancellationToken; use tower::ServiceExt; @@ -124,9 +124,9 @@ const LOG_TOPIC: B256 = B256::repeat_byte(0xcc); /// Create a legacy transaction signed with a deterministic key. /// -/// Uses alloy's signer to produce a valid ECDSA signature so that -/// `recover_sender` succeeds during RPC response building. -fn make_signed_tx(nonce: u64) -> (signet_storage_types::TransactionSigned, Address) { +/// Returns a [`RecoveredTx`] with the sender pre-recovered, plus the sender +/// address for use in test assertions. +fn make_signed_tx(nonce: u64) -> (signet_storage_types::RecoveredTx, Address) { make_signed_tx_with_gas_price(nonce, 1_000_000_000) } @@ -134,7 +134,7 @@ fn make_signed_tx(nonce: u64) -> (signet_storage_types::TransactionSigned, Addre fn make_signed_tx_with_gas_price( nonce: u64, gas_price: u128, -) -> (signet_storage_types::TransactionSigned, Address) { +) -> (signet_storage_types::RecoveredTx, Address) { use alloy::signers::{SignerSync, local::PrivateKeySigner}; let signer = PrivateKeySigner::from_signing_key( @@ -159,7 +159,7 @@ fn make_signed_tx_with_gas_price( let signed: signet_storage_types::TransactionSigned = EthereumTxEnvelope::Legacy(Signed::new_unhashed(tx, sig)); - (signed, sender) + (Recovered::new_unchecked(signed, sender), sender) } /// Build a [`BlockData`] from pre-signed transactions. @@ -168,7 +168,7 @@ fn make_signed_tx_with_gas_price( /// attaches logs to each receipt. 
fn make_block( block_num: u64, - txs: Vec, + txs: Vec, logs_per_receipt: usize, ) -> BlockData { let receipts: Vec = txs @@ -212,7 +212,7 @@ fn make_block( ..Default::default() }; - BlockData::new(header, txs, receipts, vec![], None) + BlockData::new(SealedHeader::new(header), txs, receipts, vec![], None) } // --------------------------------------------------------------------------- From d6114f8dfc6030ce82b14186a4428bd46d35d8ab Mon Sep 17 00:00:00 2001 From: James Date: Sat, 14 Feb 2026 08:59:16 -0500 Subject: [PATCH 12/31] chore: bump signet-storage crates to 0.4.0 Co-Authored-By: Claude Opus 4.6 --- Cargo.toml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index aea7d27..f4fb977 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,10 +56,10 @@ signet-tx-cache = "0.16.0-rc.8" signet-types = "0.16.0-rc.8" signet-zenith = "0.16.0-rc.8" signet-journal = "0.16.0-rc.8" -signet-storage = "0.3.0" -signet-cold = "0.3.0" -signet-hot = "0.3.0" -signet-storage-types = "0.3.0" +signet-storage = "0.4.0" +signet-cold = "0.4.0" +signet-hot = "0.4.0" +signet-storage-types = "0.4.0" # ajj ajj = { version = "0.3.4" } From 2963621d6004caf815a56d1f9517a2be740176a4 Mon Sep 17 00:00:00 2001 From: James Date: Sat, 14 Feb 2026 09:54:53 -0500 Subject: [PATCH 13/31] refactor: use hot storage for block resolution and header lookups Addresses PR review feedback: - resolve_block_id now uses hot HotDbRead::get_header_number instead of cold storage, making it synchronous and avoiding async overhead - Add resolve_header for direct header fetches from hot storage, eliminating the double header lookup in header_by - Change not_supported() to return method_not_found() (JSON-RPC -32601) instead of internal_error (-32603) - Update ResolveError to use Storage/Db variants instead of Cold - Update tests to write headers to both hot and cold storage Co-Authored-By: Claude Opus 4.6 --- crates/rpc-storage/src/ctx.rs | 62 ++++++++++++++-------- 
crates/rpc-storage/src/debug/endpoints.rs | 4 +- crates/rpc-storage/src/eth/endpoints.rs | 35 ++++++------ crates/rpc-storage/src/resolve.rs | 8 ++- crates/rpc-storage/src/signet/endpoints.rs | 2 +- crates/rpc-storage/tests/eth_rpc.rs | 52 +++++++++++------- 6 files changed, 94 insertions(+), 69 deletions(-) diff --git a/crates/rpc-storage/src/ctx.rs b/crates/rpc-storage/src/ctx.rs index 5c5495d..825cb21 100644 --- a/crates/rpc-storage/src/ctx.rs +++ b/crates/rpc-storage/src/ctx.rs @@ -8,6 +8,7 @@ use crate::{ use alloy::eips::{BlockId, BlockNumberOrTag}; use signet_cold::ColdStorageReadHandle; use signet_hot::HotKv; +use signet_hot::db::HotDbRead; use signet_hot::model::{HotKvRead, RevmRead}; use signet_storage::UnifiedStorage; use signet_tx_cache::TxCache; @@ -177,17 +178,43 @@ impl StorageRpcCtx { /// /// For tag/number-based IDs, resolves synchronously via /// [`resolve_block_tag`](Self::resolve_block_tag). For hash-based IDs, - /// fetches the header from cold storage to obtain the block number. - pub(crate) async fn resolve_block_id(&self, id: BlockId) -> Result { + /// looks up the block number from hot storage's `HeaderNumbers` table. + pub(crate) fn resolve_block_id(&self, id: BlockId) -> Result + where + ::Error: std::error::Error + Send + Sync + 'static, + { match id { BlockId::Number(tag) => Ok(self.resolve_block_tag(tag)), BlockId::Hash(h) => { - let header = self - .cold() - .get_header_by_hash(h.block_hash) - .await? - .ok_or(ResolveError::HashNotFound(h.block_hash))?; - Ok(header.number) + let reader = self.hot_reader()?; + reader + .get_header_number(&h.block_hash) + .map_err(|e| ResolveError::Db(Box::new(e)))? + .ok_or(ResolveError::HashNotFound(h.block_hash)) + } + } + } + + /// Resolve a [`BlockId`] to a header from hot storage. + /// + /// For hash-based IDs, fetches the header directly by hash. For + /// tag/number-based IDs, resolves the tag then fetches the header by + /// number. Returns `None` if the header is not found. 
+ pub(crate) fn resolve_header( + &self, + id: BlockId, + ) -> Result, ResolveError> + where + ::Error: std::error::Error + Send + Sync + 'static, + { + let reader = self.hot_reader()?; + match id { + BlockId::Hash(h) => { + reader.header_by_hash(&h.block_hash).map_err(|e| ResolveError::Db(Box::new(e))) + } + BlockId::Number(tag) => { + let height = self.resolve_block_tag(tag); + reader.get_header(height).map_err(|e| ResolveError::Db(Box::new(e))) } } } @@ -209,27 +236,16 @@ impl StorageRpcCtx { /// Resolve a [`BlockId`] to a header and revm database in one pass. /// - /// For hash-based IDs, fetches the header directly by hash. For - /// tag/number-based IDs, resolves the tag then fetches the header by - /// number. This avoids a redundant header lookup that would occur if - /// resolving to a block number first. - pub(crate) async fn resolve_evm_block( + /// Fetches the header from hot storage and creates a revm-compatible + /// database snapshot at the resolved block height. + pub(crate) fn resolve_evm_block( &self, id: BlockId, ) -> Result>, EthError> where ::Error: DBErrorMarker, { - let cold = self.cold(); - let header = match id { - BlockId::Hash(h) => cold.get_header_by_hash(h.block_hash).await?, - BlockId::Number(tag) => { - let height = self.resolve_block_tag(tag); - cold.get_header_by_number(height).await? 
- } - } - .ok_or(EthError::BlockNotFound(id))?; - + let header = self.resolve_header(id)?.ok_or(EthError::BlockNotFound(id))?; let db = self.revm_state_at_height(header.number)?; Ok(EvmBlockContext { header: header.into_inner(), db }) } diff --git a/crates/rpc-storage/src/debug/endpoints.rs b/crates/rpc-storage/src/debug/endpoints.rs index dc0656b..7dca346 100644 --- a/crates/rpc-storage/src/debug/endpoints.rs +++ b/crates/rpc-storage/src/debug/endpoints.rs @@ -49,9 +49,7 @@ where let fut = async move { let cold = ctx.cold(); let block_num = response_tri!( - ctx.resolve_block_id(id) - .await - .map_err(|e| { DebugError::BlockNotFound(e.to_string()) }) + ctx.resolve_block_id(id).map_err(|e| { DebugError::BlockNotFound(e.to_string()) }) ); let (header, txs) = response_tri!( diff --git a/crates/rpc-storage/src/eth/endpoints.rs b/crates/rpc-storage/src/eth/endpoints.rs index 8f1b8f0..412a47a 100644 --- a/crates/rpc-storage/src/eth/endpoints.rs +++ b/crates/rpc-storage/src/eth/endpoints.rs @@ -24,7 +24,6 @@ use alloy::{ use signet_cold::{HeaderSpecifier, ReceiptSpecifier}; use signet_hot::model::HotKvRead; use signet_hot::{HistoryRead, HotKv, db::HotDbRead}; -use std::borrow::Cow; use tracing::{Instrument, debug, trace_span}; use trevm::{EstimationResult, revm::database::DBErrorMarker}; @@ -35,7 +34,7 @@ use super::error::CallErrorData; // --------------------------------------------------------------------------- pub(crate) async fn not_supported() -> ResponsePayload<(), ()> { - ResponsePayload::internal_error_message(Cow::Borrowed("Method not supported.")) + ResponsePayload::method_not_found() } // --------------------------------------------------------------------------- @@ -274,7 +273,7 @@ where let task = async move { let cold = ctx.cold(); - let block_num = ctx.resolve_block_id(id).await.map_err(|e| e.to_string())?; + let block_num = ctx.resolve_block_id(id).map_err(|e| e.to_string())?; let (header, txs) = tokio::try_join!( cold.get_header_by_number(block_num), 
@@ -336,7 +335,7 @@ where let task = async move { let cold = ctx.cold(); - let block_num = ctx.resolve_block_id(id).await.map_err(|e| e.to_string())?; + let block_num = ctx.resolve_block_id(id).map_err(|e| e.to_string())?; cold.get_transaction_count(block_num) .await @@ -358,7 +357,7 @@ where { let task = async move { let cold = ctx.cold(); - let block_num = ctx.resolve_block_id(id).await.map_err(|e| e.to_string())?; + let block_num = ctx.resolve_block_id(id).map_err(|e| e.to_string())?; let (header, txs, receipts) = tokio::try_join!( cold.get_header_by_number(block_num), @@ -395,13 +394,9 @@ where let id = t.into(); let task = async move { - let cold = ctx.cold(); - let block_num = ctx.resolve_block_id(id).await.map_err(|e| e.to_string())?; - - cold.get_header_by_number(block_num) - .await - .map(|h| { - h.map(|sh| { + ctx.resolve_header(id) + .map(|opt| { + opt.map(|sh| { let hash = sh.hash(); alloy::rpc::types::Header { inner: sh.into_inner(), @@ -483,7 +478,7 @@ where let task = async move { let cold = ctx.cold(); - let block_num = ctx.resolve_block_id(id).await.map_err(|e| e.to_string())?; + let block_num = ctx.resolve_block_id(id).map_err(|e| e.to_string())?; let Some(confirmed) = cold .get_tx_by_block_and_index(block_num, index.to::()) @@ -518,7 +513,7 @@ where let task = async move { let cold = ctx.cold(); - let block_num = ctx.resolve_block_id(id).await.map_err(|e| e.to_string())?; + let block_num = ctx.resolve_block_id(id).map_err(|e| e.to_string())?; cold.get_tx_by_block_and_index(block_num, index.to::()) .await @@ -578,7 +573,7 @@ where let block = block.unwrap_or(BlockId::latest()); let task = async move { - let height = ctx.resolve_block_id(block).await.map_err(|e| e.to_string())?; + let height = ctx.resolve_block_id(block).map_err(|e| e.to_string())?; let reader = ctx.hot_reader().map_err(|e| e.to_string())?; let acct = @@ -602,7 +597,7 @@ where let block = block.unwrap_or(BlockId::latest()); let task = async move { - let height = 
ctx.resolve_block_id(block).await.map_err(|e| e.to_string())?; + let height = ctx.resolve_block_id(block).map_err(|e| e.to_string())?; let reader = ctx.hot_reader().map_err(|e| e.to_string())?; let val = reader @@ -627,7 +622,7 @@ where let block = block.unwrap_or(BlockId::latest()); let task = async move { - let height = ctx.resolve_block_id(block).await.map_err(|e| e.to_string())?; + let height = ctx.resolve_block_id(block).map_err(|e| e.to_string())?; let reader = ctx.hot_reader().map_err(|e| e.to_string())?; let acct = @@ -651,7 +646,7 @@ where let block = block.unwrap_or(BlockId::latest()); let task = async move { - let height = ctx.resolve_block_id(block).await.map_err(|e| e.to_string())?; + let height = ctx.resolve_block_id(block).map_err(|e| e.to_string())?; let reader = ctx.hot_reader().map_err(|e| e.to_string())?; let acct = @@ -693,7 +688,7 @@ where let span = trace_span!("eth_call", block_id = %id); let task = async move { - let EvmBlockContext { header, db } = response_tri!(ctx.resolve_evm_block(id).await); + let EvmBlockContext { header, db } = response_tri!(ctx.resolve_evm_block(id)); let trevm = signet_evm::signet_evm(db, ctx.constants().clone()) .fill_cfg(&CfgFiller(ctx.chain_id())) @@ -750,7 +745,7 @@ where let span = trace_span!("eth_estimateGas", block_id = %id); let task = async move { - let EvmBlockContext { header, db } = response_tri!(ctx.resolve_evm_block(id).await); + let EvmBlockContext { header, db } = response_tri!(ctx.resolve_evm_block(id)); let trevm = signet_evm::signet_evm(db, ctx.constants().clone()) .fill_cfg(&CfgFiller(ctx.chain_id())) diff --git a/crates/rpc-storage/src/resolve.rs b/crates/rpc-storage/src/resolve.rs index f5f6ebe..68f9b49 100644 --- a/crates/rpc-storage/src/resolve.rs +++ b/crates/rpc-storage/src/resolve.rs @@ -5,6 +5,7 @@ //! updating these as the chain progresses. 
use alloy::primitives::B256; +use signet_storage::StorageError; use std::sync::{ Arc, atomic::{AtomicU64, Ordering}, @@ -77,9 +78,12 @@ impl BlockTags { /// Error resolving a block identifier. #[derive(Debug, thiserror::Error)] pub enum ResolveError { - /// Cold storage error. + /// Storage error (e.g. failed to open a read transaction). #[error(transparent)] - Cold(#[from] signet_cold::ColdStorageError), + Storage(#[from] StorageError), + /// Database read error. + #[error("{0}")] + Db(Box), /// Block hash not found. #[error("block hash not found: {0}")] HashNotFound(B256), diff --git a/crates/rpc-storage/src/signet/endpoints.rs b/crates/rpc-storage/src/signet/endpoints.rs index d537980..7986d52 100644 --- a/crates/rpc-storage/src/signet/endpoints.rs +++ b/crates/rpc-storage/src/signet/endpoints.rs @@ -59,7 +59,7 @@ where } let cold = ctx.cold(); - let block_num = response_tri!(ctx.resolve_block_id(block_id).await); + let block_num = response_tri!(ctx.resolve_block_id(block_id)); let sealed_header = response_tri!(cold.get_header_by_number(block_num).await.map_err(|e| e.to_string())); diff --git a/crates/rpc-storage/tests/eth_rpc.rs b/crates/rpc-storage/tests/eth_rpc.rs index 2a9a15d..ae4ff00 100644 --- a/crates/rpc-storage/tests/eth_rpc.rs +++ b/crates/rpc-storage/tests/eth_rpc.rs @@ -62,6 +62,17 @@ impl TestHarness { Self { app, cold, hot, tags, notif_tx, ctx, _cancel: cancel } } + /// Append a block to both hot and cold storage. + /// + /// Writes the header to hot so hash→number and header lookups work, + /// then writes the full block to cold. + async fn append_block(&self, block: BlockData) { + let writer = self.hot.writer().unwrap(); + writer.put_header(&block.header).unwrap(); + writer.commit().unwrap(); + self.cold.append_block(block).await.unwrap(); + } + /// Build an axum router for the debug namespace. 
fn debug_app(&self) -> axum::Router { signet_rpc_storage::debug::().into_axum("/").with_state(self.ctx.clone()) @@ -238,7 +249,8 @@ async fn test_chain_id() { // Group 2: Cold storage — block queries // --------------------------------------------------------------------------- -/// Shared setup: append a block with 2 signed transactions to cold storage. +/// Shared setup: append a block with 2 signed transactions to both hot and +/// cold storage. async fn setup_cold_block(h: &TestHarness) -> (Vec, Vec
) { let (tx0, sender0) = make_signed_tx(0); let (tx1, sender1) = make_signed_tx(1); @@ -247,7 +259,7 @@ async fn setup_cold_block(h: &TestHarness) -> (Vec, Vec
) { let hash1 = *tx1.tx_hash(); let block = make_block(1, vec![tx0, tx1], 1); - h.cold.append_block(block).await.unwrap(); + h.append_block(block).await; h.tags.set_latest(1); (vec![hash0, hash1], vec![sender0, sender1]) @@ -441,7 +453,7 @@ async fn test_get_balance() { // Append a dummy block so tag resolution succeeds let block = make_block(1, vec![], 0); - h.cold.append_block(block).await.unwrap(); + h.append_block(block).await; let result = rpc_call(&h.app, "getBalance", json!([format!("{:?}", TEST_ADDR), "latest"])).await; @@ -456,7 +468,7 @@ async fn test_get_transaction_count() { setup_hot_account(&h.hot); let block = make_block(1, vec![], 0); - h.cold.append_block(block).await.unwrap(); + h.append_block(block).await; let result = rpc_call(&h.app, "getTransactionCount", json!([format!("{:?}", TEST_ADDR), "latest"])) @@ -471,7 +483,7 @@ async fn test_get_storage_at() { setup_hot_account(&h.hot); let block = make_block(1, vec![], 0); - h.cold.append_block(block).await.unwrap(); + h.append_block(block).await; let slot = format!("{:#066x}", 42u64); let result = @@ -488,7 +500,7 @@ async fn test_get_code() { setup_hot_account(&h.hot); let block = make_block(1, vec![], 0); - h.cold.append_block(block).await.unwrap(); + h.append_block(block).await; let result = rpc_call(&h.app, "getCode", json!([format!("{:?}", TEST_ADDR), "latest"])).await; @@ -500,7 +512,7 @@ async fn test_get_balance_unknown_account() { let h = TestHarness::new(1).await; let block = make_block(1, vec![], 0); - h.cold.append_block(block).await.unwrap(); + h.append_block(block).await; let unknown = Address::repeat_byte(0xff); let result = rpc_call(&h.app, "getBalance", json!([format!("{:?}", unknown), "latest"])).await; @@ -519,7 +531,7 @@ async fn test_get_logs_by_block_hash() { // Create block with transactions that have logs let (tx0, _) = make_signed_tx(0); let block = make_block(1, vec![tx0], 2); // 2 logs per receipt - h.cold.append_block(block).await.unwrap(); + h.append_block(block).await; 
h.tags.set_latest(1); // Get the block hash @@ -550,7 +562,7 @@ async fn test_get_logs_by_range() { let (tx0, _) = make_signed_tx(0); let block = make_block(1, vec![tx0], 1); - h.cold.append_block(block).await.unwrap(); + h.append_block(block).await; h.tags.set_latest(1); let result = rpc_call( @@ -575,7 +587,7 @@ async fn test_get_logs_empty() { let (tx0, _) = make_signed_tx(0); let block = make_block(1, vec![tx0], 0); // no logs - h.cold.append_block(block).await.unwrap(); + h.append_block(block).await; h.tags.set_latest(1); let result = rpc_call( @@ -602,7 +614,7 @@ async fn test_not_supported() { let resp = rpc_call_raw(&h.app, "syncing", json!([])).await; assert!(resp.get("error").is_some()); let msg = resp["error"]["message"].as_str().unwrap(); - assert!(msg.contains("not supported"), "unexpected error: {msg}"); + assert!(msg.contains("not found"), "unexpected error: {msg}"); } #[tokio::test] @@ -623,7 +635,7 @@ async fn test_gas_price() { // Create a block with txs that have gas_price (2 gwei) > base_fee (1 gwei) let (tx0, _) = make_signed_tx_with_gas_price(0, 2_000_000_000); let block = make_block(1, vec![tx0], 0); - h.cold.append_block(block).await.unwrap(); + h.append_block(block).await; h.tags.set_latest(1); let result = rpc_call(&h.app, "gasPrice", json!([])).await; @@ -639,7 +651,7 @@ async fn test_max_priority_fee_per_gas() { let (tx0, _) = make_signed_tx_with_gas_price(0, 2_000_000_000); let block = make_block(1, vec![tx0], 0); - h.cold.append_block(block).await.unwrap(); + h.append_block(block).await; h.tags.set_latest(1); let result = rpc_call(&h.app, "maxPriorityFeePerGas", json!([])).await; @@ -653,7 +665,7 @@ async fn test_gas_price_empty_blocks() { let h = TestHarness::new(0).await; let block = make_block(1, vec![], 0); - h.cold.append_block(block).await.unwrap(); + h.append_block(block).await; h.tags.set_latest(1); let result = rpc_call(&h.app, "gasPrice", json!([])).await; @@ -669,7 +681,7 @@ async fn test_fee_history_basic() { for i in 
1u64..=3 { let (tx, _) = make_signed_tx_with_gas_price(i - 1, 2_000_000_000); let block = make_block(i, vec![tx], 0); - h.cold.append_block(block).await.unwrap(); + h.append_block(block).await; } h.tags.set_latest(3); @@ -694,7 +706,7 @@ async fn test_fee_history_with_rewards() { let (tx0, _) = make_signed_tx_with_gas_price(0, 2_000_000_000); let block = make_block(1, vec![tx0], 0); - h.cold.append_block(block).await.unwrap(); + h.append_block(block).await; h.tags.set_latest(1); let result = rpc_call(&h.app, "feeHistory", json!(["0x1", "0x1", [25.0, 75.0]])).await; @@ -721,7 +733,7 @@ async fn test_new_block_filter_and_changes() { // Append a block let (tx0, _) = make_signed_tx(0); let block = make_block(1, vec![tx0], 0); - h.cold.append_block(block).await.unwrap(); + h.append_block(block).await; h.tags.set_latest(1); // Poll for changes — should get block hash for block 1 @@ -755,7 +767,7 @@ async fn test_new_log_filter_and_changes() { // Append a block with matching logs let (tx0, _) = make_signed_tx(0); let block = make_block(1, vec![tx0], 2); - h.cold.append_block(block).await.unwrap(); + h.append_block(block).await; h.tags.set_latest(1); // Poll for changes — should get matching logs @@ -810,7 +822,7 @@ async fn test_trace_block_by_number_noop() { setup_hot_for_evm(&h.hot, sender, U256::from(1_000_000_000_000_000_000u128)); let block = make_block(1, vec![tx0], 0); - h.cold.append_block(block).await.unwrap(); + h.append_block(block).await; h.tags.set_latest(1); let debug_app = h.debug_app(); @@ -830,7 +842,7 @@ async fn test_trace_transaction_noop() { setup_hot_for_evm(&h.hot, sender, U256::from(1_000_000_000_000_000_000u128)); let block = make_block(1, vec![tx0], 0); - h.cold.append_block(block).await.unwrap(); + h.append_block(block).await; h.tags.set_latest(1); let debug_app = h.debug_app(); From 0c08f2f5f9351a2da7693763a355eb2ea61e596a Mon Sep 17 00:00:00 2001 From: James Date: Sat, 14 Feb 2026 10:18:57 -0500 Subject: [PATCH 14/31] refactor: align 
rpc-storage data shapes with canonical rpc crate Swap return types to use RpcBlock/RpcReceipt/RpcTransaction/RpcHeader type aliases, rename tx_by_block_and_index to match rpc naming, fix not_supported error message, and split call into run_call + call. Co-Authored-By: Claude Opus 4.6 --- crates/rpc-storage/Cargo.toml | 1 + crates/rpc-storage/src/eth/endpoints.rs | 75 ++++++++++++++++--------- crates/rpc-storage/src/eth/helpers.rs | 6 +- crates/rpc-storage/src/eth/mod.rs | 21 ++++--- crates/rpc-storage/tests/eth_rpc.rs | 2 +- 5 files changed, 70 insertions(+), 35 deletions(-) diff --git a/crates/rpc-storage/Cargo.toml b/crates/rpc-storage/Cargo.toml index 4a1fe01..d12a79a 100644 --- a/crates/rpc-storage/Cargo.toml +++ b/crates/rpc-storage/Cargo.toml @@ -27,6 +27,7 @@ tracing.workspace = true thiserror.workspace = true serde.workspace = true dashmap = "6.1.0" +reth-rpc-eth-api.workspace = true revm-inspectors.workspace = true itertools.workspace = true diff --git a/crates/rpc-storage/src/eth/endpoints.rs b/crates/rpc-storage/src/eth/endpoints.rs index 412a47a..6f1071c 100644 --- a/crates/rpc-storage/src/eth/endpoints.rs +++ b/crates/rpc-storage/src/eth/endpoints.rs @@ -18,14 +18,19 @@ use alloy::{ eip1559::BaseFeeParams, eip2718::{Decodable2718, Encodable2718}, }, + network::Ethereum, primitives::{B256, U64, U256}, rpc::types::{Block, BlockTransactions, FeeHistory, Filter, Log}, }; +use reth_rpc_eth_api::{RpcBlock, RpcHeader, RpcReceipt, RpcTransaction}; use signet_cold::{HeaderSpecifier, ReceiptSpecifier}; use signet_hot::model::HotKvRead; use signet_hot::{HistoryRead, HotKv, db::HotDbRead}; +use std::borrow::Cow; use tracing::{Instrument, debug, trace_span}; -use trevm::{EstimationResult, revm::database::DBErrorMarker}; +use trevm::{ + EstimationResult, revm::context::result::ExecutionResult, revm::database::DBErrorMarker, +}; use super::error::CallErrorData; @@ -34,7 +39,9 @@ use super::error::CallErrorData; // 
--------------------------------------------------------------------------- pub(crate) async fn not_supported() -> ResponsePayload<(), ()> { - ResponsePayload::method_not_found() + ResponsePayload::internal_error_message(Cow::Borrowed( + "Method not supported. See signet documentation for a list of unsupported methods: https://signet.sh/docs", + )) } // --------------------------------------------------------------------------- @@ -262,7 +269,7 @@ pub(crate) async fn block( hctx: HandlerCtx, BlockParams(t, full): BlockParams, ctx: StorageRpcCtx, -) -> Result, String> +) -> Result>, String> where T: Into, H: HotKv + Send + Sync + 'static, @@ -350,7 +357,7 @@ pub(crate) async fn block_receipts( hctx: HandlerCtx, (id,): (BlockId,), ctx: StorageRpcCtx, -) -> Result>, String> +) -> Result>>, String> where H: HotKv + Send + Sync + 'static, ::Error: DBErrorMarker, @@ -385,7 +392,7 @@ pub(crate) async fn header_by( hctx: HandlerCtx, (t,): (T,), ctx: StorageRpcCtx, -) -> Result, String> +) -> Result>, String> where T: Into, H: HotKv + Send + Sync + 'static, @@ -420,7 +427,7 @@ pub(crate) async fn transaction_by_hash( hctx: HandlerCtx, (hash,): (B256,), ctx: StorageRpcCtx, -) -> Result, String> +) -> Result>, String> where H: HotKv + Send + Sync + 'static, ::Error: DBErrorMarker, @@ -464,11 +471,11 @@ where await_handler!(@option hctx.spawn_blocking(task)) } -pub(crate) async fn tx_by_block_and_index( +pub(crate) async fn transaction_by_block_and_index( hctx: HandlerCtx, (t, index): (T, U64), ctx: StorageRpcCtx, -) -> Result, String> +) -> Result>, String> where T: Into, H: HotKv + Send + Sync + 'static, @@ -499,7 +506,7 @@ where await_handler!(@option hctx.spawn_blocking(task)) } -pub(crate) async fn raw_tx_by_block_and_index( +pub(crate) async fn raw_transaction_by_block_and_index( hctx: HandlerCtx, (t, index): (T, U64), ctx: StorageRpcCtx, @@ -528,7 +535,7 @@ pub(crate) async fn transaction_receipt( hctx: HandlerCtx, (hash,): (B256,), ctx: StorageRpcCtx, -) -> Result, 
String> +) -> Result>, String> where H: HotKv + Send + Sync + 'static, ::Error: DBErrorMarker, @@ -672,20 +679,17 @@ where // EVM Execution // --------------------------------------------------------------------------- -pub(crate) async fn call( +pub(crate) async fn run_call( hctx: HandlerCtx, - TxParams(mut request, block, state_overrides, block_overrides): TxParams, + TxParams(request, block, state_overrides, block_overrides): TxParams, ctx: StorageRpcCtx, -) -> ResponsePayload +) -> ResponsePayload where H: HotKv + Send + Sync + 'static, ::Error: DBErrorMarker, { - let max_gas = ctx.config().rpc_gas_cap; - normalize_gas_stateless(&mut request, max_gas); - let id = block.unwrap_or(BlockId::latest()); - let span = trace_span!("eth_call", block_id = %id); + let span = trace_span!("run_call", block_id = %id); let task = async move { let EvmBlockContext { header, db } = response_tri!(ctx.resolve_evm_block(id)); @@ -705,28 +709,49 @@ where } let result = response_tri!(trevm.call().map_err(signet_evm::EvmErrored::into_error)); + ResponsePayload::Success(result.0) + } + .instrument(span); + + await_handler!(@response_option hctx.spawn_blocking(task)) +} + +pub(crate) async fn call( + hctx: HandlerCtx, + mut params: TxParams, + ctx: StorageRpcCtx, +) -> ResponsePayload +where + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let max_gas = ctx.config().rpc_gas_cap; + normalize_gas_stateless(&mut params.0, max_gas); + + await_handler!(@response_option hctx.spawn_with_ctx(|hctx| async move { + let res = match run_call(hctx, params, ctx).await { + ResponsePayload::Success(res) => res, + ResponsePayload::Failure(err) => return ResponsePayload::Failure(err), + }; - match result.0 { - trevm::revm::context::result::ExecutionResult::Success { output, .. } => { + match res { + ExecutionResult::Success { output, .. } => { ResponsePayload::Success(output.data().clone()) } - trevm::revm::context::result::ExecutionResult::Revert { output, .. 
} => { + ExecutionResult::Revert { output, .. } => { ResponsePayload::internal_error_with_message_and_obj( "execution reverted".into(), output.clone().into(), ) } - trevm::revm::context::result::ExecutionResult::Halt { reason, .. } => { + ExecutionResult::Halt { reason, .. } => { ResponsePayload::internal_error_with_message_and_obj( "execution halted".into(), format!("{reason:?}").into(), ) } } - } - .instrument(span); - - await_handler!(@response_option hctx.spawn_blocking(task)) + })) } pub(crate) async fn estimate_gas( diff --git a/crates/rpc-storage/src/eth/helpers.rs b/crates/rpc-storage/src/eth/helpers.rs index f387fa6..6785d0c 100644 --- a/crates/rpc-storage/src/eth/helpers.rs +++ b/crates/rpc-storage/src/eth/helpers.rs @@ -5,11 +5,13 @@ use alloy::{ ReceiptEnvelope, ReceiptWithBloom, Transaction, TxReceipt, transaction::Recovered, }, eips::BlockId, + network::Ethereum, primitives::{Address, TxKind, U256}, rpc::types::{ BlockOverrides, Log, TransactionReceipt, TransactionRequest, state::StateOverride, }, }; +use reth_rpc_eth_api::{RpcReceipt, RpcTransaction}; use serde::Deserialize; use signet_cold::ColdReceipt; use signet_storage_types::ConfirmationMeta; @@ -117,7 +119,7 @@ pub(crate) fn build_rpc_transaction( tx: signet_storage_types::RecoveredTx, meta: &ConfirmationMeta, base_fee: Option, -) -> alloy::rpc::types::Transaction { +) -> RpcTransaction { let signer = tx.signer(); let tx_envelope: alloy::consensus::TxEnvelope = tx.into_inner().into(); let inner = Recovered::new_unchecked(tx_envelope, signer); @@ -143,7 +145,7 @@ pub(crate) fn build_receipt( cr: ColdReceipt, tx: &signet_storage_types::RecoveredTx, base_fee: Option, -) -> TransactionReceipt> { +) -> RpcReceipt { let logs_bloom = cr.receipt.bloom(); let status = cr.receipt.status; let cumulative_gas_used = cr.receipt.cumulative_gas_used; diff --git a/crates/rpc-storage/src/eth/mod.rs b/crates/rpc-storage/src/eth/mod.rs index 463a9aa..7e1043e 100644 --- a/crates/rpc-storage/src/eth/mod.rs +++ 
b/crates/rpc-storage/src/eth/mod.rs @@ -4,9 +4,10 @@ mod endpoints; use endpoints::{ addr_tx_count, balance, block, block_number, block_receipts, block_tx_count, call, chain_id, code_at, estimate_gas, fee_history, gas_price, get_filter_changes, get_logs, header_by, - max_priority_fee_per_gas, new_block_filter, new_filter, not_supported, raw_transaction_by_hash, - raw_tx_by_block_and_index, send_raw_transaction, storage_at, subscribe, transaction_by_hash, - transaction_receipt, tx_by_block_and_index, uninstall_filter, unsubscribe, + max_priority_fee_per_gas, new_block_filter, new_filter, not_supported, + raw_transaction_by_block_and_index, raw_transaction_by_hash, send_raw_transaction, storage_at, + subscribe, transaction_by_block_and_index, transaction_by_hash, transaction_receipt, + uninstall_filter, unsubscribe, }; mod error; @@ -36,13 +37,19 @@ where .route("getBlockReceipts", block_receipts::) .route("getRawTransactionByHash", raw_transaction_by_hash::) .route("getTransactionByHash", transaction_by_hash::) - .route("getRawTransactionByBlockHashAndIndex", raw_tx_by_block_and_index::) + .route( + "getRawTransactionByBlockHashAndIndex", + raw_transaction_by_block_and_index::, + ) .route( "getRawTransactionByBlockNumberAndIndex", - raw_tx_by_block_and_index::, + raw_transaction_by_block_and_index::, + ) + .route("getTransactionByBlockHashAndIndex", transaction_by_block_and_index::) + .route( + "getTransactionByBlockNumberAndIndex", + transaction_by_block_and_index::, ) - .route("getTransactionByBlockHashAndIndex", tx_by_block_and_index::) - .route("getTransactionByBlockNumberAndIndex", tx_by_block_and_index::) .route("getTransactionReceipt", transaction_receipt::) .route("getBlockHeaderByHash", header_by::) .route("getBlockHeaderByNumber", header_by::) diff --git a/crates/rpc-storage/tests/eth_rpc.rs b/crates/rpc-storage/tests/eth_rpc.rs index ae4ff00..4273914 100644 --- a/crates/rpc-storage/tests/eth_rpc.rs +++ b/crates/rpc-storage/tests/eth_rpc.rs @@ -614,7 
+614,7 @@ async fn test_not_supported() { let resp = rpc_call_raw(&h.app, "syncing", json!([])).await; assert!(resp.get("error").is_some()); let msg = resp["error"]["message"].as_str().unwrap(); - assert!(msg.contains("not found"), "unexpected error: {msg}"); + assert!(msg.contains("not supported"), "unexpected error: {msg}"); } #[tokio::test] From 9ce78407da38994f69a306559ba06adf4e63413b Mon Sep 17 00:00:00 2001 From: James Date: Sat, 14 Feb 2026 10:19:01 -0500 Subject: [PATCH 15/31] refactor: align filter, subscription, and tx submission behavior Align rpc-storage behavioral semantics with the rpc crate: - subscribe: require filter for Logs, reject filter for NewHeads via TryFrom impl - send_raw_transaction: return Result from spawned task instead of swallowing errors - uninstall_filter/unsubscribe: add HandlerCtx and wrap in spawn_blocking Co-Authored-By: Claude Opus 4.6 --- crates/rpc-storage/src/eth/endpoints.rs | 30 +++++++++++-------------- crates/rpc-storage/src/eth/helpers.rs | 25 ++++++++++++++++++++- 2 files changed, 37 insertions(+), 18 deletions(-) diff --git a/crates/rpc-storage/src/eth/endpoints.rs b/crates/rpc-storage/src/eth/endpoints.rs index 6f1071c..cedfc9c 100644 --- a/crates/rpc-storage/src/eth/endpoints.rs +++ b/crates/rpc-storage/src/eth/endpoints.rs @@ -827,9 +827,7 @@ where let hash = *envelope.tx_hash(); hctx.spawn(async move { - if let Err(e) = tx_cache.forward_raw_transaction(envelope).await { - tracing::warn!(%hash, err = %e, "failed to forward raw transaction"); - } + tx_cache.forward_raw_transaction(envelope).await.map_err(|e| e.to_string()) }); Ok(hash) @@ -931,6 +929,7 @@ where } pub(crate) async fn uninstall_filter( + hctx: HandlerCtx, (id,): (U64,), ctx: StorageRpcCtx, ) -> Result @@ -938,7 +937,8 @@ where H: HotKv + Send + Sync + 'static, ::Error: DBErrorMarker, { - Ok(ctx.filter_manager().uninstall(id).is_some()) + let task = async move { Ok(ctx.filter_manager().uninstall(id).is_some()) }; + await_handler!(@option 
hctx.spawn_blocking(task)) } pub(crate) async fn get_filter_changes( @@ -996,33 +996,29 @@ where pub(crate) async fn subscribe( hctx: HandlerCtx, - SubscribeArgs(kind, filter): SubscribeArgs, + sub: SubscribeArgs, ctx: StorageRpcCtx, ) -> Result where H: HotKv + Send + Sync + 'static, ::Error: DBErrorMarker, { - let interest = match kind { - alloy::rpc::types::pubsub::SubscriptionKind::NewHeads => InterestKind::Block, - alloy::rpc::types::pubsub::SubscriptionKind::Logs => { - let f = filter.unwrap_or_default(); - InterestKind::Log(f) - } - other => { - return Err(format!("unsupported subscription kind: {other:?}")); - } - }; + let interest: InterestKind = sub.try_into()?; ctx.sub_manager() .subscribe(&hctx, interest) .ok_or_else(|| "notifications not enabled on this transport".to_string()) } -pub(crate) async fn unsubscribe((id,): (U64,), ctx: StorageRpcCtx) -> Result +pub(crate) async fn unsubscribe( + hctx: HandlerCtx, + (id,): (U64,), + ctx: StorageRpcCtx, +) -> Result where H: HotKv + Send + Sync + 'static, ::Error: DBErrorMarker, { - Ok(ctx.sub_manager().unsubscribe(id)) + let task = async move { Ok(ctx.sub_manager().unsubscribe(id)) }; + await_handler!(@option hctx.spawn_blocking(task)) } diff --git a/crates/rpc-storage/src/eth/helpers.rs b/crates/rpc-storage/src/eth/helpers.rs index 6785d0c..a900d7f 100644 --- a/crates/rpc-storage/src/eth/helpers.rs +++ b/crates/rpc-storage/src/eth/helpers.rs @@ -1,5 +1,6 @@ //! Parameter types, macros, and utility helpers for ETH RPC endpoints. 
+use crate::interest::InterestKind; use alloy::{ consensus::{ ReceiptEnvelope, ReceiptWithBloom, Transaction, TxReceipt, transaction::Recovered, @@ -8,7 +9,8 @@ use alloy::{ network::Ethereum, primitives::{Address, TxKind, U256}, rpc::types::{ - BlockOverrides, Log, TransactionReceipt, TransactionRequest, state::StateOverride, + BlockOverrides, Log, TransactionReceipt, TransactionRequest, pubsub::SubscriptionKind, + state::StateOverride, }, }; use reth_rpc_eth_api::{RpcReceipt, RpcTransaction}; @@ -53,6 +55,27 @@ pub(crate) struct SubscribeArgs( #[serde(default)] pub Option>, ); +impl TryFrom for InterestKind { + type Error = String; + + fn try_from(args: SubscribeArgs) -> Result { + match args.0 { + SubscriptionKind::Logs => args + .1 + .map(InterestKind::Log) + .ok_or_else(|| "missing filter for Logs subscription".to_string()), + SubscriptionKind::NewHeads => { + if args.1.is_some() { + Err("filter not supported for NewHeads subscription".to_string()) + } else { + Ok(InterestKind::Block) + } + } + other => Err(format!("unsupported subscription kind: {other:?}")), + } + } +} + /// Normalize transaction request gas without making DB reads. /// /// - If the gas is below `MIN_TRANSACTION_GAS`, set it to `None` From 40f573415b8e2c2f3ca00a9dd38b96a4b803cde4 Mon Sep 17 00:00:00 2001 From: James Date: Sat, 14 Feb 2026 11:54:38 -0500 Subject: [PATCH 16/31] fix: synthesize pending header in resolve_evm_block and bump storage to 0.5 resolve_evm_block previously mapped Pending to Latest without modifying the header, causing eth_estimateGas (which defaults to Pending) and eth_call with explicit Pending to see wrong block.number, timestamp, and base_fee. Now synthesizes a next-block header matching signet-rpc's block_cfg() behavior. Also refactors callBundle to use resolve_evm_block instead of duplicating the pending header logic inline, and passes max_logs to cold.get_logs() for early termination (signet-cold 0.5 API). 
Co-Authored-By: Claude Opus 4.6 --- Cargo.toml | 8 ++--- crates/rpc-storage/src/ctx.rs | 27 +++++++++++++++-- crates/rpc-storage/src/eth/endpoints.rs | 9 ++---- crates/rpc-storage/src/signet/endpoints.rs | 35 +++------------------- 4 files changed, 35 insertions(+), 44 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index f4fb977..65d8e44 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,10 +56,10 @@ signet-tx-cache = "0.16.0-rc.8" signet-types = "0.16.0-rc.8" signet-zenith = "0.16.0-rc.8" signet-journal = "0.16.0-rc.8" -signet-storage = "0.4.0" -signet-cold = "0.4.0" -signet-hot = "0.4.0" -signet-storage-types = "0.4.0" +signet-storage = "0.5.0" +signet-cold = "0.5.0" +signet-hot = "0.5.0" +signet-storage-types = "0.5.0" # ajj ajj = { version = "0.3.4" } diff --git a/crates/rpc-storage/src/ctx.rs b/crates/rpc-storage/src/ctx.rs index 825cb21..6450048 100644 --- a/crates/rpc-storage/src/ctx.rs +++ b/crates/rpc-storage/src/ctx.rs @@ -238,6 +238,11 @@ impl StorageRpcCtx { /// /// Fetches the header from hot storage and creates a revm-compatible /// database snapshot at the resolved block height. + /// + /// For `Pending` block IDs, remaps to `Latest` and synthesizes a + /// next-block header (incremented number, timestamp +12s, projected + /// base fee, gas limit from config). State is loaded at the + /// resolved block height in both cases.
pub(crate) fn resolve_evm_block( &self, id: BlockId, @@ -245,8 +250,24 @@ impl StorageRpcCtx { where ::Error: DBErrorMarker, { - let header = self.resolve_header(id)?.ok_or(EthError::BlockNotFound(id))?; - let db = self.revm_state_at_height(header.number)?; - Ok(EvmBlockContext { header: header.into_inner(), db }) + let pending = id.is_pending(); + let id = if pending { BlockId::latest() } else { id }; + + let sealed = self.resolve_header(id)?.ok_or(EthError::BlockNotFound(id))?; + let db = self.revm_state_at_height(sealed.number)?; + + let parent_hash = sealed.hash(); + let mut header = sealed.into_inner(); + + if pending { + header.parent_hash = parent_hash; + header.number += 1; + header.timestamp += 12; + header.base_fee_per_gas = + header.next_block_base_fee(alloy::eips::eip1559::BaseFeeParams::ethereum()); + header.gas_limit = self.config().rpc_gas_cap; + } + + Ok(EvmBlockContext { header, db }) } } diff --git a/crates/rpc-storage/src/eth/endpoints.rs b/crates/rpc-storage/src/eth/endpoints.rs index cedfc9c..9824fd1 100644 --- a/crates/rpc-storage/src/eth/endpoints.rs +++ b/crates/rpc-storage/src/eth/endpoints.rs @@ -878,12 +878,8 @@ where } }; - let logs = cold.get_logs(resolved_filter).await.map_err(|e| e.to_string())?; - let max_logs = ctx.config().max_logs_per_response; - if max_logs > 0 && logs.len() > max_logs { - return Err(format!("query exceeds max logs per response ({max_logs})")); - } + let logs = cold.get_logs(resolved_filter, max_logs).await.map_err(|e| e.to_string())?; Ok(logs) }; @@ -980,7 +976,8 @@ where ..stored }; - let logs = cold.get_logs(resolved).await.map_err(|e| e.to_string())?; + let max_logs = ctx.config().max_logs_per_response; + let logs = cold.get_logs(resolved, max_logs).await.map_err(|e| e.to_string())?; entry.mark_polled(latest); Ok(FilterOutput::from(logs)) diff --git a/crates/rpc-storage/src/signet/endpoints.rs b/crates/rpc-storage/src/signet/endpoints.rs index 7986d52..4f8e3ce 100644 --- 
a/crates/rpc-storage/src/signet/endpoints.rs +++ b/crates/rpc-storage/src/signet/endpoints.rs @@ -1,12 +1,12 @@ //! Signet namespace RPC endpoint implementations. use crate::{ - ctx::StorageRpcCtx, + ctx::{EvmBlockContext, StorageRpcCtx}, eth::helpers::{CfgFiller, await_handler, response_tri}, signet::error::SignetError, }; use ajj::{HandlerCtx, ResponsePayload}; -use alloy::eips::{BlockId, eip1559::BaseFeeParams}; +use alloy::eips::BlockId; use signet_bundle::{SignetBundleDriver, SignetCallBundle, SignetCallBundleResponse}; use signet_hot::HotKv; use signet_hot::model::HotKvRead; @@ -51,36 +51,9 @@ where let task = async move { let id = bundle.state_block_number(); - let mut block_id: BlockId = id.into(); + let block_id: BlockId = id.into(); - let pending = block_id.is_pending(); - if pending { - block_id = BlockId::latest(); - } - - let cold = ctx.cold(); - let block_num = response_tri!(ctx.resolve_block_id(block_id)); - - let sealed_header = - response_tri!(cold.get_header_by_number(block_num).await.map_err(|e| e.to_string())); - - let sealed_header = - response_tri!(sealed_header.ok_or_else(|| format!("block not found: {block_id}"))); - - let parent_hash = sealed_header.hash(); - let mut header = sealed_header.into_inner(); - - // For pending blocks, synthesize the next-block header. - if pending { - header.parent_hash = parent_hash; - header.number += 1; - header.timestamp += 12; - header.base_fee_per_gas = header.next_block_base_fee(BaseFeeParams::ethereum()); - header.gas_limit = ctx.config().rpc_gas_cap; - } - - // State at the resolved block number (before any pending header mutation). 
- let db = response_tri!(ctx.revm_state_at_height(block_num).map_err(|e| e.to_string())); + let EvmBlockContext { header, db } = response_tri!(ctx.resolve_evm_block(block_id)); let mut driver = SignetBundleDriver::from(&bundle); From d90d2b78d3648c59d32c66a08477d2c8b6c78a39 Mon Sep 17 00:00:00 2001 From: James Date: Sat, 14 Feb 2026 13:03:40 -0500 Subject: [PATCH 17/31] feat: add uncle and access lists --- crates/rpc-storage/src/eth/endpoints.rs | 59 +++++++++++++++++++++++++ crates/rpc-storage/src/eth/mod.rs | 16 +++---- crates/rpc/src/eth/endpoints.rs | 10 +++++ crates/rpc/src/eth/mod.rs | 8 ++-- 4 files changed, 81 insertions(+), 12 deletions(-) diff --git a/crates/rpc-storage/src/eth/endpoints.rs b/crates/rpc-storage/src/eth/endpoints.rs index 9824fd1..d945759 100644 --- a/crates/rpc-storage/src/eth/endpoints.rs +++ b/crates/rpc-storage/src/eth/endpoints.rs @@ -17,12 +17,14 @@ use alloy::{ BlockId, BlockNumberOrTag, eip1559::BaseFeeParams, eip2718::{Decodable2718, Encodable2718}, + eip2930::AccessListResult, }, network::Ethereum, primitives::{B256, U64, U256}, rpc::types::{Block, BlockTransactions, FeeHistory, Filter, Log}, }; use reth_rpc_eth_api::{RpcBlock, RpcHeader, RpcReceipt, RpcTransaction}; +use revm_inspectors::access_list::AccessListInspector; use signet_cold::{HeaderSpecifier, ReceiptSpecifier}; use signet_hot::model::HotKvRead; use signet_hot::{HistoryRead, HotKv, db::HotDbRead}; @@ -44,6 +46,16 @@ pub(crate) async fn not_supported() -> ResponsePayload<(), ()> { )) } +/// Uncle count is always zero — Signet has no uncle blocks. +pub(crate) async fn uncle_count() -> Result { + Ok(U64::ZERO) +} + +/// Uncle block is always absent — Signet has no uncle blocks. 
+pub(crate) async fn uncle_block() -> Result, ()> { + Ok(None) +} + // --------------------------------------------------------------------------- // Simple Queries // --------------------------------------------------------------------------- @@ -804,6 +816,53 @@ where await_handler!(@response_option hctx.spawn_blocking(task)) } +pub(crate) async fn create_access_list( + hctx: HandlerCtx, + TxParams(mut request, block, state_overrides, block_overrides): TxParams, + ctx: StorageRpcCtx, +) -> ResponsePayload +where + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let max_gas = ctx.config().rpc_gas_cap; + normalize_gas_stateless(&mut request, max_gas); + + let id = block.unwrap_or(BlockId::pending()); + let span = trace_span!("eth_createAccessList", block_id = %id); + + let task = async move { + let EvmBlockContext { header, db } = response_tri!(ctx.resolve_evm_block(id)); + + let trevm = signet_evm::signet_evm(db, ctx.constants().clone()) + .fill_cfg(&CfgFiller(ctx.chain_id())) + .fill_block(&header); + + let trevm = response_tri!(trevm.maybe_apply_state_overrides(state_overrides.as_ref())) + .maybe_apply_block_overrides(block_overrides.as_deref()) + .fill_tx(&request); + + let initial = request.access_list.clone().unwrap_or_default(); + let mut inspector = AccessListInspector::new(initial); + + let result = trevm + .try_with_inspector(&mut inspector, |trevm| trevm.run()) + .map_err(signet_evm::EvmErrored::into_error); + + let (gas_used, error) = match result { + Ok(ref trevm) => (U256::from(trevm.gas_used()), None), + Err(ref e) => (U256::ZERO, Some(e.to_string())), + }; + + let access_list = inspector.into_access_list(); + + ResponsePayload::Success(AccessListResult { access_list, gas_used, error }) + } + .instrument(span); + + await_handler!(@response_option hctx.spawn_blocking(task)) +} + // --------------------------------------------------------------------------- // Transaction Submission // 
--------------------------------------------------------------------------- diff --git a/crates/rpc-storage/src/eth/mod.rs b/crates/rpc-storage/src/eth/mod.rs index 7e1043e..5b14e38 100644 --- a/crates/rpc-storage/src/eth/mod.rs +++ b/crates/rpc-storage/src/eth/mod.rs @@ -3,11 +3,11 @@ mod endpoints; use endpoints::{ addr_tx_count, balance, block, block_number, block_receipts, block_tx_count, call, chain_id, - code_at, estimate_gas, fee_history, gas_price, get_filter_changes, get_logs, header_by, - max_priority_fee_per_gas, new_block_filter, new_filter, not_supported, + code_at, create_access_list, estimate_gas, fee_history, gas_price, get_filter_changes, + get_logs, header_by, max_priority_fee_per_gas, new_block_filter, new_filter, not_supported, raw_transaction_by_block_and_index, raw_transaction_by_hash, send_raw_transaction, storage_at, subscribe, transaction_by_block_and_index, transaction_by_hash, transaction_receipt, - uninstall_filter, unsubscribe, + uncle_block, uncle_count, uninstall_filter, unsubscribe, }; mod error; @@ -72,10 +72,10 @@ where .route("coinbase", not_supported) .route("accounts", not_supported) .route("blobBaseFee", not_supported) - .route("getUncleCountByBlockHash", not_supported) - .route("getUncleCountByBlockNumber", not_supported) - .route("getUncleByBlockHashAndIndex", not_supported) - .route("getUncleByBlockNumberAndIndex", not_supported) + .route("getUncleCountByBlockHash", uncle_count) + .route("getUncleCountByBlockNumber", uncle_count) + .route("getUncleByBlockHashAndIndex", uncle_block) + .route("getUncleByBlockNumberAndIndex", uncle_block) .route("getWork", not_supported) .route("hashrate", not_supported) .route("mining", not_supported) @@ -86,7 +86,7 @@ where .route("signTransaction", not_supported) .route("signTypedData", not_supported) .route("getProof", not_supported) - .route("createAccessList", not_supported) + .route("createAccessList", create_access_list::) .route("newFilter", new_filter::) .route("newBlockFilter", 
new_block_filter::) .route("newPendingTransactionFilter", not_supported) diff --git a/crates/rpc/src/eth/endpoints.rs b/crates/rpc/src/eth/endpoints.rs index 23f099f..7574594 100644 --- a/crates/rpc/src/eth/endpoints.rs +++ b/crates/rpc/src/eth/endpoints.rs @@ -92,6 +92,16 @@ pub(super) async fn not_supported() -> ResponsePayload<(), ()> { )) } +/// Uncle count is always zero — Signet has no uncle blocks. +pub(super) async fn uncle_count() -> Result { + Ok(U64::ZERO) +} + +/// Uncle block is always absent — Signet has no uncle blocks. +pub(super) async fn uncle_block() -> Result, ()> { + Ok(None) +} + pub(super) async fn protocol_version(ctx: RpcCtx) -> Result where Host: FullNodeComponents, diff --git a/crates/rpc/src/eth/mod.rs b/crates/rpc/src/eth/mod.rs index 0d8c131..ed906b6 100644 --- a/crates/rpc/src/eth/mod.rs +++ b/crates/rpc/src/eth/mod.rs @@ -71,10 +71,10 @@ where .route("coinbase", not_supported) .route("accounts", not_supported) .route("blobBaseFee", not_supported) - .route("getUncleCountByBlockHash", not_supported) - .route("getUncleCountByBlockNumber", not_supported) - .route("getUncleByBlockHashAndIndex", not_supported) - .route("getUncleByBlockNumberAndIndex", not_supported) + .route("getUncleCountByBlockHash", uncle_count) + .route("getUncleCountByBlockNumber", uncle_count) + .route("getUncleByBlockHashAndIndex", uncle_block) + .route("getUncleByBlockNumberAndIndex", uncle_block) .route("getWork", not_supported) .route("hashrate", not_supported) .route("mining", not_supported) From 4d9c6db18c798f90dcb2bde1905116b4644599b5 Mon Sep 17 00:00:00 2001 From: James Date: Sat, 14 Feb 2026 13:44:30 -0500 Subject: [PATCH 18/31] refactor: address PR review comments across rpc-storage crate - Restructure into config/ module folder (ctx, resolve, gas_oracle, rpc_config) - Remove standalone pub fn wrappers, use .nest() for namespace prefixing - Remove reth-rpc-eth-api dependency, use alloy Network trait type aliases - Change not_supported to method_not_found, 
add concrete SignetError variants - Sanitize DebugError Display output, add tracing::warn at error call sites - Add fire-and-forget error logging for send_raw_transaction and send_order - Add default_bundle_timeout_ms config, SyncStatus struct, BlockTags::update_all - Add comprehensive interest/ module docs, endpoint doc comments, permit docs - Extract hot_reader_at_block helper for account state endpoints - Add FUTURE-EVALUATION.md documenting Vec collection constraint Co-Authored-By: Claude Opus 4.6 --- crates/rpc-storage/Cargo.toml | 1 - crates/rpc-storage/FUTURE-EVALUATION.md | 13 +++ crates/rpc-storage/src/{ => config}/ctx.rs | 7 +- .../src/{ => config}/gas_oracle.rs | 2 +- crates/rpc-storage/src/config/mod.rs | 17 +++ .../rpc-storage/src/{ => config}/resolve.rs | 57 ++++++++- .../src/{config.rs => config/rpc_config.rs} | 8 ++ crates/rpc-storage/src/debug/endpoints.rs | 59 ++++++---- crates/rpc-storage/src/debug/error.rs | 15 ++- crates/rpc-storage/src/debug/mod.rs | 2 +- crates/rpc-storage/src/eth/endpoints.rs | 106 +++++++++++------ crates/rpc-storage/src/eth/error.rs | 2 +- crates/rpc-storage/src/eth/helpers.rs | 30 ++++- crates/rpc-storage/src/eth/mod.rs | 2 +- crates/rpc-storage/src/interest/mod.rs | 37 +++++- crates/rpc-storage/src/lib.rs | 45 ++------ crates/rpc-storage/src/signet/endpoints.rs | 28 +++-- crates/rpc-storage/src/signet/error.rs | 21 +++- crates/rpc-storage/src/signet/mod.rs | 2 +- crates/rpc-storage/tests/eth_rpc.rs | 108 ++++++++---------- 20 files changed, 382 insertions(+), 180 deletions(-) create mode 100644 crates/rpc-storage/FUTURE-EVALUATION.md rename crates/rpc-storage/src/{ => config}/ctx.rs (98%) rename crates/rpc-storage/src/{ => config}/gas_oracle.rs (98%) create mode 100644 crates/rpc-storage/src/config/mod.rs rename crates/rpc-storage/src/{ => config}/resolve.rs (53%) rename crates/rpc-storage/src/{config.rs => config/rpc_config.rs} (89%) diff --git a/crates/rpc-storage/Cargo.toml b/crates/rpc-storage/Cargo.toml index 
d12a79a..4a1fe01 100644 --- a/crates/rpc-storage/Cargo.toml +++ b/crates/rpc-storage/Cargo.toml @@ -27,7 +27,6 @@ tracing.workspace = true thiserror.workspace = true serde.workspace = true dashmap = "6.1.0" -reth-rpc-eth-api.workspace = true revm-inspectors.workspace = true itertools.workspace = true diff --git a/crates/rpc-storage/FUTURE-EVALUATION.md b/crates/rpc-storage/FUTURE-EVALUATION.md new file mode 100644 index 0000000..4b51828 --- /dev/null +++ b/crates/rpc-storage/FUTURE-EVALUATION.md @@ -0,0 +1,13 @@ +# Future Evaluation Notes + +## Vec Collection at API Boundary + +Several endpoints (`eth_getBlockReceipts`, `debug_traceBlockByNumber`, etc.) +collect results into a `Vec` before returning. This is required because +`ajj::ResponsePayload` expects an owned `Serialize` value — there is no way +to feed items to the serializer via an iterator or streaming interface. + +If `ajj` adds support for streaming serialization (e.g. accepting an +`Iterator` or a `Stream`), these endpoints could be +refactored to avoid the intermediate allocation. Until then, the `Vec` +collection is the necessary approach at the API boundary. diff --git a/crates/rpc-storage/src/ctx.rs b/crates/rpc-storage/src/config/ctx.rs similarity index 98% rename from crates/rpc-storage/src/ctx.rs rename to crates/rpc-storage/src/config/ctx.rs index 6450048..fb0cc95 100644 --- a/crates/rpc-storage/src/ctx.rs +++ b/crates/rpc-storage/src/config/ctx.rs @@ -1,9 +1,12 @@ //! RPC context wrapping [`UnifiedStorage`]. 
use crate::{ - EthError, StorageRpcConfig, + config::{ + StorageRpcConfig, + resolve::{BlockTags, ResolveError}, + }, + eth::EthError, interest::{FilterManager, NewBlockNotification, SubscriptionManager}, - resolve::{BlockTags, ResolveError}, }; use alloy::eips::{BlockId, BlockNumberOrTag}; use signet_cold::ColdStorageReadHandle; diff --git a/crates/rpc-storage/src/gas_oracle.rs b/crates/rpc-storage/src/config/gas_oracle.rs similarity index 98% rename from crates/rpc-storage/src/gas_oracle.rs rename to crates/rpc-storage/src/config/gas_oracle.rs index 47c23c2..866cad3 100644 --- a/crates/rpc-storage/src/gas_oracle.rs +++ b/crates/rpc-storage/src/config/gas_oracle.rs @@ -6,7 +6,7 @@ use alloy::{consensus::Transaction, primitives::U256}; use signet_cold::{ColdStorageError, ColdStorageReadHandle, HeaderSpecifier}; -use crate::StorageRpcConfig; +use crate::config::StorageRpcConfig; /// Suggest a tip cap based on recent transaction tips. /// diff --git a/crates/rpc-storage/src/config/mod.rs b/crates/rpc-storage/src/config/mod.rs new file mode 100644 index 0000000..8a99258 --- /dev/null +++ b/crates/rpc-storage/src/config/mod.rs @@ -0,0 +1,17 @@ +//! Configuration, context, and block tag resolution. +//! +//! This module groups the crate's configuration types, the RPC context +//! that wraps [`signet_storage::UnifiedStorage`], gas oracle helpers, +//! and block tag / block ID resolution logic. 
+ +mod rpc_config; +pub use rpc_config::StorageRpcConfig; + +mod ctx; +pub(crate) use ctx::EvmBlockContext; +pub use ctx::StorageRpcCtx; + +pub(crate) mod gas_oracle; + +pub(crate) mod resolve; +pub use resolve::{BlockTags, SyncStatus}; diff --git a/crates/rpc-storage/src/resolve.rs b/crates/rpc-storage/src/config/resolve.rs similarity index 53% rename from crates/rpc-storage/src/resolve.rs rename to crates/rpc-storage/src/config/resolve.rs index 68f9b49..8a23ef4 100644 --- a/crates/rpc-storage/src/resolve.rs +++ b/crates/rpc-storage/src/config/resolve.rs @@ -7,10 +7,26 @@ use alloy::primitives::B256; use signet_storage::StorageError; use std::sync::{ - Arc, + Arc, RwLock, atomic::{AtomicU64, Ordering}, }; +/// Snapshot of the node's syncing progress. +/// +/// When the node is still catching up to the network, this struct +/// describes the sync window. Once fully synced, the context owner +/// should call [`BlockTags::clear_sync_status`] to indicate that +/// syncing is complete. +#[derive(Debug, Clone, Copy)] +pub struct SyncStatus { + /// Block number the node started syncing from. + pub starting_block: u64, + /// Current block the node has synced to. + pub current_block: u64, + /// Highest known block number on the network. + pub highest_block: u64, +} + /// Externally-updated block tag tracker. /// /// Each tag is an `Arc` that the caller updates as the chain @@ -26,12 +42,19 @@ use std::sync::{ /// /// tags.set_latest(101); /// assert_eq!(tags.latest(), 101); +/// +/// // Update all tags at once. 
+/// tags.update_all(200, 195, 190); +/// assert_eq!(tags.latest(), 200); +/// assert_eq!(tags.safe(), 195); +/// assert_eq!(tags.finalized(), 190); /// ``` #[derive(Debug, Clone)] pub struct BlockTags { latest: Arc, safe: Arc, finalized: Arc, + sync_status: Arc>>, } impl BlockTags { @@ -41,6 +64,7 @@ latest: Arc::new(AtomicU64::new(latest)), safe: Arc::new(AtomicU64::new(safe)), finalized: Arc::new(AtomicU64::new(finalized)), + sync_status: Arc::new(RwLock::new(None)), } } @@ -73,6 +97,37 @@ pub fn set_finalized(&self, n: u64) { self.finalized.store(n, Ordering::Release); } + + /// Update all three tags in one call. + + /// + /// Stores are ordered finalized → safe → latest. The three tags are + /// independent atomics, so a reader racing an update may briefly + /// observe a newer finalized alongside a stale latest; the view is + /// eventually consistent rather than atomic as a triple. + pub fn update_all(&self, latest: u64, safe: u64, finalized: u64) { + self.finalized.store(finalized, Ordering::Release); + self.safe.store(safe, Ordering::Release); + self.latest.store(latest, Ordering::Release); + } + + /// Returns `true` if the node is currently syncing. + pub fn is_syncing(&self) -> bool { + self.sync_status.read().expect("sync status lock poisoned").is_some() + } + + /// Returns the current sync status, if the node is syncing. + pub fn sync_status(&self) -> Option { + *self.sync_status.read().expect("sync status lock poisoned") + } + + /// Update the sync status to indicate the node is syncing. + pub fn set_sync_status(&self, status: SyncStatus) { + *self.sync_status.write().expect("sync status lock poisoned") = Some(status); + } + + /// Clear the sync status, indicating the node is fully synced. + pub fn clear_sync_status(&self) { + *self.sync_status.write().expect("sync status lock poisoned") = None; + } } /// Error resolving a block identifier.
diff --git a/crates/rpc-storage/src/config.rs b/crates/rpc-storage/src/config/rpc_config.rs similarity index 89% rename from crates/rpc-storage/src/config.rs rename to crates/rpc-storage/src/config/rpc_config.rs index 98c1f26..690eb3a 100644 --- a/crates/rpc-storage/src/config.rs +++ b/crates/rpc-storage/src/config/rpc_config.rs @@ -66,6 +66,13 @@ pub struct StorageRpcConfig { /// /// Default: `1024`. pub max_block_history: u64, + + /// Default timeout in milliseconds for bundle simulation. + /// + /// Used when the bundle request does not specify its own timeout. + /// + /// Default: `1000` (1 second). + pub default_bundle_timeout_ms: u64, } impl Default for StorageRpcConfig { @@ -80,6 +87,7 @@ impl Default for StorageRpcConfig { gas_oracle_percentile: 60.0, max_header_history: 1024, max_block_history: 1024, + default_bundle_timeout_ms: 1000, } } } diff --git a/crates/rpc-storage/src/debug/endpoints.rs b/crates/rpc-storage/src/debug/endpoints.rs index 7dca346..33be233 100644 --- a/crates/rpc-storage/src/debug/endpoints.rs +++ b/crates/rpc-storage/src/debug/endpoints.rs @@ -1,7 +1,7 @@ //! Debug namespace RPC endpoint implementations. use crate::{ - ctx::StorageRpcCtx, + config::StorageRpcCtx, debug::DebugError, eth::helpers::{CfgFiller, await_handler, response_tri}, }; @@ -41,6 +41,9 @@ where { let opts = response_tri!(opts.ok_or(DebugError::InvalidTracerConfig)); + // Acquire a tracing semaphore permit to limit concurrent debug + // requests. The permit is held for the entire handler lifetime and + // is dropped when the async block completes. 
let _permit = ctx.acquire_tracing_permit().await; let id = id.into(); @@ -48,16 +51,20 @@ where let fut = async move { let cold = ctx.cold(); - let block_num = response_tri!( - ctx.resolve_block_id(id).map_err(|e| { DebugError::BlockNotFound(e.to_string()) }) - ); + let block_num = response_tri!(ctx.resolve_block_id(id).map_err(|e| { + tracing::warn!(error = %e, ?id, "block resolution failed"); + DebugError::BlockNotFound(id) + })); let (header, txs) = response_tri!( tokio::try_join!( cold.get_header_by_number(block_num), cold.get_transactions_in_block(block_num), ) - .map_err(|e| DebugError::Cold(e.to_string())) + .map_err(|e| { + tracing::warn!(error = %e, block_num, "cold storage read failed"); + DebugError::Cold(e.to_string()) + }) ); let Some(header) = header else { @@ -73,11 +80,12 @@ where let mut frames = Vec::with_capacity(txs.len()); - // State BEFORE this block - let db = response_tri!( - ctx.revm_state_at_height(header.number.saturating_sub(1)) - .map_err(|e| DebugError::Hot(e.to_string())) - ); + // State BEFORE this block. + let db = + response_tri!(ctx.revm_state_at_height(header.number.saturating_sub(1)).map_err(|e| { + tracing::warn!(error = %e, block_num, "hot storage read failed"); + DebugError::Hot(e.to_string()) + })); let mut trevm = signet_evm::signet_evm(db, ctx.constants().clone()) .fill_cfg(&CfgFiller(ctx.chain_id())) @@ -123,6 +131,7 @@ where { let opts = response_tri!(opts.ok_or(DebugError::InvalidTracerConfig)); + // Held for the handler duration; dropped when the async block completes. let _permit = ctx.acquire_tracing_permit().await; let span = tracing::debug_span!("traceTransaction", %tx_hash, tracer = ?opts.tracer.as_ref()); @@ -130,10 +139,11 @@ where let fut = async move { let cold = ctx.cold(); - // Look up the transaction and its containing block - let confirmed = response_tri!( - cold.get_tx_by_hash(tx_hash).await.map_err(|e| DebugError::Cold(e.to_string())) - ); + // Look up the transaction and its containing block. 
+ let confirmed = response_tri!(cold.get_tx_by_hash(tx_hash).await.map_err(|e| { + tracing::warn!(error = %e, %tx_hash, "cold storage read failed"); + DebugError::Cold(e.to_string()) + })); let confirmed = response_tri!(confirmed.ok_or(DebugError::TransactionNotFound)); let (_tx, meta) = confirmed.into_parts(); @@ -146,20 +156,23 @@ where cold.get_header_by_number(block_num), cold.get_transactions_in_block(block_num), ) - .map_err(|e| DebugError::Cold(e.to_string())) + .map_err(|e| { + tracing::warn!(error = %e, block_num, "cold storage read failed"); + DebugError::Cold(e.to_string()) + }) ); - let header = - response_tri!(header.ok_or(DebugError::BlockNotFound(format!("block {block_num}")))) - .into_inner(); + let block_id = BlockId::Number(block_num.into()); + let header = response_tri!(header.ok_or(DebugError::BlockNotFound(block_id))).into_inner(); tracing::debug!(number = block_num, "Loaded containing block"); - // State BEFORE this block - let db = response_tri!( - ctx.revm_state_at_height(block_num.saturating_sub(1)) - .map_err(|e| DebugError::Hot(e.to_string())) - ); + // State BEFORE this block. + let db = + response_tri!(ctx.revm_state_at_height(block_num.saturating_sub(1)).map_err(|e| { + tracing::warn!(error = %e, block_num, "hot storage read failed"); + DebugError::Hot(e.to_string()) + })); let mut trevm = signet_evm::signet_evm(db, ctx.constants().clone()) .fill_cfg(&CfgFiller(ctx.chain_id())) diff --git a/crates/rpc-storage/src/debug/error.rs b/crates/rpc-storage/src/debug/error.rs index bc87c42..a078034 100644 --- a/crates/rpc-storage/src/debug/error.rs +++ b/crates/rpc-storage/src/debug/error.rs @@ -1,13 +1,20 @@ //! Error types for the debug namespace. +use alloy::eips::BlockId; + /// Errors that can occur in the `debug` namespace. +/// +/// The [`serde::Serialize`] impl emits sanitized messages suitable for +/// API responses — internal storage details are not exposed to callers. 
+/// Use [`tracing`] to log the full error chain before constructing the +/// variant. #[derive(Debug, Clone, thiserror::Error)] pub enum DebugError { /// Cold storage error. - #[error("cold storage: {0}")] + #[error("cold storage error")] Cold(String), /// Hot storage error. - #[error("hot storage: {0}")] + #[error("hot storage error")] Hot(String), /// Invalid tracer configuration. #[error("invalid tracer config")] @@ -16,11 +23,11 @@ pub enum DebugError { #[error("unsupported: {0}")] Unsupported(&'static str), /// EVM execution error. - #[error("evm: {0}")] + #[error("evm execution error")] Evm(String), /// Block not found. #[error("block not found: {0}")] - BlockNotFound(String), + BlockNotFound(BlockId), /// Transaction not found. #[error("transaction not found")] TransactionNotFound, diff --git a/crates/rpc-storage/src/debug/mod.rs b/crates/rpc-storage/src/debug/mod.rs index 172a2fc..44dcb94 100644 --- a/crates/rpc-storage/src/debug/mod.rs +++ b/crates/rpc-storage/src/debug/mod.rs @@ -6,7 +6,7 @@ mod error; pub use error::DebugError; pub(crate) mod tracer; -use crate::ctx::StorageRpcCtx; +use crate::config::StorageRpcCtx; use alloy::{eips::BlockNumberOrTag, primitives::B256}; use signet_hot::HotKv; use signet_hot::model::HotKvRead; diff --git a/crates/rpc-storage/src/eth/endpoints.rs b/crates/rpc-storage/src/eth/endpoints.rs index d945759..72ba2f0 100644 --- a/crates/rpc-storage/src/eth/endpoints.rs +++ b/crates/rpc-storage/src/eth/endpoints.rs @@ -1,13 +1,12 @@ //! ETH namespace RPC endpoint implementations. 
use crate::{ - ctx::{EvmBlockContext, StorageRpcCtx}, + config::{EvmBlockContext, StorageRpcCtx, gas_oracle}, eth::helpers::{ AddrWithBlock, BlockParams, CfgFiller, FeeHistoryArgs, StorageAtArgs, SubscribeArgs, - TxParams, await_handler, build_receipt, build_rpc_transaction, normalize_gas_stateless, - response_tri, + TxParams, await_handler, build_receipt, build_rpc_transaction, hot_reader_at_block, + normalize_gas_stateless, response_tri, }, - gas_oracle, interest::{FilterOutput, InterestKind}, }; use ajj::{HandlerCtx, ResponsePayload}; @@ -19,16 +18,23 @@ use alloy::{ eip2718::{Decodable2718, Encodable2718}, eip2930::AccessListResult, }, - network::Ethereum, + network::{Ethereum, Network}, primitives::{B256, U64, U256}, rpc::types::{Block, BlockTransactions, FeeHistory, Filter, Log}, }; -use reth_rpc_eth_api::{RpcBlock, RpcHeader, RpcReceipt, RpcTransaction}; + +/// RPC block type for the Ethereum network. +type RpcBlock = ::BlockResponse; +/// RPC header type for the Ethereum network. +type RpcHeader = ::HeaderResponse; +/// RPC transaction type for the Ethereum network. +type RpcTransaction = ::TransactionResponse; +/// RPC receipt type for the Ethereum network. +type RpcReceipt = ::ReceiptResponse; use revm_inspectors::access_list::AccessListInspector; use signet_cold::{HeaderSpecifier, ReceiptSpecifier}; use signet_hot::model::HotKvRead; use signet_hot::{HistoryRead, HotKv, db::HotDbRead}; -use std::borrow::Cow; use tracing::{Instrument, debug, trace_span}; use trevm::{ EstimationResult, revm::context::result::ExecutionResult, revm::database::DBErrorMarker, @@ -41,9 +47,7 @@ use super::error::CallErrorData; // --------------------------------------------------------------------------- pub(crate) async fn not_supported() -> ResponsePayload<(), ()> { - ResponsePayload::internal_error_message(Cow::Borrowed( - "Method not supported. 
See signet documentation for a list of unsupported methods: https://signet.sh/docs", - )) + ResponsePayload::method_not_found() } /// Uncle count is always zero — Signet has no uncle blocks. @@ -60,10 +64,12 @@ pub(crate) async fn uncle_block() -> Result, ()> { // Simple Queries // --------------------------------------------------------------------------- +/// `eth_blockNumber` — returns the latest block number from block tags. pub(crate) async fn block_number(ctx: StorageRpcCtx) -> Result { Ok(U64::from(ctx.tags().latest())) } +/// `eth_chainId` — returns the configured chain ID. pub(crate) async fn chain_id(ctx: StorageRpcCtx) -> Result { Ok(U64::from(ctx.chain_id())) } @@ -72,6 +78,7 @@ pub(crate) async fn chain_id(ctx: StorageRpcCtx) -> Result // Gas & Fee Queries // --------------------------------------------------------------------------- +/// `eth_gasPrice` — suggests gas price based on recent block tips + base fee. pub(crate) async fn gas_price(hctx: HandlerCtx, ctx: StorageRpcCtx) -> Result where H: HotKv + Send + Sync + 'static, @@ -98,6 +105,7 @@ where await_handler!(@option hctx.spawn_blocking(task)) } +/// `eth_maxPriorityFeePerGas` — suggests priority fee from recent block tips. pub(crate) async fn max_priority_fee_per_gas( hctx: HandlerCtx, ctx: StorageRpcCtx, @@ -116,6 +124,7 @@ where await_handler!(@option hctx.spawn_blocking(task)) } +/// `eth_feeHistory` — returns base fee and reward percentile data. pub(crate) async fn fee_history( hctx: HandlerCtx, FeeHistoryArgs(block_count, newest, reward_percentiles): FeeHistoryArgs, @@ -277,11 +286,13 @@ fn calculate_reward_percentiles( // Block Queries // --------------------------------------------------------------------------- +/// `eth_getBlockByHash` / `eth_getBlockByNumber` — resolve block, fetch +/// header + transactions from cold storage, assemble RPC block response. 
pub(crate) async fn block( hctx: HandlerCtx, BlockParams(t, full): BlockParams, ctx: StorageRpcCtx, -) -> Result>, String> +) -> Result, String> where T: Into, H: HotKv + Send + Sync + 'static, @@ -340,6 +351,7 @@ where await_handler!(@option hctx.spawn_blocking(task)) } +/// `eth_getBlockTransactionCount*` — transaction count in a block. pub(crate) async fn block_tx_count( hctx: HandlerCtx, (t,): (T,), @@ -365,11 +377,12 @@ where await_handler!(@option hctx.spawn_blocking(task)) } +/// `eth_getBlockReceipts` — all receipts in a block. pub(crate) async fn block_receipts( hctx: HandlerCtx, (id,): (BlockId,), ctx: StorageRpcCtx, -) -> Result>>, String> +) -> Result>, String> where H: HotKv + Send + Sync + 'static, ::Error: DBErrorMarker, @@ -400,11 +413,12 @@ where await_handler!(@option hctx.spawn_blocking(task)) } +/// `eth_getBlockHeaderByHash` / `eth_getBlockHeaderByNumber`. pub(crate) async fn header_by( hctx: HandlerCtx, (t,): (T,), ctx: StorageRpcCtx, -) -> Result>, String> +) -> Result, String> where T: Into, H: HotKv + Send + Sync + 'static, @@ -435,11 +449,12 @@ where // Transaction Queries // --------------------------------------------------------------------------- +/// `eth_getTransactionByHash` — look up transaction by hash from cold storage. pub(crate) async fn transaction_by_hash( hctx: HandlerCtx, (hash,): (B256,), ctx: StorageRpcCtx, -) -> Result>, String> +) -> Result, String> where H: HotKv + Send + Sync + 'static, ::Error: DBErrorMarker, @@ -463,6 +478,7 @@ where await_handler!(@option hctx.spawn_blocking(task)) } +/// `eth_getRawTransactionByHash` — RLP-encoded transaction bytes. pub(crate) async fn raw_transaction_by_hash( hctx: HandlerCtx, (hash,): (B256,), @@ -483,11 +499,12 @@ where await_handler!(@option hctx.spawn_blocking(task)) } +/// `eth_getTransactionByBlock*AndIndex` — transaction by position in block. 
pub(crate) async fn transaction_by_block_and_index( hctx: HandlerCtx, (t, index): (T, U64), ctx: StorageRpcCtx, -) -> Result>, String> +) -> Result, String> where T: Into, H: HotKv + Send + Sync + 'static, @@ -518,6 +535,7 @@ where await_handler!(@option hctx.spawn_blocking(task)) } +/// `eth_getRawTransactionByBlock*AndIndex` — raw RLP bytes by position. pub(crate) async fn raw_transaction_by_block_and_index( hctx: HandlerCtx, (t, index): (T, U64), @@ -543,11 +561,13 @@ where await_handler!(@option hctx.spawn_blocking(task)) } +/// `eth_getTransactionReceipt` — receipt by tx hash. Fetches the receipt, +/// then the associated transaction and header for derived fields. pub(crate) async fn transaction_receipt( hctx: HandlerCtx, (hash,): (B256,), ctx: StorageRpcCtx, -) -> Result>, String> +) -> Result, String> where H: HotKv + Send + Sync + 'static, ::Error: DBErrorMarker, @@ -580,6 +600,7 @@ where // Account State (Hot Storage) // --------------------------------------------------------------------------- +/// `eth_getBalance` — account balance at a given block from hot storage. pub(crate) async fn balance( hctx: HandlerCtx, AddrWithBlock(address, block): AddrWithBlock, @@ -589,12 +610,10 @@ where H: HotKv + Send + Sync + 'static, ::Error: DBErrorMarker, { - let block = block.unwrap_or(BlockId::latest()); + let id = block.unwrap_or(BlockId::latest()); let task = async move { - let height = ctx.resolve_block_id(block).map_err(|e| e.to_string())?; - - let reader = ctx.hot_reader().map_err(|e| e.to_string())?; + let (reader, height) = hot_reader_at_block(&ctx, id)?; let acct = reader.get_account_at_height(&address, Some(height)).map_err(|e| e.to_string())?; @@ -604,6 +623,7 @@ where await_handler!(@option hctx.spawn_blocking(task)) } +/// `eth_getStorageAt` — contract storage slot at a given block. 
pub(crate) async fn storage_at( hctx: HandlerCtx, StorageAtArgs(address, key, block): StorageAtArgs, @@ -613,12 +633,10 @@ where H: HotKv + Send + Sync + 'static, ::Error: DBErrorMarker, { - let block = block.unwrap_or(BlockId::latest()); + let id = block.unwrap_or(BlockId::latest()); let task = async move { - let height = ctx.resolve_block_id(block).map_err(|e| e.to_string())?; - - let reader = ctx.hot_reader().map_err(|e| e.to_string())?; + let (reader, height) = hot_reader_at_block(&ctx, id)?; let val = reader .get_storage_at_height(&address, &key, Some(height)) .map_err(|e| e.to_string())?; @@ -629,6 +647,7 @@ where await_handler!(@option hctx.spawn_blocking(task)) } +/// `eth_getTransactionCount` — account nonce at a given block. pub(crate) async fn addr_tx_count( hctx: HandlerCtx, AddrWithBlock(address, block): AddrWithBlock, @@ -638,12 +657,10 @@ where H: HotKv + Send + Sync + 'static, ::Error: DBErrorMarker, { - let block = block.unwrap_or(BlockId::latest()); + let id = block.unwrap_or(BlockId::latest()); let task = async move { - let height = ctx.resolve_block_id(block).map_err(|e| e.to_string())?; - - let reader = ctx.hot_reader().map_err(|e| e.to_string())?; + let (reader, height) = hot_reader_at_block(&ctx, id)?; let acct = reader.get_account_at_height(&address, Some(height)).map_err(|e| e.to_string())?; @@ -653,6 +670,7 @@ where await_handler!(@option hctx.spawn_blocking(task)) } +/// `eth_getCode` — contract bytecode at a given block. 
pub(crate) async fn code_at( hctx: HandlerCtx, AddrWithBlock(address, block): AddrWithBlock, @@ -662,12 +680,10 @@ where H: HotKv + Send + Sync + 'static, ::Error: DBErrorMarker, { - let block = block.unwrap_or(BlockId::latest()); + let id = block.unwrap_or(BlockId::latest()); let task = async move { - let height = ctx.resolve_block_id(block).map_err(|e| e.to_string())?; - - let reader = ctx.hot_reader().map_err(|e| e.to_string())?; + let (reader, height) = hot_reader_at_block(&ctx, id)?; let acct = reader.get_account_at_height(&address, Some(height)).map_err(|e| e.to_string())?; @@ -691,6 +707,10 @@ where // EVM Execution // --------------------------------------------------------------------------- +/// Shared EVM call execution used by `eth_call` and `eth_estimateGas`. +/// +/// Resolves the block, builds a revm instance with the requested state +/// and block overrides, then executes the transaction request. pub(crate) async fn run_call( hctx: HandlerCtx, TxParams(request, block, state_overrides, block_overrides): TxParams, @@ -728,6 +748,10 @@ where await_handler!(@response_option hctx.spawn_blocking(task)) } +/// `eth_call` — execute a call and return the output bytes. +/// +/// Delegates to [`run_call`], then maps the execution result to raw +/// output bytes, revert data, or halt reason. pub(crate) async fn call( hctx: HandlerCtx, mut params: TxParams, @@ -766,6 +790,7 @@ where })) } +/// `eth_estimateGas` — estimate gas required for a transaction. pub(crate) async fn estimate_gas( hctx: HandlerCtx, TxParams(mut request, block, state_overrides, block_overrides): TxParams, @@ -816,6 +841,7 @@ where await_handler!(@response_option hctx.spawn_blocking(task)) } +/// `eth_createAccessList` — generate an access list for a transaction. 
pub(crate) async fn create_access_list( hctx: HandlerCtx, TxParams(mut request, block, state_overrides, block_overrides): TxParams, @@ -867,6 +893,10 @@ where // Transaction Submission // --------------------------------------------------------------------------- +/// `eth_sendRawTransaction` — decode and forward a signed transaction. +/// +/// The transaction is forwarded to the tx cache in a fire-and-forget +/// task; the hash is returned immediately. pub(crate) async fn send_raw_transaction( hctx: HandlerCtx, (tx,): (alloy::primitives::Bytes,), @@ -886,7 +916,9 @@ where let hash = *envelope.tx_hash(); hctx.spawn(async move { - tx_cache.forward_raw_transaction(envelope).await.map_err(|e| e.to_string()) + if let Err(e) = tx_cache.forward_raw_transaction(envelope).await { + tracing::warn!(error = %e, %hash, "failed to forward raw transaction"); + } }); Ok(hash) @@ -899,6 +931,7 @@ where // Logs // --------------------------------------------------------------------------- +/// `eth_getLogs` — query logs from cold storage with filter criteria. pub(crate) async fn get_logs( hctx: HandlerCtx, (filter,): (Filter,), @@ -950,6 +983,7 @@ where // Filters // --------------------------------------------------------------------------- +/// `eth_newFilter` — install a log filter for polling. pub(crate) async fn new_filter( hctx: HandlerCtx, (filter,): (Filter,), @@ -967,6 +1001,7 @@ where await_handler!(@option hctx.spawn_blocking(task)) } +/// `eth_newBlockFilter` — install a block hash filter for polling. pub(crate) async fn new_block_filter( hctx: HandlerCtx, ctx: StorageRpcCtx, @@ -983,6 +1018,7 @@ where await_handler!(@option hctx.spawn_blocking(task)) } +/// `eth_uninstallFilter` — remove a filter. pub(crate) async fn uninstall_filter( hctx: HandlerCtx, (id,): (U64,), @@ -996,6 +1032,8 @@ where await_handler!(@option hctx.spawn_blocking(task)) } +/// `eth_getFilterChanges` / `eth_getFilterLogs` — poll a filter for new +/// results since the last poll. 
Fetches matching data from cold storage. pub(crate) async fn get_filter_changes( hctx: HandlerCtx, (id,): (U64,), @@ -1050,6 +1088,7 @@ where // Subscriptions // --------------------------------------------------------------------------- +/// `eth_subscribe` — register a push-based subscription (WebSocket/SSE). pub(crate) async fn subscribe( hctx: HandlerCtx, sub: SubscribeArgs, @@ -1066,6 +1105,7 @@ where .ok_or_else(|| "notifications not enabled on this transport".to_string()) } +/// `eth_unsubscribe` — cancel a push-based subscription. pub(crate) async fn unsubscribe( hctx: HandlerCtx, (id,): (U64,), diff --git a/crates/rpc-storage/src/eth/error.rs b/crates/rpc-storage/src/eth/error.rs index 93322e8..52b4617 100644 --- a/crates/rpc-storage/src/eth/error.rs +++ b/crates/rpc-storage/src/eth/error.rs @@ -14,7 +14,7 @@ pub enum EthError { Hot(#[from] signet_storage::StorageError), /// Block resolution error. #[error("resolve: {0}")] - Resolve(#[from] crate::resolve::ResolveError), + Resolve(#[from] crate::config::resolve::ResolveError), /// Invalid transaction signature. #[error("invalid transaction signature")] InvalidSignature, diff --git a/crates/rpc-storage/src/eth/helpers.rs b/crates/rpc-storage/src/eth/helpers.rs index a900d7f..756b784 100644 --- a/crates/rpc-storage/src/eth/helpers.rs +++ b/crates/rpc-storage/src/eth/helpers.rs @@ -6,19 +6,23 @@ use alloy::{ ReceiptEnvelope, ReceiptWithBloom, Transaction, TxReceipt, transaction::Recovered, }, eips::BlockId, - network::Ethereum, + network::{Ethereum, Network}, primitives::{Address, TxKind, U256}, rpc::types::{ BlockOverrides, Log, TransactionReceipt, TransactionRequest, pubsub::SubscriptionKind, state::StateOverride, }, }; -use reth_rpc_eth_api::{RpcReceipt, RpcTransaction}; use serde::Deserialize; use signet_cold::ColdReceipt; use signet_storage_types::ConfirmationMeta; use trevm::MIN_TRANSACTION_GAS; +/// RPC transaction type for the Ethereum network. 
+type RpcTransaction = ::TransactionResponse; +/// RPC receipt type for the Ethereum network. +type RpcReceipt = ::ReceiptResponse; + /// Args for `eth_call` and `eth_estimateGas`. #[derive(Debug, Deserialize)] pub(crate) struct TxParams( @@ -128,6 +132,24 @@ macro_rules! response_tri { } pub(crate) use response_tri; +/// Resolve a block ID and open a hot storage reader at that height. +/// +/// Shared by account-state endpoints (`balance`, `storage_at`, +/// `addr_tx_count`, `code_at`) which all follow the same +/// resolve → open reader → query pattern. +pub(crate) fn hot_reader_at_block( + ctx: &crate::config::StorageRpcCtx, + id: BlockId, +) -> Result<(H::RoTx, u64), String> +where + H: signet_hot::HotKv, + ::Error: std::error::Error + Send + Sync + 'static, +{ + let height = ctx.resolve_block_id(id).map_err(|e| e.to_string())?; + let reader = ctx.hot_reader().map_err(|e| e.to_string())?; + Ok((reader, height)) +} + /// Small wrapper implementing [`trevm::Cfg`] to set the chain ID. pub(crate) struct CfgFiller(pub u64); @@ -142,7 +164,7 @@ pub(crate) fn build_rpc_transaction( tx: signet_storage_types::RecoveredTx, meta: &ConfirmationMeta, base_fee: Option, -) -> RpcTransaction { +) -> RpcTransaction { let signer = tx.signer(); let tx_envelope: alloy::consensus::TxEnvelope = tx.into_inner().into(); let inner = Recovered::new_unchecked(tx_envelope, signer); @@ -168,7 +190,7 @@ pub(crate) fn build_receipt( cr: ColdReceipt, tx: &signet_storage_types::RecoveredTx, base_fee: Option, -) -> RpcReceipt { +) -> RpcReceipt { let logs_bloom = cr.receipt.bloom(); let status = cr.receipt.status; let cumulative_gas_used = cr.receipt.cumulative_gas_used; diff --git a/crates/rpc-storage/src/eth/mod.rs b/crates/rpc-storage/src/eth/mod.rs index 5b14e38..f674336 100644 --- a/crates/rpc-storage/src/eth/mod.rs +++ b/crates/rpc-storage/src/eth/mod.rs @@ -15,7 +15,7 @@ pub use error::EthError; pub(crate) mod helpers; -use crate::StorageRpcCtx; +use crate::config::StorageRpcCtx; use 
alloy::{eips::BlockNumberOrTag, primitives::B256}; use signet_hot::HotKv; use signet_hot::model::HotKvRead; diff --git a/crates/rpc-storage/src/interest/mod.rs b/crates/rpc-storage/src/interest/mod.rs index 3c611d9..9193af8 100644 --- a/crates/rpc-storage/src/interest/mod.rs +++ b/crates/rpc-storage/src/interest/mod.rs @@ -1,4 +1,39 @@ -//! Filter and subscription management for block/log notifications. +//! Filter and subscription management for storage-backed RPC. +//! +//! This module implements two managers that track client-registered +//! interests in chain events: +//! +//! - **[`FilterManager`]** — manages poll-based filters created via +//! `eth_newFilter` and `eth_newBlockFilter`. Clients poll with +//! `eth_getFilterChanges` to retrieve accumulated results. +//! +//! - **[`SubscriptionManager`]** — manages push-based subscriptions +//! created via `eth_subscribe`. Matching events are forwarded to +//! the client over the notification channel (WebSocket / SSE). +//! +//! # Architecture +//! +//! Both managers wrap a shared `Arc` containing a [`DashMap`] +//! that maps client-assigned IDs to their active state. This makes +//! both types cheaply clonable — cloning just increments an `Arc` +//! reference count. +//! +//! # Resource lifecycle +//! +//! Each manager spawns a **background OS thread** that periodically +//! cleans up stale entries. The cleanup threads hold a [`Weak`] +//! reference to the `Arc`, so they self-terminate once all +//! strong references are dropped. +//! +//! OS threads are used (rather than tokio tasks) because +//! [`DashMap::retain`] can deadlock if called from an async context +//! that also holds a `DashMap` read guard on the same shard. Running +//! cleanup on a dedicated OS thread ensures the retain lock is never +//! contended with an in-flight async handler. +//! +//! [`Weak`]: std::sync::Weak +//! [`DashMap`]: dashmap::DashMap +//! 
[`DashMap::retain`]: dashmap::DashMap::retain mod filters; pub(crate) use filters::{FilterManager, FilterOutput}; diff --git a/crates/rpc-storage/src/lib.rs b/crates/rpc-storage/src/lib.rs index af4d93a..b1e68d6 100644 --- a/crates/rpc-storage/src/lib.rs +++ b/crates/rpc-storage/src/lib.rs @@ -11,49 +11,21 @@ #![deny(unused_must_use, rust_2018_idioms)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -mod config; -pub use config::StorageRpcConfig; -mod ctx; -pub use ctx::StorageRpcCtx; -mod resolve; -pub use resolve::BlockTags; +pub(crate) mod config; +pub use config::{BlockTags, StorageRpcConfig, StorageRpcCtx, SyncStatus}; + mod eth; pub use eth::EthError; -mod gas_oracle; + mod interest; pub use interest::NewBlockNotification; + mod debug; pub use debug::DebugError; + mod signet; pub use signet::error::SignetError; -/// Instantiate the `eth` API router. -pub fn eth() -> ajj::Router> -where - H: signet_hot::HotKv + Send + Sync + 'static, - ::Error: trevm::revm::database::DBErrorMarker, -{ - eth::eth() -} - -/// Instantiate the `debug` API router. -pub fn debug() -> ajj::Router> -where - H: signet_hot::HotKv + Send + Sync + 'static, - ::Error: trevm::revm::database::DBErrorMarker, -{ - debug::debug() -} - -/// Instantiate the `signet` API router. -pub fn signet() -> ajj::Router> -where - H: signet_hot::HotKv + Send + Sync + 'static, - ::Error: trevm::revm::database::DBErrorMarker, -{ - signet::signet() -} - /// Instantiate a combined router with `eth`, `debug`, and `signet` /// namespaces. 
pub fn router() -> ajj::Router> @@ -61,5 +33,8 @@ where H: signet_hot::HotKv + Send + Sync + 'static, ::Error: trevm::revm::database::DBErrorMarker, { - ajj::Router::new().merge(eth::eth()).merge(debug::debug()).merge(signet::signet()) + ajj::Router::new() + .nest("eth", eth::eth()) + .nest("debug", debug::debug()) + .nest("signet", signet::signet()) } diff --git a/crates/rpc-storage/src/signet/endpoints.rs b/crates/rpc-storage/src/signet/endpoints.rs index 4f8e3ce..92e5102 100644 --- a/crates/rpc-storage/src/signet/endpoints.rs +++ b/crates/rpc-storage/src/signet/endpoints.rs @@ -1,7 +1,7 @@ //! Signet namespace RPC endpoint implementations. use crate::{ - ctx::{EvmBlockContext, StorageRpcCtx}, + config::{EvmBlockContext, StorageRpcCtx}, eth::helpers::{CfgFiller, await_handler, response_tri}, signet::error::SignetError, }; @@ -26,11 +26,15 @@ where ::Error: DBErrorMarker, { let Some(tx_cache) = ctx.tx_cache().cloned() else { - return Err(SignetError::TxCacheNotProvided.into_string()); + return Err(SignetError::TxCacheNotProvided.to_string()); }; let task = |hctx: HandlerCtx| async move { - hctx.spawn(async move { tx_cache.forward_order(order).await.map_err(|e| e.to_string()) }); + hctx.spawn(async move { + if let Err(e) = tx_cache.forward_order(order).await { + tracing::warn!(error = %e, "failed to forward order"); + } + }); Ok(()) }; @@ -42,18 +46,22 @@ pub(super) async fn call_bundle( hctx: HandlerCtx, bundle: SignetCallBundle, ctx: StorageRpcCtx, -) -> ResponsePayload +) -> ResponsePayload where H: HotKv + Send + Sync + 'static, ::Error: DBErrorMarker, { - let timeout = bundle.bundle.timeout.unwrap_or(1000); + let timeout = bundle.bundle.timeout.unwrap_or(ctx.config().default_bundle_timeout_ms); let task = async move { let id = bundle.state_block_number(); let block_id: BlockId = id.into(); - let EvmBlockContext { header, db } = response_tri!(ctx.resolve_evm_block(block_id)); + let EvmBlockContext { header, db } = + 
response_tri!(ctx.resolve_evm_block(block_id).map_err(|e| { + tracing::warn!(error = %e, ?block_id, "block resolution failed for bundle"); + SignetError::Resolve(e.to_string()) + })); let mut driver = SignetBundleDriver::from(&bundle); @@ -61,7 +69,11 @@ where .fill_cfg(&CfgFiller(ctx.chain_id())) .fill_block(&header); - response_tri!(trevm.drive_bundle(&mut driver).map_err(|e| e.into_error())); + response_tri!(trevm.drive_bundle(&mut driver).map_err(|e| { + let e = e.into_error(); + tracing::warn!(error = %e, "evm error during bundle simulation"); + SignetError::Evm(e.to_string()) + })); ResponsePayload::Success(driver.into_response()) }; @@ -70,7 +82,7 @@ where select! { _ = tokio::time::sleep(Duration::from_millis(timeout)) => { ResponsePayload::internal_error_message( - "timeout during bundle simulation".into(), + SignetError::Timeout.to_string().into(), ) } result = task => { diff --git a/crates/rpc-storage/src/signet/error.rs b/crates/rpc-storage/src/signet/error.rs index ad019f4..83570ab 100644 --- a/crates/rpc-storage/src/signet/error.rs +++ b/crates/rpc-storage/src/signet/error.rs @@ -1,16 +1,27 @@ //! Error types for the signet namespace. /// Errors that can occur in the `signet` namespace. -#[derive(Debug, Clone, Copy, thiserror::Error)] +#[derive(Debug, Clone, thiserror::Error)] pub enum SignetError { /// The transaction cache was not provided. #[error("transaction cache not provided")] TxCacheNotProvided, + /// Block resolution failed. + #[error("block resolution error")] + Resolve(String), + /// EVM execution error. + #[error("evm execution error")] + Evm(String), + /// Bundle simulation timed out. + #[error("timeout during bundle simulation")] + Timeout, } -impl SignetError { - /// Convert to a string by value. 
- pub fn into_string(self) -> String { - self.to_string() +impl serde::Serialize for SignetError { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.serialize_str(&self.to_string()) } } diff --git a/crates/rpc-storage/src/signet/mod.rs b/crates/rpc-storage/src/signet/mod.rs index f9c452e..276e5fb 100644 --- a/crates/rpc-storage/src/signet/mod.rs +++ b/crates/rpc-storage/src/signet/mod.rs @@ -4,7 +4,7 @@ mod endpoints; use endpoints::{call_bundle, send_order}; pub(crate) mod error; -use crate::ctx::StorageRpcCtx; +use crate::config::StorageRpcCtx; use signet_hot::HotKv; use signet_hot::model::HotKvRead; use trevm::revm::database::DBErrorMarker; diff --git a/crates/rpc-storage/tests/eth_rpc.rs b/crates/rpc-storage/tests/eth_rpc.rs index 4273914..01c742f 100644 --- a/crates/rpc-storage/tests/eth_rpc.rs +++ b/crates/rpc-storage/tests/eth_rpc.rs @@ -57,7 +57,7 @@ impl TestHarness { StorageRpcConfig::default(), notif_tx.clone(), ); - let app = signet_rpc_storage::eth::().into_axum("/").with_state(ctx.clone()); + let app = signet_rpc_storage::router::().into_axum("/").with_state(ctx.clone()); Self { app, cold, hot, tags, notif_tx, ctx, _cancel: cancel } } @@ -72,23 +72,12 @@ impl TestHarness { writer.commit().unwrap(); self.cold.append_block(block).await.unwrap(); } - - /// Build an axum router for the debug namespace. - fn debug_app(&self) -> axum::Router { - signet_rpc_storage::debug::().into_axum("/").with_state(self.ctx.clone()) - } - - /// Build an axum router for the signet namespace. - #[allow(dead_code)] - fn signet_app(&self) -> axum::Router { - signet_rpc_storage::signet::().into_axum("/").with_state(self.ctx.clone()) - } } /// Make a JSON-RPC call and return the `"result"` field. /// -/// The `method` parameter is the short name (e.g. `"blockNumber"`), without -/// the `eth_` prefix. The router registers methods without namespace prefix. +/// The `method` parameter is the fully-qualified name (e.g. 
+/// `"eth_blockNumber"`). The router nests each namespace under its prefix. /// /// Panics if the response contains an `"error"` field. async fn rpc_call(app: &axum::Router, method: &str, params: Value) -> Value { @@ -233,14 +222,14 @@ fn make_block( #[tokio::test] async fn test_block_number() { let h = TestHarness::new(42).await; - let result = rpc_call(&h.app, "blockNumber", json!([])).await; + let result = rpc_call(&h.app, "eth_blockNumber", json!([])).await; assert_eq!(result, json!("0x2a")); } #[tokio::test] async fn test_chain_id() { let h = TestHarness::new(0).await; - let result = rpc_call(&h.app, "chainId", json!([])).await; + let result = rpc_call(&h.app, "eth_chainId", json!([])).await; let expected = format!("0x{:x}", SignetSystemConstants::test().ru_chain_id()); assert_eq!(result, json!(expected)); } @@ -270,7 +259,7 @@ async fn test_get_block_by_number_hashes() { let h = TestHarness::new(0).await; let (tx_hashes, _) = setup_cold_block(&h).await; - let result = rpc_call(&h.app, "getBlockByNumber", json!(["0x1", false])).await; + let result = rpc_call(&h.app, "eth_getBlockByNumber", json!(["0x1", false])).await; assert_eq!(result["number"], json!("0x1")); let txs = result["transactions"].as_array().unwrap(); @@ -285,7 +274,7 @@ async fn test_get_block_by_number_full() { let h = TestHarness::new(0).await; let (tx_hashes, senders) = setup_cold_block(&h).await; - let result = rpc_call(&h.app, "getBlockByNumber", json!(["0x1", true])).await; + let result = rpc_call(&h.app, "eth_getBlockByNumber", json!(["0x1", true])).await; assert_eq!(result["number"], json!("0x1")); let txs = result["transactions"].as_array().unwrap(); @@ -305,10 +294,10 @@ async fn test_get_block_by_hash() { setup_cold_block(&h).await; // Get the block to learn its hash - let block = rpc_call(&h.app, "getBlockByNumber", json!(["0x1", false])).await; + let block = rpc_call(&h.app, "eth_getBlockByNumber", json!(["0x1", false])).await; let block_hash = 
block["hash"].as_str().unwrap().to_string(); - let result = rpc_call(&h.app, "getBlockByHash", json!([block_hash, false])).await; + let result = rpc_call(&h.app, "eth_getBlockByHash", json!([block_hash, false])).await; assert_eq!(result["number"], json!("0x1")); assert_eq!(result["hash"], json!(block_hash)); } @@ -318,7 +307,7 @@ async fn test_get_block_tx_count() { let h = TestHarness::new(0).await; setup_cold_block(&h).await; - let result = rpc_call(&h.app, "getBlockTransactionCountByNumber", json!(["0x1"])).await; + let result = rpc_call(&h.app, "eth_getBlockTransactionCountByNumber", json!(["0x1"])).await; assert_eq!(result, json!("0x2")); } @@ -327,7 +316,7 @@ async fn test_get_block_header() { let h = TestHarness::new(0).await; setup_cold_block(&h).await; - let result = rpc_call(&h.app, "getBlockHeaderByNumber", json!(["0x1"])).await; + let result = rpc_call(&h.app, "eth_getBlockHeaderByNumber", json!(["0x1"])).await; assert_eq!(result["number"], json!("0x1")); assert!(result["baseFeePerGas"].is_string()); } @@ -335,7 +324,7 @@ async fn test_get_block_header() { #[tokio::test] async fn test_get_block_not_found() { let h = TestHarness::new(255).await; - let result = rpc_call(&h.app, "getBlockByNumber", json!(["0xff", false])).await; + let result = rpc_call(&h.app, "eth_getBlockByNumber", json!(["0xff", false])).await; assert!(result.is_null()); } @@ -349,7 +338,7 @@ async fn test_get_transaction_by_hash() { let (tx_hashes, senders) = setup_cold_block(&h).await; let result = - rpc_call(&h.app, "getTransactionByHash", json!([format!("{:?}", tx_hashes[0])])).await; + rpc_call(&h.app, "eth_getTransactionByHash", json!([format!("{:?}", tx_hashes[0])])).await; assert_eq!(result["hash"], json!(format!("{:?}", tx_hashes[0]))); assert_eq!(result["from"], json!(format!("{:?}", senders[0]))); @@ -363,7 +352,8 @@ async fn test_get_raw_transaction_by_hash() { let (tx_hashes, _) = setup_cold_block(&h).await; let result = - rpc_call(&h.app, "getRawTransactionByHash", 
json!([format!("{:?}", tx_hashes[0])])).await; + rpc_call(&h.app, "eth_getRawTransactionByHash", json!([format!("{:?}", tx_hashes[0])])) + .await; // Raw transaction is a hex string let hex = result.as_str().unwrap(); @@ -377,7 +367,7 @@ async fn test_get_tx_by_block_and_index() { let (tx_hashes, senders) = setup_cold_block(&h).await; let result = - rpc_call(&h.app, "getTransactionByBlockNumberAndIndex", json!(["0x1", "0x0"])).await; + rpc_call(&h.app, "eth_getTransactionByBlockNumberAndIndex", json!(["0x1", "0x0"])).await; assert_eq!(result["hash"], json!(format!("{:?}", tx_hashes[0]))); assert_eq!(result["from"], json!(format!("{:?}", senders[0]))); @@ -389,7 +379,7 @@ async fn test_get_transaction_receipt() { let (tx_hashes, senders) = setup_cold_block(&h).await; let result = - rpc_call(&h.app, "getTransactionReceipt", json!([format!("{:?}", tx_hashes[0])])).await; + rpc_call(&h.app, "eth_getTransactionReceipt", json!([format!("{:?}", tx_hashes[0])])).await; assert_eq!(result["transactionHash"], json!(format!("{:?}", tx_hashes[0]))); assert_eq!(result["from"], json!(format!("{:?}", senders[0]))); @@ -403,7 +393,7 @@ async fn test_get_block_receipts() { let h = TestHarness::new(0).await; setup_cold_block(&h).await; - let result = rpc_call(&h.app, "getBlockReceipts", json!(["0x1"])).await; + let result = rpc_call(&h.app, "eth_getBlockReceipts", json!(["0x1"])).await; let receipts = result.as_array().unwrap(); assert_eq!(receipts.len(), 2); @@ -456,7 +446,7 @@ async fn test_get_balance() { h.append_block(block).await; let result = - rpc_call(&h.app, "getBalance", json!([format!("{:?}", TEST_ADDR), "latest"])).await; + rpc_call(&h.app, "eth_getBalance", json!([format!("{:?}", TEST_ADDR), "latest"])).await; // 1 ETH = 10^18 assert_eq!(result, json!("0xde0b6b3a7640000")); @@ -471,7 +461,7 @@ async fn test_get_transaction_count() { h.append_block(block).await; let result = - rpc_call(&h.app, "getTransactionCount", json!([format!("{:?}", TEST_ADDR), "latest"])) + 
rpc_call(&h.app, "eth_getTransactionCount", json!([format!("{:?}", TEST_ADDR), "latest"])) .await; assert_eq!(result, json!("0x5")); @@ -487,7 +477,8 @@ async fn test_get_storage_at() { let slot = format!("{:#066x}", 42u64); let result = - rpc_call(&h.app, "getStorageAt", json!([format!("{:?}", TEST_ADDR), slot, "latest"])).await; + rpc_call(&h.app, "eth_getStorageAt", json!([format!("{:?}", TEST_ADDR), slot, "latest"])) + .await; // 999 = 0x3e7, padded to 32 bytes let expected = format!("{:#066x}", 999u64); @@ -502,7 +493,8 @@ async fn test_get_code() { let block = make_block(1, vec![], 0); h.append_block(block).await; - let result = rpc_call(&h.app, "getCode", json!([format!("{:?}", TEST_ADDR), "latest"])).await; + let result = + rpc_call(&h.app, "eth_getCode", json!([format!("{:?}", TEST_ADDR), "latest"])).await; assert_eq!(result, json!("0x60006000f3")); } @@ -515,7 +507,8 @@ async fn test_get_balance_unknown_account() { h.append_block(block).await; let unknown = Address::repeat_byte(0xff); - let result = rpc_call(&h.app, "getBalance", json!([format!("{:?}", unknown), "latest"])).await; + let result = + rpc_call(&h.app, "eth_getBalance", json!([format!("{:?}", unknown), "latest"])).await; assert_eq!(result, json!("0x0")); } @@ -535,12 +528,12 @@ async fn test_get_logs_by_block_hash() { h.tags.set_latest(1); // Get the block hash - let block_result = rpc_call(&h.app, "getBlockByNumber", json!(["0x1", false])).await; + let block_result = rpc_call(&h.app, "eth_getBlockByNumber", json!(["0x1", false])).await; let block_hash = block_result["hash"].as_str().unwrap().to_string(); let result = rpc_call( &h.app, - "getLogs", + "eth_getLogs", json!([{ "blockHash": block_hash, "address": format!("{:?}", LOG_ADDR), @@ -567,7 +560,7 @@ async fn test_get_logs_by_range() { let result = rpc_call( &h.app, - "getLogs", + "eth_getLogs", json!([{ "fromBlock": "0x1", "toBlock": "0x1", @@ -592,7 +585,7 @@ async fn test_get_logs_empty() { let result = rpc_call( &h.app, - "getLogs", + 
"eth_getLogs", json!([{ "fromBlock": "0x1", "toBlock": "0x1", @@ -611,16 +604,16 @@ async fn test_get_logs_empty() { #[tokio::test] async fn test_not_supported() { let h = TestHarness::new(0).await; - let resp = rpc_call_raw(&h.app, "syncing", json!([])).await; + let resp = rpc_call_raw(&h.app, "eth_syncing", json!([])).await; assert!(resp.get("error").is_some()); let msg = resp["error"]["message"].as_str().unwrap(); - assert!(msg.contains("not supported"), "unexpected error: {msg}"); + assert!(msg.contains("not found"), "unexpected error: {msg}"); } #[tokio::test] async fn test_send_raw_tx_no_cache() { let h = TestHarness::new(0).await; - let resp = rpc_call_raw(&h.app, "sendRawTransaction", json!(["0x00"])).await; + let resp = rpc_call_raw(&h.app, "eth_sendRawTransaction", json!(["0x00"])).await; assert!(resp.get("error").is_some()); } @@ -638,7 +631,7 @@ async fn test_gas_price() { h.append_block(block).await; h.tags.set_latest(1); - let result = rpc_call(&h.app, "gasPrice", json!([])).await; + let result = rpc_call(&h.app, "eth_gasPrice", json!([])).await; // tip = gas_price - base_fee = 2e9 - 1e9 = 1e9 // gasPrice = tip + base_fee = 1e9 + 1e9 = 2e9 = 0x77359400 @@ -654,7 +647,7 @@ async fn test_max_priority_fee_per_gas() { h.append_block(block).await; h.tags.set_latest(1); - let result = rpc_call(&h.app, "maxPriorityFeePerGas", json!([])).await; + let result = rpc_call(&h.app, "eth_maxPriorityFeePerGas", json!([])).await; // tip only = gas_price - base_fee = 1e9 = 0x3b9aca00 assert_eq!(result, json!("0x3b9aca00")); @@ -668,7 +661,7 @@ async fn test_gas_price_empty_blocks() { h.append_block(block).await; h.tags.set_latest(1); - let result = rpc_call(&h.app, "gasPrice", json!([])).await; + let result = rpc_call(&h.app, "eth_gasPrice", json!([])).await; // No txs means tip = 0, gasPrice = base_fee = 1e9 = 0x3b9aca00 assert_eq!(result, json!("0x3b9aca00")); @@ -686,7 +679,7 @@ async fn test_fee_history_basic() { h.tags.set_latest(3); // Request 2 blocks of fee 
history ending at block 3 - let result = rpc_call(&h.app, "feeHistory", json!(["0x2", "0x3", null])).await; + let result = rpc_call(&h.app, "eth_feeHistory", json!(["0x2", "0x3", null])).await; // oldest_block = end_block + 1 - block_count = 3 + 1 - 2 = 2 assert_eq!(result["oldestBlock"], json!("0x2")); @@ -709,7 +702,7 @@ async fn test_fee_history_with_rewards() { h.append_block(block).await; h.tags.set_latest(1); - let result = rpc_call(&h.app, "feeHistory", json!(["0x1", "0x1", [25.0, 75.0]])).await; + let result = rpc_call(&h.app, "eth_feeHistory", json!(["0x1", "0x1", [25.0, 75.0]])).await; assert_eq!(result["oldestBlock"], json!("0x1")); let rewards = result["reward"].as_array().unwrap(); @@ -727,7 +720,7 @@ async fn test_new_block_filter_and_changes() { let h = TestHarness::new(0).await; // Install a block filter at block 0 - let filter_id = rpc_call(&h.app, "newBlockFilter", json!([])).await; + let filter_id = rpc_call(&h.app, "eth_newBlockFilter", json!([])).await; let filter_id_str = filter_id.as_str().unwrap().to_string(); // Append a block @@ -737,13 +730,13 @@ async fn test_new_block_filter_and_changes() { h.tags.set_latest(1); // Poll for changes — should get block hash for block 1 - let changes = rpc_call(&h.app, "getFilterChanges", json!([filter_id_str])).await; + let changes = rpc_call(&h.app, "eth_getFilterChanges", json!([filter_id_str])).await; let hashes = changes.as_array().unwrap(); assert_eq!(hashes.len(), 1); assert!(hashes[0].is_string()); // Poll again with no new blocks — should be empty - let changes = rpc_call(&h.app, "getFilterChanges", json!([filter_id_str])).await; + let changes = rpc_call(&h.app, "eth_getFilterChanges", json!([filter_id_str])).await; let hashes = changes.as_array().unwrap(); assert!(hashes.is_empty()); } @@ -755,7 +748,7 @@ async fn test_new_log_filter_and_changes() { // Install a log filter for LOG_ADDR with LOG_TOPIC let filter_id = rpc_call( &h.app, - "newFilter", + "eth_newFilter", json!([{ "address": 
format!("{:?}", LOG_ADDR), "topics": [format!("{:?}", LOG_TOPIC)], @@ -771,7 +764,7 @@ async fn test_new_log_filter_and_changes() { h.tags.set_latest(1); // Poll for changes — should get matching logs - let changes = rpc_call(&h.app, "getFilterChanges", json!([filter_id_str])).await; + let changes = rpc_call(&h.app, "eth_getFilterChanges", json!([filter_id_str])).await; let logs = changes.as_array().unwrap(); assert_eq!(logs.len(), 2); assert_eq!(logs[0]["address"], json!(format!("{:?}", LOG_ADDR))); @@ -781,15 +774,15 @@ async fn test_new_log_filter_and_changes() { async fn test_uninstall_filter() { let h = TestHarness::new(0).await; - let filter_id = rpc_call(&h.app, "newBlockFilter", json!([])).await; + let filter_id = rpc_call(&h.app, "eth_newBlockFilter", json!([])).await; let filter_id_str = filter_id.as_str().unwrap().to_string(); // Uninstall - let result = rpc_call(&h.app, "uninstallFilter", json!([filter_id_str])).await; + let result = rpc_call(&h.app, "eth_uninstallFilter", json!([filter_id_str])).await; assert_eq!(result, json!(true)); // Uninstall again — should return false - let result = rpc_call(&h.app, "uninstallFilter", json!([filter_id_str])).await; + let result = rpc_call(&h.app, "eth_uninstallFilter", json!([filter_id_str])).await; assert_eq!(result, json!(false)); } @@ -825,9 +818,9 @@ async fn test_trace_block_by_number_noop() { h.append_block(block).await; h.tags.set_latest(1); - let debug_app = h.debug_app(); let result = - rpc_call(&debug_app, "traceBlockByNumber", json!(["0x1", {"tracer": "noopTracer"}])).await; + rpc_call(&h.app, "debug_traceBlockByNumber", json!(["0x1", {"tracer": "noopTracer"}])) + .await; let traces = result.as_array().unwrap(); assert_eq!(traces.len(), 1); @@ -845,10 +838,9 @@ async fn test_trace_transaction_noop() { h.append_block(block).await; h.tags.set_latest(1); - let debug_app = h.debug_app(); let result = rpc_call( - &debug_app, - "traceTransaction", + &h.app, + "debug_traceTransaction", json!([format!("{:?}", 
tx_hash), {"tracer": "noopTracer"}]), ) .await; From b40411e62c6fce7fac8a3c0635c7d683d7ecf9d9 Mon Sep 17 00:00:00 2001 From: James Date: Sat, 14 Feb 2026 16:51:08 -0500 Subject: [PATCH 19/31] feat: bump ajj to 0.5.0 with lazy serialization and API migration Bump ajj from 0.3.4 to 0.5.0 and adapt all call sites to the new API: - ResponsePayload::Success/Failure replaced with ResponsePayload(Ok/Err) - Subscription task rewritten to use ctx.notify() instead of raw channel access via the removed notifications() method Leverage relaxed RpcSend bounds for lazy serialization: - LazyReceipts: serializes receipts inline from raw ColdReceipt + RecoveredTx data without intermediate Vec - In-housed BlockTransactions and RpcBlock: serialize full transactions or hashes lazily from Vec, hardcode empty uncles Refactor build_receipt and build_rpc_transaction to take references, cloning only where needed (logs Vec, RecoveredTx for into_inner). Co-Authored-By: Claude Opus 4.6 --- Cargo.toml | 2 +- crates/rpc-storage/FUTURE-EVALUATION.md | 49 ++++-- crates/rpc-storage/src/debug/endpoints.rs | 4 +- crates/rpc-storage/src/eth/endpoints.rs | 164 +++++++++++++++------ crates/rpc-storage/src/eth/helpers.rs | 13 +- crates/rpc-storage/src/interest/subs.rs | 89 +++++------ crates/rpc-storage/src/signet/endpoints.rs | 2 +- 7 files changed, 209 insertions(+), 114 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 65d8e44..79a7097 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,7 +62,7 @@ signet-hot = "0.5.0" signet-storage-types = "0.5.0" # ajj -ajj = { version = "0.3.4" } +ajj = { version = "0.5.0" } # trevm trevm = { version = "0.34.0", features = ["full_env_cfg"] } diff --git a/crates/rpc-storage/FUTURE-EVALUATION.md b/crates/rpc-storage/FUTURE-EVALUATION.md index 4b51828..f1abf26 100644 --- a/crates/rpc-storage/FUTURE-EVALUATION.md +++ b/crates/rpc-storage/FUTURE-EVALUATION.md @@ -1,13 +1,44 @@ # Future Evaluation Notes -## Vec Collection at API Boundary +## Lazy Serialization (ajj 
0.5.0) -Several endpoints (`eth_getBlockReceipts`, `debug_traceBlockByNumber`, etc.) -collect results into a `Vec` before returning. This is required because -`ajj::ResponsePayload` expects an owned `Serialize` value — there is no way -to feed items to the serializer via an iterator or streaming interface. +ajj 0.5.0 relaxed `RpcSend` bounds so that custom `Serialize` impls can be +returned from handlers without collecting into a `Vec`. Two endpoints now use +this: -If `ajj` adds support for streaming serialization (e.g. accepting an -`Iterator` or a `Stream`), these endpoints could be -refactored to avoid the intermediate allocation. Until then, the `Vec` -collection is the necessary approach at the API boundary. +- **`eth_getBlockReceipts`** — `LazyReceipts` serializes receipts inline from + raw `ColdReceipt` + `RecoveredTx` data without an intermediate + `Vec`. +- **`eth_getBlockByHash` / `eth_getBlockByNumber`** — in-housed + `BlockTransactions` and `RpcBlock` types serialize full transactions or + hashes lazily from `Vec`. + +## Endpoints that cannot benefit + +- **`debug_traceBlockByNumber`** — EVM state is sequential and destructively + consumed per transaction. Computation must be eager; the `Vec` + confirms all traces succeed before returning. Converting `DebugError` to + `serde::ser::Error` would lose variant information. +- **`eth_feeHistory`** — Vecs feed into `FeeHistory` (alloy type) which uses + `alloy_serde::quantity` custom serializers. In-housing is moderate effort + for low payoff. +- **`eth_getLogs`** — `Vec` comes directly from cold storage with no + transformation at the API boundary. +- **Other sites** — Vecs are needed for sorting (`calculate_reward_percentiles`, + `suggest_tip_cap`), poll buffering (`FilterOutput`), or feed into alloy + types we don't control. + +## Channel-based cold storage streaming + +If cold storage returned `tokio::mpsc::Receiver` instead of `Vec`, lazy +async serialization is blocked by two constraints: + +1. 
`serde::Serialize` is synchronous — there is no way to `.await` a channel + receiver inside `serialize()`. +2. ajj 0.5.0 buffers the entire response via `serde_json::to_raw_value` before + sending over HTTP or WebSocket. There is no chunked or streaming response + support. + +To unlock channel-based streaming, ajj would need a streaming response API +(e.g. `AsyncSerialize` or HTTP chunked encoding with per-item flushing). Until +then, handlers must drain receivers before returning. diff --git a/crates/rpc-storage/src/debug/endpoints.rs b/crates/rpc-storage/src/debug/endpoints.rs index 33be233..c3d1e24 100644 --- a/crates/rpc-storage/src/debug/endpoints.rs +++ b/crates/rpc-storage/src/debug/endpoints.rs @@ -112,7 +112,7 @@ where tracing::debug!(tx_index = idx, tx_hash = ?tx.tx_hash(), "Traced transaction"); } - ResponsePayload::Success(frames) + ResponsePayload(Ok(frames)) } .instrument(span); @@ -204,7 +204,7 @@ where let res = response_tri!(crate::debug::tracer::trace(trevm, &opts, tx_info)).0; - ResponsePayload::Success(res) + ResponsePayload(Ok(res)) } .instrument(span); diff --git a/crates/rpc-storage/src/eth/endpoints.rs b/crates/rpc-storage/src/eth/endpoints.rs index 72ba2f0..0ae29ba 100644 --- a/crates/rpc-storage/src/eth/endpoints.rs +++ b/crates/rpc-storage/src/eth/endpoints.rs @@ -2,10 +2,13 @@ use crate::{ config::{EvmBlockContext, StorageRpcCtx, gas_oracle}, - eth::helpers::{ - AddrWithBlock, BlockParams, CfgFiller, FeeHistoryArgs, StorageAtArgs, SubscribeArgs, - TxParams, await_handler, build_receipt, build_rpc_transaction, hot_reader_at_block, - normalize_gas_stateless, response_tri, + eth::{ + error::CallErrorData, + helpers::{ + AddrWithBlock, BlockParams, CfgFiller, FeeHistoryArgs, StorageAtArgs, SubscribeArgs, + TxParams, await_handler, build_receipt, build_rpc_transaction, hot_reader_at_block, + normalize_gas_stateless, response_tri, + }, }, interest::{FilterOutput, InterestKind}, }; @@ -20,27 +23,91 @@ use alloy::{ }, network::{Ethereum, 
Network}, primitives::{B256, U64, U256}, - rpc::types::{Block, BlockTransactions, FeeHistory, Filter, Log}, + rpc::types::{FeeHistory, Filter, Log}, +}; +use revm_inspectors::access_list::AccessListInspector; +use serde::{Serialize, Serializer, ser::SerializeSeq}; +use signet_cold::{ColdReceipt, HeaderSpecifier, ReceiptSpecifier}; +use signet_hot::{HistoryRead, HotKv, db::HotDbRead, model::HotKvRead}; +use tracing::{Instrument, debug, trace_span}; +use trevm::{ + EstimationResult, revm::context::result::ExecutionResult, revm::database::DBErrorMarker, }; -/// RPC block type for the Ethereum network. -type RpcBlock = ::BlockResponse; /// RPC header type for the Ethereum network. type RpcHeader = ::HeaderResponse; /// RPC transaction type for the Ethereum network. type RpcTransaction = ::TransactionResponse; /// RPC receipt type for the Ethereum network. type RpcReceipt = ::ReceiptResponse; -use revm_inspectors::access_list::AccessListInspector; -use signet_cold::{HeaderSpecifier, ReceiptSpecifier}; -use signet_hot::model::HotKvRead; -use signet_hot::{HistoryRead, HotKv, db::HotDbRead}; -use tracing::{Instrument, debug, trace_span}; -use trevm::{ - EstimationResult, revm::context::result::ExecutionResult, revm::database::DBErrorMarker, -}; -use super::error::CallErrorData; +// --------------------------------------------------------------------------- +// Lazy serialization types +// --------------------------------------------------------------------------- + +/// Serializes as an empty JSON array `[]` without allocating. +pub(crate) struct EmptyArray; + +impl Serialize for EmptyArray { + fn serialize(&self, serializer: S) -> Result { + serializer.serialize_seq(Some(0))?.end() + } +} + +/// Block transactions with lazy serialization. +/// +/// In both variants the raw [`RecoveredTx`] list is kept and transformed +/// during serialization — either to full RPC transaction objects or to bare +/// hashes — avoiding an intermediate `Vec` allocation. 
+pub(crate) enum BlockTransactions { + Full { + txs: Vec, + block_num: u64, + block_hash: B256, + base_fee: Option, + }, + Hashes(Vec), +} + +impl Serialize for BlockTransactions { + fn serialize(&self, serializer: S) -> Result { + match self { + Self::Full { txs, block_num, block_hash, base_fee } => { + let mut seq = serializer.serialize_seq(Some(txs.len()))?; + for (i, tx) in txs.iter().enumerate() { + let meta = signet_storage_types::ConfirmationMeta::new( + *block_num, + *block_hash, + i as u64, + ); + seq.serialize_element(&build_rpc_transaction(tx, &meta, *base_fee))?; + } + seq.end() + } + Self::Hashes(txs) => { + let mut seq = serializer.serialize_seq(Some(txs.len()))?; + for tx in txs { + seq.serialize_element(tx.tx_hash())?; + } + seq.end() + } + } + } +} + +/// RPC block response with lazy transaction serialization. +/// +/// Replaces the alloy `Block` type so that transactions are serialized +/// inline from raw storage data. Signet has no uncles or withdrawals, so +/// those are hardcoded as empty/absent to avoid allocations. 
+#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +pub(crate) struct RpcBlock { + #[serde(flatten)] + header: alloy::rpc::types::Header, + transactions: BlockTransactions, + uncles: EmptyArray, +} // --------------------------------------------------------------------------- // Not Supported @@ -319,23 +386,12 @@ where let base_fee = header.base_fee_per_gas; let transactions = if full { - let rpc_txs: Vec<_> = txs - .into_iter() - .enumerate() - .map(|(i, tx)| { - let meta = signet_storage_types::ConfirmationMeta::new( - block_num, block_hash, i as u64, - ); - build_rpc_transaction(tx, &meta, base_fee) - }) - .collect(); - BlockTransactions::Full(rpc_txs) + BlockTransactions::Full { txs, block_num, block_hash, base_fee } } else { - let hashes: Vec = txs.iter().map(|tx| *tx.tx_hash()).collect(); - BlockTransactions::Hashes(hashes) + BlockTransactions::Hashes(txs) }; - Ok(Some(Block { + Ok(Some(RpcBlock { header: alloy::rpc::types::Header { inner: header.into_inner(), hash: block_hash, @@ -343,8 +399,7 @@ where size: None, }, transactions, - uncles: vec![], - withdrawals: None, + uncles: EmptyArray, })) }; @@ -377,12 +432,30 @@ where await_handler!(@option hctx.spawn_blocking(task)) } +/// Lazily serialized receipt list. Each receipt is built and serialized +/// inline without allocating an intermediate `Vec`. +pub(crate) struct LazyReceipts { + txs: Vec, + receipts: Vec, + base_fee: Option, +} + +impl Serialize for LazyReceipts { + fn serialize(&self, serializer: S) -> Result { + let mut seq = serializer.serialize_seq(Some(self.txs.len()))?; + for (tx, cr) in self.txs.iter().zip(&self.receipts) { + seq.serialize_element(&build_receipt(cr, tx, self.base_fee))?; + } + seq.end() + } +} + /// `eth_getBlockReceipts` — all receipts in a block. 
pub(crate) async fn block_receipts( hctx: HandlerCtx, (id,): (BlockId,), ctx: StorageRpcCtx, -) -> Result>, String> +) -> Result, String> where H: HotKv + Send + Sync + 'static, ::Error: DBErrorMarker, @@ -404,10 +477,7 @@ where let base_fee = header.base_fee_per_gas; - let rpc_receipts = - txs.iter().zip(receipts).map(|(tx, cr)| build_receipt(cr, tx, base_fee)).collect(); - - Ok(Some(rpc_receipts)) + Ok(Some(LazyReceipts { txs, receipts, base_fee })) }; await_handler!(@option hctx.spawn_blocking(task)) @@ -472,7 +542,7 @@ where cold.get_header_by_number(meta.block_number()).await.map_err(|e| e.to_string())?; let base_fee = header.and_then(|h| h.base_fee_per_gas); - Ok(Some(build_rpc_transaction(tx, &meta, base_fee))) + Ok(Some(build_rpc_transaction(&tx, &meta, base_fee))) }; await_handler!(@option hctx.spawn_blocking(task)) @@ -529,7 +599,7 @@ where cold.get_header_by_number(meta.block_number()).await.map_err(|e| e.to_string())?; let base_fee = header.and_then(|h| h.base_fee_per_gas); - Ok(Some(build_rpc_transaction(tx, &meta, base_fee))) + Ok(Some(build_rpc_transaction(&tx, &meta, base_fee))) }; await_handler!(@option hctx.spawn_blocking(task)) @@ -590,7 +660,7 @@ where let tx = tx.ok_or("receipt found but transaction missing")?.into_inner(); let base_fee = header.and_then(|h| h.base_fee_per_gas); - Ok(Some(build_receipt(cr, &tx, base_fee))) + Ok(Some(build_receipt(&cr, &tx, base_fee))) }; await_handler!(@option hctx.spawn_blocking(task)) @@ -741,7 +811,7 @@ where } let result = response_tri!(trevm.call().map_err(signet_evm::EvmErrored::into_error)); - ResponsePayload::Success(result.0) + ResponsePayload(Ok(result.0)) } .instrument(span); @@ -765,14 +835,14 @@ where normalize_gas_stateless(&mut params.0, max_gas); await_handler!(@response_option hctx.spawn_with_ctx(|hctx| async move { - let res = match run_call(hctx, params, ctx).await { - ResponsePayload::Success(res) => res, - ResponsePayload::Failure(err) => return ResponsePayload::Failure(err), + let res = 
match run_call(hctx, params, ctx).await.0 { + Ok(res) => res, + Err(err) => return ResponsePayload(Err(err)), }; match res { ExecutionResult::Success { output, .. } => { - ResponsePayload::Success(output.data().clone()) + ResponsePayload(Ok(output.data().clone())) } ExecutionResult::Revert { output, .. } => { ResponsePayload::internal_error_with_message_and_obj( @@ -821,7 +891,7 @@ where response_tri!(trevm.estimate_gas().map_err(signet_evm::EvmErrored::into_error)); match estimate { - EstimationResult::Success { limit, .. } => ResponsePayload::Success(U64::from(limit)), + EstimationResult::Success { limit, .. } => ResponsePayload(Ok(U64::from(limit))), EstimationResult::Revert { reason, .. } => { ResponsePayload::internal_error_with_message_and_obj( "execution reverted".into(), @@ -882,7 +952,7 @@ where let access_list = inspector.into_access_list(); - ResponsePayload::Success(AccessListResult { access_list, gas_used, error }) + ResponsePayload(Ok(AccessListResult { access_list, gas_used, error })) } .instrument(span); diff --git a/crates/rpc-storage/src/eth/helpers.rs b/crates/rpc-storage/src/eth/helpers.rs index 756b784..f25667d 100644 --- a/crates/rpc-storage/src/eth/helpers.rs +++ b/crates/rpc-storage/src/eth/helpers.rs @@ -161,12 +161,12 @@ impl trevm::Cfg for CfgFiller { /// Build an [`alloy::rpc::types::Transaction`] from cold storage types. 
pub(crate) fn build_rpc_transaction( - tx: signet_storage_types::RecoveredTx, + tx: &signet_storage_types::RecoveredTx, meta: &ConfirmationMeta, base_fee: Option, ) -> RpcTransaction { let signer = tx.signer(); - let tx_envelope: alloy::consensus::TxEnvelope = tx.into_inner().into(); + let tx_envelope: alloy::consensus::TxEnvelope = tx.clone().into_inner().into(); let inner = Recovered::new_unchecked(tx_envelope, signer); let egp = base_fee @@ -187,7 +187,7 @@ pub(crate) fn build_rpc_transaction( /// The transaction is needed for `to`, `contract_address`, and /// `effective_gas_price` which are not stored on the receipt. pub(crate) fn build_receipt( - cr: ColdReceipt, + cr: &ColdReceipt, tx: &signet_storage_types::RecoveredTx, base_fee: Option, ) -> RpcReceipt { @@ -195,8 +195,11 @@ pub(crate) fn build_receipt( let status = cr.receipt.status; let cumulative_gas_used = cr.receipt.cumulative_gas_used; - let rpc_receipt = - alloy::rpc::types::eth::Receipt { status, cumulative_gas_used, logs: cr.receipt.logs }; + let rpc_receipt = alloy::rpc::types::eth::Receipt { + status, + cumulative_gas_used, + logs: cr.receipt.logs.clone(), + }; let (contract_address, to) = match tx.kind() { TxKind::Create => (Some(cr.from.create(tx.nonce())), None), diff --git a/crates/rpc-storage/src/interest/subs.rs b/crates/rpc-storage/src/interest/subs.rs index 8fb7b04..7bfdb97 100644 --- a/crates/rpc-storage/src/interest/subs.rs +++ b/crates/rpc-storage/src/interest/subs.rs @@ -1,13 +1,11 @@ //! Subscription management for `eth_subscribe` / `eth_unsubscribe`. 
use crate::interest::{InterestKind, NewBlockNotification}; -use ajj::{HandlerCtx, serde_json}; +use ajj::HandlerCtx; use alloy::{primitives::U64, rpc::types::Log}; use dashmap::DashMap; use std::{ - cmp::min, collections::VecDeque, - future::pending, sync::{ Arc, Weak, atomic::{AtomicU64, Ordering}, @@ -16,7 +14,7 @@ use std::{ }; use tokio::sync::broadcast::{self, error::RecvError}; use tokio_util::sync::{CancellationToken, WaitForCancellationFutureOwned}; -use tracing::{Instrument, debug, debug_span, enabled, trace}; +use tracing::{debug, debug_span, enabled, trace}; /// Either type for subscription outputs. #[derive(Debug, Clone, PartialEq, Eq, serde::Serialize)] @@ -206,7 +204,9 @@ impl SubscriptionTask { async fn task_future(self, ajj_ctx: HandlerCtx, ajj_cancel: WaitForCancellationFutureOwned) { let SubscriptionTask { id, filter, token, mut notifs } = self; - let Some(sender) = ajj_ctx.notifications() else { return }; + if !ajj_ctx.notifications_enabled() { + return; + } let mut notif_buffer = filter.empty_sub_buffer(); tokio::pin!(ajj_cancel); @@ -217,76 +217,67 @@ impl SubscriptionTask { span.record("filter", format!("{filter:?}")); } - let guard = span.enter(); - - let permit_fut = async { - if !notif_buffer.is_empty() { - sender.reserve_many(min(sender.max_capacity() / 2, notif_buffer.len())).await - } else { - pending().await + // Drain one buffered item per iteration, checking for + // cancellation between each send. + if let Some(item) = notif_buffer.pop_front() { + let notification = ajj::serde_json::json!({ + "jsonrpc": "2.0", + "method": "eth_subscription", + "params": { + "result": &item, + "subscription": id + }, + }); + + let _guard = span.enter(); + tokio::select! 
{ + biased; + _ = &mut ajj_cancel => { + trace!("subscription cancelled by client disconnect"); + token.cancel(); + break; + } + _ = token.cancelled() => { + trace!("subscription cancelled by user"); + break; + } + result = ajj_ctx.notify(¬ification) => { + if result.is_err() { + trace!("channel to client closed"); + break; + } + } } + continue; } - .in_current_span(); - drop(guard); + // Buffer empty — wait for incoming broadcast notifications. + let _guard = span.enter(); tokio::select! { biased; _ = &mut ajj_cancel => { - let _guard = span.enter(); trace!("subscription cancelled by client disconnect"); token.cancel(); break; } _ = token.cancelled() => { - let _guard = span.enter(); trace!("subscription cancelled by user"); break; } - permits = permit_fut => { - let _guard = span.enter(); - let Ok(permits) = permits else { - trace!("channel to client closed"); - break - }; - - for permit in permits { - let Some(item) = notif_buffer.pop_front() else { - break; - }; - let notification = ajj::serde_json::json!{ - { - "jsonrpc": "2.0", - "method": "eth_subscription", - "params": { - "result": &item, - "subscription": id - }, - } - }; - let Ok(brv) = serde_json::value::to_raw_value(¬ification) else { - trace!(?item, "failed to serialize notification"); - continue - }; - permit.send(brv); - } - } notif_res = notifs.recv() => { - let _guard = span.enter(); - let notif = match notif_res { Ok(notif) => notif, Err(RecvError::Lagged(skipped)) => { trace!(skipped, "missed notifications"); continue; - }, - Err(e) =>{ + } + Err(e) => { trace!(?e, "notification stream closed"); break; } }; let output = filter.filter_notification_for_sub(¬if); - trace!(count = output.len(), "Filter applied to notification"); if !output.is_empty() { notif_buffer.extend(output); diff --git a/crates/rpc-storage/src/signet/endpoints.rs b/crates/rpc-storage/src/signet/endpoints.rs index 92e5102..5776522 100644 --- a/crates/rpc-storage/src/signet/endpoints.rs +++ 
b/crates/rpc-storage/src/signet/endpoints.rs @@ -75,7 +75,7 @@ where SignetError::Evm(e.to_string()) })); - ResponsePayload::Success(driver.into_response()) + ResponsePayload(Ok(driver.into_response())) }; let task = async move { From 9be14ceda3e389708666040e23337c21cea7ccd3 Mon Sep 17 00:00:00 2001 From: James Date: Sat, 14 Feb 2026 17:00:55 -0500 Subject: [PATCH 20/31] fix: migrate signet-rpc crate to ajj 0.5.0 API Apply the same ajj 0.5.0 migration to the canonical rpc crate: - ResponsePayload::Success/Failure replaced with ResponsePayload(Ok/Err) - Subscription task rewritten to use ctx.notify() instead of the removed notifications() channel accessor Co-Authored-By: Claude Opus 4.6 --- crates/rpc/src/debug/endpoints.rs | 4 +- crates/rpc/src/eth/endpoints.rs | 12 +-- crates/rpc/src/inspect/endpoints.rs | 2 +- crates/rpc/src/interest/subs.rs | 111 ++++++++++------------------ crates/rpc/src/signet/endpoints.rs | 2 +- 5 files changed, 50 insertions(+), 81 deletions(-) diff --git a/crates/rpc/src/debug/endpoints.rs b/crates/rpc/src/debug/endpoints.rs index 4933705..293c0ba 100644 --- a/crates/rpc/src/debug/endpoints.rs +++ b/crates/rpc/src/debug/endpoints.rs @@ -90,7 +90,7 @@ where tracing::debug!(tx_index = idx, tx_hash = ?tx.hash(), "Traced transaction"); } - ResponsePayload::Success(frames) + ResponsePayload(Ok(frames)) } .instrument(span); @@ -163,7 +163,7 @@ where let res = response_tri!(crate::debug::tracer::trace(trevm, &opts, tx_info)).0; - ResponsePayload::Success(res) + ResponsePayload(Ok(res)) } .instrument(span); diff --git a/crates/rpc/src/eth/endpoints.rs b/crates/rpc/src/eth/endpoints.rs index 7574594..da34a30 100644 --- a/crates/rpc/src/eth/endpoints.rs +++ b/crates/rpc/src/eth/endpoints.rs @@ -477,7 +477,7 @@ where let execution_result = response_tri!(trevm.call().map_err(EvmErrored::into_error)).0; - ResponsePayload::Success(execution_result) + ResponsePayload(Ok(execution_result)) } .instrument(span); @@ -501,14 +501,14 @@ where 
normalize_gas_stateless(&mut params.0, max_gas); await_handler!(@response_option hctx.spawn_with_ctx(|hctx| async move { - let res = match run_call(hctx, params, ctx).await { - ResponsePayload::Success(res) => res, - ResponsePayload::Failure(err) => return ResponsePayload::Failure(err), + let res = match run_call(hctx, params, ctx).await.0 { + Ok(res) => res, + Err(err) => return ResponsePayload(Err(err)), }; match res { ExecutionResult::Success { output, .. } => { - ResponsePayload::Success(output.data().clone()) + ResponsePayload(Ok(output.data().clone())) } ExecutionResult::Revert { output, .. } => { ResponsePayload::internal_error_with_message_and_obj( @@ -578,7 +578,7 @@ where let (estimate, _) = response_tri!(trevm.estimate_gas().map_err(EvmErrored::into_error)); match estimate { - EstimationResult::Success { limit, .. } => ResponsePayload::Success(U64::from(limit)), + EstimationResult::Success { limit, .. } => ResponsePayload(Ok(U64::from(limit))), EstimationResult::Revert { reason, .. 
} => { ResponsePayload::internal_error_with_message_and_obj( "execution reverted".into(), diff --git a/crates/rpc/src/inspect/endpoints.rs b/crates/rpc/src/inspect/endpoints.rs index e5155ff..e8f4c6d 100644 --- a/crates/rpc/src/inspect/endpoints.rs +++ b/crates/rpc/src/inspect/endpoints.rs @@ -30,7 +30,7 @@ where ); }; - ResponsePayload::Success(output) + ResponsePayload(Ok(output)) }; await_handler!(@response_option hctx.spawn_blocking(task)) diff --git a/crates/rpc/src/interest/subs.rs b/crates/rpc/src/interest/subs.rs index 36583d9..035c754 100644 --- a/crates/rpc/src/interest/subs.rs +++ b/crates/rpc/src/interest/subs.rs @@ -1,5 +1,5 @@ use crate::interest::InterestKind; -use ajj::{HandlerCtx, serde_json}; +use ajj::HandlerCtx; use alloy::{primitives::U64, rpc::types::Log}; use dashmap::DashMap; use reth::{ @@ -8,9 +8,7 @@ use reth::{ }; use signet_node_types::Pnt; use std::{ - cmp::min, collections::VecDeque, - future::pending, sync::{ Arc, Weak, atomic::{AtomicU64, Ordering}, @@ -19,7 +17,7 @@ use std::{ }; use tokio::sync::broadcast::error::RecvError; use tokio_util::sync::{CancellationToken, WaitForCancellationFutureOwned}; -use tracing::{Instrument, debug, debug_span, enabled, trace}; +use tracing::{debug, debug_span, enabled, trace}; /// Either type for subscription outputs. #[derive(Debug, Clone, PartialEq, Eq, serde::Serialize)] @@ -211,9 +209,10 @@ impl SubscriptionTask { ) { let SubscriptionTask { id, filter, token, mut notifs } = self; - let Some(sender) = ajj_ctx.notifications() else { return }; + if !ajj_ctx.notifications_enabled() { + return; + } - // Buffer for notifications to be sent to the client let mut notif_buffer = filter.empty_sub_buffer(); tokio::pin!(ajj_cancel); @@ -223,99 +222,69 @@ impl SubscriptionTask { span.record("filter", format!("{filter:?}")); } - let guard = span.enter(); - // This future checks if the notification buffer is non-empty and - // waits for the sender to have some capacity before sending. 
- let permit_fut = async { - if !notif_buffer.is_empty() { - // NB: we reserve half the capacity to avoid blocking other - // usage. This is a heuristic and can be adjusted as needed. - sender.reserve_many(min(sender.max_capacity() / 2, notif_buffer.len())).await - } else { - // If the notification buffer is empty, just never return - pending().await + // Drain one buffered item per iteration, checking for + // cancellation between each send. + if let Some(item) = notif_buffer.pop_front() { + let notification = ajj::serde_json::json!({ + "jsonrpc": "2.0", + "method": "eth_subscription", + "params": { + "result": &item, + "subscription": id + }, + }); + + let _guard = span.enter(); + tokio::select! { + biased; + _ = &mut ajj_cancel => { + trace!("subscription cancelled by client disconnect"); + token.cancel(); + break; + } + _ = token.cancelled() => { + trace!("subscription cancelled by user"); + break; + } + result = ajj_ctx.notify(¬ification) => { + if result.is_err() { + trace!("channel to client closed"); + break; + } + } } + continue; } - .in_current_span(); - drop(guard); - // NB: this select is biased, this ensures that the outbound - // buffer is either drained, or blocked on permits before checking - // the inbound buffer + // Buffer empty — wait for incoming notifications. + let _guard = span.enter(); tokio::select! { biased; _ = &mut ajj_cancel => { - let _guard = span.enter(); - // if AJJ cancelled us via client disconnect, we should - // cancel the token so that we can be reaped by the - // subscription cleaner task. 
trace!("subscription cancelled by client disconnect"); token.cancel(); break; } _ = token.cancelled() => { - // If the token is cancelled, this subscription has been - // cancelled by eth_unsubscribe - let _guard = span.enter(); trace!("subscription cancelled by user"); break; } - permits = permit_fut => { - let _guard = span.enter(); - // channel closed - let Ok(permits) = permits else { - trace!("channel to client closed"); - break - }; - - for permit in permits { - // Send notification to the client for each permit. - let Some(item) = notif_buffer.pop_front() else { - // if we run out of notifications, we should break - // This would be weird, as we only allocated - // permits for notifications we had. Let's handle it anyway. - break; - }; - let notification = ajj::serde_json::json!{ - { - "jsonrpc": "2.0", - "method": "eth_subscription", - "params": { - "result": &item, - "subscription": id - }, - } - }; - // Serialize and send. - let Ok(brv) = serde_json::value::to_raw_value(¬ification) else { - trace!(?item, "failed to serialize notification"); - continue - }; - permit.send(brv); - } - } notif_res = notifs.recv() => { - let _guard = span.enter(); - let notif = match notif_res { Ok(notif) => notif, Err(RecvError::Lagged(skipped)) => { trace!(skipped, "missed notifications"); continue; - }, - Err(e) =>{ + } + Err(e) => { trace!(?e, "CanonStateNotifications stream closed"); break; } }; let output = filter.filter_notification_for_sub(¬if); - trace!(count = output.len(), "Filter applied to notification"); if !output.is_empty() { - // NB: this will panic if the filter type changes - // mid-task. 
But that should never happen as it would - // break API guarantees anyway notif_buffer.extend(output); } } diff --git a/crates/rpc/src/signet/endpoints.rs b/crates/rpc/src/signet/endpoints.rs index 4abda48..10ed087 100644 --- a/crates/rpc/src/signet/endpoints.rs +++ b/crates/rpc/src/signet/endpoints.rs @@ -62,7 +62,7 @@ where response_tri!(trevm.drive_bundle(&mut driver).map_err(|e| e.into_error())); - ResponsePayload::Success(driver.into_response()) + ResponsePayload(Ok(driver.into_response())) }; let task = async move { From 2dcab79d4a9d1e73209b24833eab8355bf3ae9ef Mon Sep 17 00:00:00 2001 From: James Date: Sat, 14 Feb 2026 18:23:27 -0500 Subject: [PATCH 21/31] refactor: replace json! macro with typed subscription notification struct Add SubscriptionNotification and SubscriptionParams structs with derived Serialize impls, replacing hand-crafted json! macro usage in both rpc and rpc-storage subscription tasks. Co-Authored-By: Claude Opus 4.6 --- crates/rpc-storage/src/interest/subs.rs | 28 ++++++++++++++++++------- crates/rpc/src/interest/subs.rs | 28 ++++++++++++++++++------- 2 files changed, 40 insertions(+), 16 deletions(-) diff --git a/crates/rpc-storage/src/interest/subs.rs b/crates/rpc-storage/src/interest/subs.rs index 7bfdb97..74d5641 100644 --- a/crates/rpc-storage/src/interest/subs.rs +++ b/crates/rpc-storage/src/interest/subs.rs @@ -26,6 +26,21 @@ pub(crate) enum Either { Block(Box), } +/// JSON-RPC subscription notification envelope. +#[derive(serde::Serialize)] +struct SubscriptionNotification<'a> { + jsonrpc: &'static str, + method: &'static str, + params: SubscriptionParams<'a>, +} + +/// Params field of a subscription notification. +#[derive(serde::Serialize)] +struct SubscriptionParams<'a> { + result: &'a Either, + subscription: U64, +} + /// Buffer for subscription outputs. 
#[derive(Debug, Clone, PartialEq, Eq)] pub(crate) enum SubscriptionBuffer { @@ -220,14 +235,11 @@ impl SubscriptionTask { // Drain one buffered item per iteration, checking for // cancellation between each send. if let Some(item) = notif_buffer.pop_front() { - let notification = ajj::serde_json::json!({ - "jsonrpc": "2.0", - "method": "eth_subscription", - "params": { - "result": &item, - "subscription": id - }, - }); + let notification = SubscriptionNotification { + jsonrpc: "2.0", + method: "eth_subscription", + params: SubscriptionParams { result: &item, subscription: id }, + }; let _guard = span.enter(); tokio::select! { diff --git a/crates/rpc/src/interest/subs.rs b/crates/rpc/src/interest/subs.rs index 035c754..e93e676 100644 --- a/crates/rpc/src/interest/subs.rs +++ b/crates/rpc/src/interest/subs.rs @@ -27,6 +27,21 @@ pub enum Either { Block(Box
), } +/// JSON-RPC subscription notification envelope. +#[derive(serde::Serialize)] +struct SubscriptionNotification<'a> { + jsonrpc: &'static str, + method: &'static str, + params: SubscriptionParams<'a>, +} + +/// Params field of a subscription notification. +#[derive(serde::Serialize)] +struct SubscriptionParams<'a> { + result: &'a Either, + subscription: U64, +} + /// Buffer for subscription outputs. #[derive(Debug, Clone, PartialEq, Eq)] pub enum SubscriptionBuffer { @@ -225,14 +240,11 @@ impl SubscriptionTask { // Drain one buffered item per iteration, checking for // cancellation between each send. if let Some(item) = notif_buffer.pop_front() { - let notification = ajj::serde_json::json!({ - "jsonrpc": "2.0", - "method": "eth_subscription", - "params": { - "result": &item, - "subscription": id - }, - }); + let notification = SubscriptionNotification { + jsonrpc: "2.0", + method: "eth_subscription", + params: SubscriptionParams { result: &item, subscription: id }, + }; let _guard = span.enter(); tokio::select! 
{ From 0d178054820346aaeb3e86f3767bad354f8ebacc Mon Sep 17 00:00:00 2001 From: James Date: Sat, 14 Feb 2026 18:54:14 -0500 Subject: [PATCH 22/31] chore: bump ajj to 0.6.0 Co-Authored-By: Claude Opus 4.6 --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 79a7097..5475376 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,7 +62,7 @@ signet-hot = "0.5.0" signet-storage-types = "0.5.0" # ajj -ajj = { version = "0.5.0" } +ajj = { version = "0.6.0" } # trevm trevm = { version = "0.34.0", features = ["full_env_cfg"] } From 4eb1b0a25e91513b03e20e1795f4ff6bef1c1397 Mon Sep 17 00:00:00 2001 From: James Date: Sat, 14 Feb 2026 19:17:37 -0500 Subject: [PATCH 23/31] fix: suppress dead_code warning in test harness and fix doc link Co-Authored-By: Claude Opus 4.6 --- crates/rpc-storage/src/eth/endpoints.rs | 2 +- crates/rpc-storage/tests/eth_rpc.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/crates/rpc-storage/src/eth/endpoints.rs b/crates/rpc-storage/src/eth/endpoints.rs index 0ae29ba..bf5b5c4 100644 --- a/crates/rpc-storage/src/eth/endpoints.rs +++ b/crates/rpc-storage/src/eth/endpoints.rs @@ -56,7 +56,7 @@ impl Serialize for EmptyArray { /// Block transactions with lazy serialization. /// -/// In both variants the raw [`RecoveredTx`] list is kept and transformed +/// In both variants the raw `RecoveredTx` list is kept and transformed /// during serialization — either to full RPC transaction objects or to bare /// hashes — avoiding an intermediate `Vec` allocation. 
pub(crate) enum BlockTransactions { diff --git a/crates/rpc-storage/tests/eth_rpc.rs b/crates/rpc-storage/tests/eth_rpc.rs index 01c742f..e9557cf 100644 --- a/crates/rpc-storage/tests/eth_rpc.rs +++ b/crates/rpc-storage/tests/eth_rpc.rs @@ -35,6 +35,7 @@ struct TestHarness { tags: BlockTags, #[allow(dead_code)] notif_tx: broadcast::Sender, + #[allow(dead_code)] ctx: StorageRpcCtx, _cancel: CancellationToken, } From a819a3f385cbd5d2f4356823701603c1fc2c932e Mon Sep 17 00:00:00 2001 From: James Date: Sun, 15 Feb 2026 11:27:44 -0500 Subject: [PATCH 24/31] feat: use cold storage log streaming for eth_getLogs and eth_getFilterChanges Replace `cold.get_logs()` with `cold.stream_logs()` in both endpoints to gain deadline enforcement (configurable, default 10s), dedicated concurrency control (separate semaphore, max 8 concurrent streams), and reorg detection via anchor hash. Bump signet-storage crates to 0.6.0. Co-Authored-By: Claude Opus 4.6 --- Cargo.toml | 8 ++--- crates/rpc-storage/Cargo.toml | 1 + crates/rpc-storage/FUTURE-EVALUATION.md | 32 ++++++++++++++------ crates/rpc-storage/src/config/rpc_config.rs | 9 ++++++ crates/rpc-storage/src/eth/endpoints.rs | 33 +++++++++++++++++++-- 5 files changed, 67 insertions(+), 16 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 5475376..87a8610 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -56,10 +56,10 @@ signet-tx-cache = "0.16.0-rc.8" signet-types = "0.16.0-rc.8" signet-zenith = "0.16.0-rc.8" signet-journal = "0.16.0-rc.8" -signet-storage = "0.5.0" -signet-cold = "0.5.0" -signet-hot = "0.5.0" -signet-storage-types = "0.5.0" +signet-storage = "0.6.0" +signet-cold = "0.6.0" +signet-hot = "0.6.0" +signet-storage-types = "0.6.0" # ajj ajj = { version = "0.6.0" } diff --git a/crates/rpc-storage/Cargo.toml b/crates/rpc-storage/Cargo.toml index 4a1fe01..f9982a0 100644 --- a/crates/rpc-storage/Cargo.toml +++ b/crates/rpc-storage/Cargo.toml @@ -22,6 +22,7 @@ signet-bundle.workspace = true alloy.workspace = true ajj.workspace = 
true tokio.workspace = true +tokio-stream = "0.1" tokio-util = "0.7" tracing.workspace = true thiserror.workspace = true diff --git a/crates/rpc-storage/FUTURE-EVALUATION.md b/crates/rpc-storage/FUTURE-EVALUATION.md index f1abf26..cd1efaf 100644 --- a/crates/rpc-storage/FUTURE-EVALUATION.md +++ b/crates/rpc-storage/FUTURE-EVALUATION.md @@ -13,7 +13,7 @@ this: `BlockTransactions` and `RpcBlock` types serialize full transactions or hashes lazily from `Vec`. -## Endpoints that cannot benefit +## Endpoints that cannot benefit from lazy serialization - **`debug_traceBlockByNumber`** — EVM state is sequential and destructively consumed per transaction. Computation must be eager; the `Vec` @@ -23,22 +23,36 @@ this: `alloy_serde::quantity` custom serializers. In-housing is moderate effort for low payoff. - **`eth_getLogs`** — `Vec` comes directly from cold storage with no - transformation at the API boundary. + transformation at the API boundary. Lazy serialization does not apply, but + this endpoint now uses `stream_logs` (see below). - **Other sites** — Vecs are needed for sorting (`calculate_reward_percentiles`, `suggest_tip_cap`), poll buffering (`FilterOutput`), or feed into alloy types we don't control. -## Channel-based cold storage streaming +## Channel-based cold storage streaming (adopted) -If cold storage returned `tokio::mpsc::Receiver` instead of `Vec`, lazy -async serialization is blocked by two constraints: +`eth_getLogs` and `eth_getFilterChanges` now use `stream_logs()`, which +returns a `ReceiverStream>` over a bounded MPSC channel +(256 buffer). The stream is collected into a `Vec` at the handler level +because: 1. `serde::Serialize` is synchronous — there is no way to `.await` a channel receiver inside `serialize()`. -2. ajj 0.5.0 buffers the entire response via `serde_json::to_raw_value` before +2. ajj buffers the entire response via `serde_json::to_raw_value` before sending over HTTP or WebSocket. There is no chunked or streaming response support. 
-To unlock channel-based streaming, ajj would need a streaming response API -(e.g. `AsyncSerialize` or HTTP chunked encoding with per-item flushing). Until -then, handlers must drain receivers before returning. +Despite the full collection, streaming provides: + +- **Deadline enforcement** — queries exceeding the configured wall-clock limit + are terminated early, freeing cold storage resources. +- **Dedicated concurrency** — log queries use a separate semaphore (max 8 + streams), preventing starvation of other read operations. +- **Reorg detection** — anchor hash check prevents returning stale/incorrect + data from reorganized blocks. +- **Progressive memory** — cold storage does not hold the entire result set + simultaneously (only 256 items in the channel buffer at a time). + +True async streaming to the HTTP response requires ajj to support a streaming +response API (e.g. `AsyncSerialize` or HTTP chunked encoding with per-item +flushing). diff --git a/crates/rpc-storage/src/config/rpc_config.rs b/crates/rpc-storage/src/config/rpc_config.rs index 690eb3a..b881deb 100644 --- a/crates/rpc-storage/src/config/rpc_config.rs +++ b/crates/rpc-storage/src/config/rpc_config.rs @@ -34,6 +34,14 @@ pub struct StorageRpcConfig { /// Default: `20_000`. pub max_logs_per_response: usize, + /// Maximum wall-clock time for a single log query. + /// + /// If a log query exceeds this duration, the stream is terminated + /// early and the handler returns a deadline-exceeded error. + /// + /// Default: `10` seconds. + pub max_log_query_deadline: Duration, + /// Maximum concurrent tracing/debug requests. 
/// /// Controls the size of the semaphore that gates debug @@ -81,6 +89,7 @@ impl Default for StorageRpcConfig { rpc_gas_cap: 30_000_000, max_blocks_per_filter: 10_000, max_logs_per_response: 20_000, + max_log_query_deadline: Duration::from_secs(10), max_tracing_requests: 25, stale_filter_ttl: Duration::from_secs(5 * 60), gas_oracle_block_count: 20, diff --git a/crates/rpc-storage/src/eth/endpoints.rs b/crates/rpc-storage/src/eth/endpoints.rs index bf5b5c4..9501080 100644 --- a/crates/rpc-storage/src/eth/endpoints.rs +++ b/crates/rpc-storage/src/eth/endpoints.rs @@ -1001,7 +1001,24 @@ where // Logs // --------------------------------------------------------------------------- +/// Drain a [`signet_cold::LogStream`] into a `Vec`. +/// +/// Errors from the stream (deadline exceeded, too many logs, reorg) are +/// propagated as the first encountered error. +async fn collect_log_stream(stream: signet_cold::LogStream) -> signet_cold::ColdResult> { + use tokio_stream::StreamExt; + let mut logs = Vec::new(); + let mut stream = std::pin::pin!(stream); + while let Some(log) = stream.next().await { + logs.push(log?); + } + Ok(logs) +} + /// `eth_getLogs` — query logs from cold storage with filter criteria. +/// +/// Uses `stream_logs` for deadline enforcement and dedicated concurrency +/// control. The stream is collected into a `Vec` for the JSON-RPC response. 
pub(crate) async fn get_logs( hctx: HandlerCtx, (filter,): (Filter,), @@ -1041,9 +1058,14 @@ where }; let max_logs = ctx.config().max_logs_per_response; - let logs = cold.get_logs(resolved_filter, max_logs).await.map_err(|e| e.to_string())?; + let deadline = ctx.config().max_log_query_deadline; - Ok(logs) + let stream = cold + .stream_logs(resolved_filter, max_logs, deadline) + .await + .map_err(|e| e.to_string())?; + + collect_log_stream(stream).await.map_err(|e| e.to_string()) }; await_handler!(@option hctx.spawn_blocking(task)) @@ -1144,7 +1166,12 @@ where }; let max_logs = ctx.config().max_logs_per_response; - let logs = cold.get_logs(resolved, max_logs).await.map_err(|e| e.to_string())?; + let deadline = ctx.config().max_log_query_deadline; + + let stream = + cold.stream_logs(resolved, max_logs, deadline).await.map_err(|e| e.to_string())?; + + let logs = collect_log_stream(stream).await.map_err(|e| e.to_string())?; entry.mark_polled(latest); Ok(FilterOutput::from(logs)) From 4e9c249fc34b5942e605c00227ef848b4fc480cc Mon Sep 17 00:00:00 2001 From: James Date: Sun, 15 Feb 2026 11:50:30 -0500 Subject: [PATCH 25/31] chore: remove future-evaluation --- crates/rpc-storage/FUTURE-EVALUATION.md | 58 ------------------------- crates/rpc-storage/src/debug/mod.rs | 1 + crates/rpc-storage/src/eth/mod.rs | 1 + 3 files changed, 2 insertions(+), 58 deletions(-) delete mode 100644 crates/rpc-storage/FUTURE-EVALUATION.md diff --git a/crates/rpc-storage/FUTURE-EVALUATION.md b/crates/rpc-storage/FUTURE-EVALUATION.md deleted file mode 100644 index cd1efaf..0000000 --- a/crates/rpc-storage/FUTURE-EVALUATION.md +++ /dev/null @@ -1,58 +0,0 @@ -# Future Evaluation Notes - -## Lazy Serialization (ajj 0.5.0) - -ajj 0.5.0 relaxed `RpcSend` bounds so that custom `Serialize` impls can be -returned from handlers without collecting into a `Vec`. 
Two endpoints now use -this: - -- **`eth_getBlockReceipts`** — `LazyReceipts` serializes receipts inline from - raw `ColdReceipt` + `RecoveredTx` data without an intermediate - `Vec`. -- **`eth_getBlockByHash` / `eth_getBlockByNumber`** — in-housed - `BlockTransactions` and `RpcBlock` types serialize full transactions or - hashes lazily from `Vec`. - -## Endpoints that cannot benefit from lazy serialization - -- **`debug_traceBlockByNumber`** — EVM state is sequential and destructively - consumed per transaction. Computation must be eager; the `Vec` - confirms all traces succeed before returning. Converting `DebugError` to - `serde::ser::Error` would lose variant information. -- **`eth_feeHistory`** — Vecs feed into `FeeHistory` (alloy type) which uses - `alloy_serde::quantity` custom serializers. In-housing is moderate effort - for low payoff. -- **`eth_getLogs`** — `Vec` comes directly from cold storage with no - transformation at the API boundary. Lazy serialization does not apply, but - this endpoint now uses `stream_logs` (see below). -- **Other sites** — Vecs are needed for sorting (`calculate_reward_percentiles`, - `suggest_tip_cap`), poll buffering (`FilterOutput`), or feed into alloy - types we don't control. - -## Channel-based cold storage streaming (adopted) - -`eth_getLogs` and `eth_getFilterChanges` now use `stream_logs()`, which -returns a `ReceiverStream>` over a bounded MPSC channel -(256 buffer). The stream is collected into a `Vec` at the handler level -because: - -1. `serde::Serialize` is synchronous — there is no way to `.await` a channel - receiver inside `serialize()`. -2. ajj buffers the entire response via `serde_json::to_raw_value` before - sending over HTTP or WebSocket. There is no chunked or streaming response - support. - -Despite the full collection, streaming provides: - -- **Deadline enforcement** — queries exceeding the configured wall-clock limit - are terminated early, freeing cold storage resources. 
-- **Dedicated concurrency** — log queries use a separate semaphore (max 8 - streams), preventing starvation of other read operations. -- **Reorg detection** — anchor hash check prevents returning stale/incorrect - data from reorganized blocks. -- **Progressive memory** — cold storage does not hold the entire result set - simultaneously (only 256 items in the channel buffer at a time). - -True async streaming to the HTTP response requires ajj to support a streaming -response API (e.g. `AsyncSerialize` or HTTP chunked encoding with per-item -flushing). diff --git a/crates/rpc-storage/src/debug/mod.rs b/crates/rpc-storage/src/debug/mod.rs index 44dcb94..2a4da50 100644 --- a/crates/rpc-storage/src/debug/mod.rs +++ b/crates/rpc-storage/src/debug/mod.rs @@ -5,6 +5,7 @@ use endpoints::{trace_block, trace_transaction}; mod error; pub use error::DebugError; pub(crate) mod tracer; +mod types; use crate::config::StorageRpcCtx; use alloy::{eips::BlockNumberOrTag, primitives::B256}; diff --git a/crates/rpc-storage/src/eth/mod.rs b/crates/rpc-storage/src/eth/mod.rs index f674336..190c0fb 100644 --- a/crates/rpc-storage/src/eth/mod.rs +++ b/crates/rpc-storage/src/eth/mod.rs @@ -14,6 +14,7 @@ mod error; pub use error::EthError; pub(crate) mod helpers; +pub(crate) mod types; use crate::config::StorageRpcCtx; use alloy::{eips::BlockNumberOrTag, primitives::B256}; From f5b91b05f179a7aa086a24327191dfbc808fa05d Mon Sep 17 00:00:00 2001 From: James Date: Sun, 15 Feb 2026 12:00:21 -0500 Subject: [PATCH 26/31] refactor: extract type definitions from endpoint files into type modules Move response/serialization types (EmptyArray, BlockTransactions, RpcBlock, LazyReceipts) and type aliases from eth/endpoints.rs into eth/types.rs, and parameter types (TraceBlockParams, TraceTransactionParams) from debug/endpoints.rs into debug/types.rs. Consolidate duplicate RpcTransaction/RpcReceipt aliases from eth/helpers.rs into eth/types.rs. 
Co-Authored-By: Claude Opus 4.6 --- crates/rpc-storage/src/debug/endpoints.rs | 16 ++-- crates/rpc-storage/src/debug/types.rs | 17 ++++ crates/rpc-storage/src/eth/endpoints.rs | 101 ++-------------------- crates/rpc-storage/src/eth/helpers.rs | 7 +- crates/rpc-storage/src/eth/types.rs | 98 +++++++++++++++++++++ 5 files changed, 126 insertions(+), 113 deletions(-) create mode 100644 crates/rpc-storage/src/debug/types.rs create mode 100644 crates/rpc-storage/src/eth/types.rs diff --git a/crates/rpc-storage/src/debug/endpoints.rs b/crates/rpc-storage/src/debug/endpoints.rs index c3d1e24..b140284 100644 --- a/crates/rpc-storage/src/debug/endpoints.rs +++ b/crates/rpc-storage/src/debug/endpoints.rs @@ -2,15 +2,17 @@ use crate::{ config::StorageRpcCtx, - debug::DebugError, + debug::{ + DebugError, + types::{TraceBlockParams, TraceTransactionParams}, + }, eth::helpers::{CfgFiller, await_handler, response_tri}, }; use ajj::{HandlerCtx, ResponsePayload}; use alloy::{ consensus::BlockHeader, eips::BlockId, - primitives::B256, - rpc::types::trace::geth::{GethDebugTracingOptions, GethTrace, TraceResult}, + rpc::types::trace::geth::{GethTrace, TraceResult}, }; use itertools::Itertools; use signet_evm::EvmErrored; @@ -20,14 +22,6 @@ use signet_types::MagicSig; use tracing::Instrument; use trevm::revm::database::DBErrorMarker; -/// Params for `debug_traceBlockByNumber` and `debug_traceBlockByHash`. -#[derive(Debug, serde::Deserialize)] -pub(super) struct TraceBlockParams(T, #[serde(default)] Option); - -/// Params for `debug_traceTransaction`. -#[derive(Debug, serde::Deserialize)] -pub(super) struct TraceTransactionParams(B256, #[serde(default)] Option); - /// `debug_traceBlockByNumber` and `debug_traceBlockByHash` handler. 
pub(super) async fn trace_block( hctx: HandlerCtx, diff --git a/crates/rpc-storage/src/debug/types.rs b/crates/rpc-storage/src/debug/types.rs new file mode 100644 index 0000000..d401489 --- /dev/null +++ b/crates/rpc-storage/src/debug/types.rs @@ -0,0 +1,17 @@ +//! Parameter types for debug namespace RPC endpoints. + +use alloy::{primitives::B256, rpc::types::trace::geth::GethDebugTracingOptions}; + +/// Params for `debug_traceBlockByNumber` and `debug_traceBlockByHash`. +#[derive(Debug, serde::Deserialize)] +pub(crate) struct TraceBlockParams( + pub(crate) T, + #[serde(default)] pub(crate) Option, +); + +/// Params for `debug_traceTransaction`. +#[derive(Debug, serde::Deserialize)] +pub(crate) struct TraceTransactionParams( + pub(crate) B256, + #[serde(default)] pub(crate) Option, +); diff --git a/crates/rpc-storage/src/eth/endpoints.rs b/crates/rpc-storage/src/eth/endpoints.rs index 9501080..ac886ab 100644 --- a/crates/rpc-storage/src/eth/endpoints.rs +++ b/crates/rpc-storage/src/eth/endpoints.rs @@ -9,6 +9,10 @@ use crate::{ TxParams, await_handler, build_receipt, build_rpc_transaction, hot_reader_at_block, normalize_gas_stateless, response_tri, }, + types::{ + BlockTransactions, EmptyArray, LazyReceipts, RpcBlock, RpcHeader, RpcReceipt, + RpcTransaction, + }, }, interest::{FilterOutput, InterestKind}, }; @@ -21,94 +25,17 @@ use alloy::{ eip2718::{Decodable2718, Encodable2718}, eip2930::AccessListResult, }, - network::{Ethereum, Network}, primitives::{B256, U64, U256}, rpc::types::{FeeHistory, Filter, Log}, }; use revm_inspectors::access_list::AccessListInspector; -use serde::{Serialize, Serializer, ser::SerializeSeq}; -use signet_cold::{ColdReceipt, HeaderSpecifier, ReceiptSpecifier}; +use signet_cold::{HeaderSpecifier, ReceiptSpecifier}; use signet_hot::{HistoryRead, HotKv, db::HotDbRead, model::HotKvRead}; use tracing::{Instrument, debug, trace_span}; use trevm::{ EstimationResult, revm::context::result::ExecutionResult, revm::database::DBErrorMarker, }; -/// 
RPC header type for the Ethereum network. -type RpcHeader = ::HeaderResponse; -/// RPC transaction type for the Ethereum network. -type RpcTransaction = ::TransactionResponse; -/// RPC receipt type for the Ethereum network. -type RpcReceipt = ::ReceiptResponse; - -// --------------------------------------------------------------------------- -// Lazy serialization types -// --------------------------------------------------------------------------- - -/// Serializes as an empty JSON array `[]` without allocating. -pub(crate) struct EmptyArray; - -impl Serialize for EmptyArray { - fn serialize(&self, serializer: S) -> Result { - serializer.serialize_seq(Some(0))?.end() - } -} - -/// Block transactions with lazy serialization. -/// -/// In both variants the raw `RecoveredTx` list is kept and transformed -/// during serialization — either to full RPC transaction objects or to bare -/// hashes — avoiding an intermediate `Vec` allocation. -pub(crate) enum BlockTransactions { - Full { - txs: Vec, - block_num: u64, - block_hash: B256, - base_fee: Option, - }, - Hashes(Vec), -} - -impl Serialize for BlockTransactions { - fn serialize(&self, serializer: S) -> Result { - match self { - Self::Full { txs, block_num, block_hash, base_fee } => { - let mut seq = serializer.serialize_seq(Some(txs.len()))?; - for (i, tx) in txs.iter().enumerate() { - let meta = signet_storage_types::ConfirmationMeta::new( - *block_num, - *block_hash, - i as u64, - ); - seq.serialize_element(&build_rpc_transaction(tx, &meta, *base_fee))?; - } - seq.end() - } - Self::Hashes(txs) => { - let mut seq = serializer.serialize_seq(Some(txs.len()))?; - for tx in txs { - seq.serialize_element(tx.tx_hash())?; - } - seq.end() - } - } - } -} - -/// RPC block response with lazy transaction serialization. -/// -/// Replaces the alloy `Block` type so that transactions are serialized -/// inline from raw storage data. 
Signet has no uncles or withdrawals, so -/// those are hardcoded as empty/absent to avoid allocations. -#[derive(Serialize)] -#[serde(rename_all = "camelCase")] -pub(crate) struct RpcBlock { - #[serde(flatten)] - header: alloy::rpc::types::Header, - transactions: BlockTransactions, - uncles: EmptyArray, -} - // --------------------------------------------------------------------------- // Not Supported // --------------------------------------------------------------------------- @@ -432,24 +359,6 @@ where await_handler!(@option hctx.spawn_blocking(task)) } -/// Lazily serialized receipt list. Each receipt is built and serialized -/// inline without allocating an intermediate `Vec`. -pub(crate) struct LazyReceipts { - txs: Vec, - receipts: Vec, - base_fee: Option, -} - -impl Serialize for LazyReceipts { - fn serialize(&self, serializer: S) -> Result { - let mut seq = serializer.serialize_seq(Some(self.txs.len()))?; - for (tx, cr) in self.txs.iter().zip(&self.receipts) { - seq.serialize_element(&build_receipt(cr, tx, self.base_fee))?; - } - seq.end() - } -} - /// `eth_getBlockReceipts` — all receipts in a block. pub(crate) async fn block_receipts( hctx: HandlerCtx, diff --git a/crates/rpc-storage/src/eth/helpers.rs b/crates/rpc-storage/src/eth/helpers.rs index f25667d..399a177 100644 --- a/crates/rpc-storage/src/eth/helpers.rs +++ b/crates/rpc-storage/src/eth/helpers.rs @@ -1,12 +1,12 @@ //! Parameter types, macros, and utility helpers for ETH RPC endpoints. 
+use super::types::{RpcReceipt, RpcTransaction}; use crate::interest::InterestKind; use alloy::{ consensus::{ ReceiptEnvelope, ReceiptWithBloom, Transaction, TxReceipt, transaction::Recovered, }, eips::BlockId, - network::{Ethereum, Network}, primitives::{Address, TxKind, U256}, rpc::types::{ BlockOverrides, Log, TransactionReceipt, TransactionRequest, pubsub::SubscriptionKind, @@ -18,11 +18,6 @@ use signet_cold::ColdReceipt; use signet_storage_types::ConfirmationMeta; use trevm::MIN_TRANSACTION_GAS; -/// RPC transaction type for the Ethereum network. -type RpcTransaction = ::TransactionResponse; -/// RPC receipt type for the Ethereum network. -type RpcReceipt = ::ReceiptResponse; - /// Args for `eth_call` and `eth_estimateGas`. #[derive(Debug, Deserialize)] pub(crate) struct TxParams( diff --git a/crates/rpc-storage/src/eth/types.rs b/crates/rpc-storage/src/eth/types.rs new file mode 100644 index 0000000..0f43742 --- /dev/null +++ b/crates/rpc-storage/src/eth/types.rs @@ -0,0 +1,98 @@ +//! Response and serialization types for ETH RPC endpoints. + +use super::helpers::{build_receipt, build_rpc_transaction}; +use alloy::{ + network::{Ethereum, Network}, + primitives::B256, +}; +use serde::{Serialize, Serializer, ser::SerializeSeq}; +use signet_cold::ColdReceipt; + +/// RPC header type for the Ethereum network. +pub(crate) type RpcHeader = ::HeaderResponse; +/// RPC transaction type for the Ethereum network. +pub(crate) type RpcTransaction = ::TransactionResponse; +/// RPC receipt type for the Ethereum network. +pub(crate) type RpcReceipt = ::ReceiptResponse; + +/// Serializes as an empty JSON array `[]` without allocating. +pub(crate) struct EmptyArray; + +impl Serialize for EmptyArray { + fn serialize(&self, serializer: S) -> Result { + serializer.serialize_seq(Some(0))?.end() + } +} + +/// Block transactions with lazy serialization. 
+/// +/// In both variants the raw `RecoveredTx` list is kept and transformed +/// during serialization — either to full RPC transaction objects or to bare +/// hashes — avoiding an intermediate `Vec` allocation. +pub(crate) enum BlockTransactions { + Full { + txs: Vec, + block_num: u64, + block_hash: B256, + base_fee: Option, + }, + Hashes(Vec), +} + +impl Serialize for BlockTransactions { + fn serialize(&self, serializer: S) -> Result { + match self { + Self::Full { txs, block_num, block_hash, base_fee } => { + let mut seq = serializer.serialize_seq(Some(txs.len()))?; + for (i, tx) in txs.iter().enumerate() { + let meta = signet_storage_types::ConfirmationMeta::new( + *block_num, + *block_hash, + i as u64, + ); + seq.serialize_element(&build_rpc_transaction(tx, &meta, *base_fee))?; + } + seq.end() + } + Self::Hashes(txs) => { + let mut seq = serializer.serialize_seq(Some(txs.len()))?; + for tx in txs { + seq.serialize_element(tx.tx_hash())?; + } + seq.end() + } + } + } +} + +/// RPC block response with lazy transaction serialization. +/// +/// Replaces the alloy `Block` type so that transactions are serialized +/// inline from raw storage data. Signet has no uncles or withdrawals, so +/// those are hardcoded as empty/absent to avoid allocations. +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +pub(crate) struct RpcBlock { + #[serde(flatten)] + pub(crate) header: alloy::rpc::types::Header, + pub(crate) transactions: BlockTransactions, + pub(crate) uncles: EmptyArray, +} + +/// Lazily serialized receipt list. Each receipt is built and serialized +/// inline without allocating an intermediate `Vec`. 
+pub(crate) struct LazyReceipts { + pub(crate) txs: Vec, + pub(crate) receipts: Vec, + pub(crate) base_fee: Option, +} + +impl Serialize for LazyReceipts { + fn serialize(&self, serializer: S) -> Result { + let mut seq = serializer.serialize_seq(Some(self.txs.len()))?; + for (tx, cr) in self.txs.iter().zip(&self.receipts) { + seq.serialize_element(&build_receipt(cr, tx, self.base_fee))?; + } + seq.end() + } +} From afdff5a06522bf64c68d6a063a263d51dafe649b Mon Sep 17 00:00:00 2001 From: James Date: Sun, 15 Feb 2026 12:28:40 -0500 Subject: [PATCH 27/31] feat: restore permit-based subscription notification batching Replace the one-notification-per-iteration drain pattern with permit_many batching for better throughput and fairness. Bumps ajj to 0.6.2 for permit_many/notification_capacity support. Co-Authored-By: Claude Opus 4.6 --- crates/rpc-storage/src/interest/subs.rs | 63 +++++++++++++------------ crates/rpc/src/interest/subs.rs | 63 +++++++++++++------------ 2 files changed, 66 insertions(+), 60 deletions(-) diff --git a/crates/rpc-storage/src/interest/subs.rs b/crates/rpc-storage/src/interest/subs.rs index 74d5641..bf42797 100644 --- a/crates/rpc-storage/src/interest/subs.rs +++ b/crates/rpc-storage/src/interest/subs.rs @@ -5,7 +5,9 @@ use ajj::HandlerCtx; use alloy::{primitives::U64, rpc::types::Log}; use dashmap::DashMap; use std::{ + cmp::min, collections::VecDeque, + future::pending, sync::{ Arc, Weak, atomic::{AtomicU64, Ordering}, @@ -14,7 +16,7 @@ use std::{ }; use tokio::sync::broadcast::{self, error::RecvError}; use tokio_util::sync::{CancellationToken, WaitForCancellationFutureOwned}; -use tracing::{debug, debug_span, enabled, trace}; +use tracing::{Instrument, debug, debug_span, enabled, trace}; /// Either type for subscription outputs. 
#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize)] @@ -232,38 +234,23 @@ impl SubscriptionTask { span.record("filter", format!("{filter:?}")); } - // Drain one buffered item per iteration, checking for - // cancellation between each send. - if let Some(item) = notif_buffer.pop_front() { - let notification = SubscriptionNotification { - jsonrpc: "2.0", - method: "eth_subscription", - params: SubscriptionParams { result: &item, subscription: id }, - }; - - let _guard = span.enter(); - tokio::select! { - biased; - _ = &mut ajj_cancel => { - trace!("subscription cancelled by client disconnect"); - token.cancel(); - break; - } - _ = token.cancelled() => { - trace!("subscription cancelled by user"); - break; - } - result = ajj_ctx.notify(¬ification) => { - if result.is_err() { - trace!("channel to client closed"); - break; - } - } + // NB: reserve half the capacity to avoid blocking other + // usage. This is a heuristic and can be adjusted as needed. + let guard = span.enter(); + let permit_fut = async { + if !notif_buffer.is_empty() { + ajj_ctx + .permit_many(min(ajj_ctx.notification_capacity() / 2, notif_buffer.len())) + .await + } else { + pending().await } - continue; } + .in_current_span(); + drop(guard); - // Buffer empty — wait for incoming broadcast notifications. + // NB: biased select ensures we check cancellation before + // processing new notifications. let _guard = span.enter(); tokio::select! 
{ biased; @@ -276,6 +263,22 @@ impl SubscriptionTask { trace!("subscription cancelled by user"); break; } + permits = permit_fut => { + let Some(permits) = permits else { + trace!("channel to client closed"); + break + }; + + for permit in permits { + let Some(item) = notif_buffer.pop_front() else { break }; + let notification = SubscriptionNotification { + jsonrpc: "2.0", + method: "eth_subscription", + params: SubscriptionParams { result: &item, subscription: id }, + }; + let _ = permit.send(¬ification); + } + } notif_res = notifs.recv() => { let notif = match notif_res { Ok(notif) => notif, diff --git a/crates/rpc/src/interest/subs.rs b/crates/rpc/src/interest/subs.rs index e93e676..c46b749 100644 --- a/crates/rpc/src/interest/subs.rs +++ b/crates/rpc/src/interest/subs.rs @@ -8,7 +8,9 @@ use reth::{ }; use signet_node_types::Pnt; use std::{ + cmp::min, collections::VecDeque, + future::pending, sync::{ Arc, Weak, atomic::{AtomicU64, Ordering}, @@ -17,7 +19,7 @@ use std::{ }; use tokio::sync::broadcast::error::RecvError; use tokio_util::sync::{CancellationToken, WaitForCancellationFutureOwned}; -use tracing::{debug, debug_span, enabled, trace}; +use tracing::{Instrument, debug, debug_span, enabled, trace}; /// Either type for subscription outputs. #[derive(Debug, Clone, PartialEq, Eq, serde::Serialize)] @@ -237,38 +239,23 @@ impl SubscriptionTask { span.record("filter", format!("{filter:?}")); } - // Drain one buffered item per iteration, checking for - // cancellation between each send. - if let Some(item) = notif_buffer.pop_front() { - let notification = SubscriptionNotification { - jsonrpc: "2.0", - method: "eth_subscription", - params: SubscriptionParams { result: &item, subscription: id }, - }; - - let _guard = span.enter(); - tokio::select! 
{ - biased; - _ = &mut ajj_cancel => { - trace!("subscription cancelled by client disconnect"); - token.cancel(); - break; - } - _ = token.cancelled() => { - trace!("subscription cancelled by user"); - break; - } - result = ajj_ctx.notify(¬ification) => { - if result.is_err() { - trace!("channel to client closed"); - break; - } - } + // NB: reserve half the capacity to avoid blocking other + // usage. This is a heuristic and can be adjusted as needed. + let guard = span.enter(); + let permit_fut = async { + if !notif_buffer.is_empty() { + ajj_ctx + .permit_many(min(ajj_ctx.notification_capacity() / 2, notif_buffer.len())) + .await + } else { + pending().await } - continue; } + .in_current_span(); + drop(guard); - // Buffer empty — wait for incoming notifications. + // NB: biased select ensures we check cancellation before + // processing new notifications. let _guard = span.enter(); tokio::select! { biased; @@ -281,6 +268,22 @@ impl SubscriptionTask { trace!("subscription cancelled by user"); break; } + permits = permit_fut => { + let Some(permits) = permits else { + trace!("channel to client closed"); + break + }; + + for permit in permits { + let Some(item) = notif_buffer.pop_front() else { break }; + let notification = SubscriptionNotification { + jsonrpc: "2.0", + method: "eth_subscription", + params: SubscriptionParams { result: &item, subscription: id }, + }; + let _ = permit.send(¬ification); + } + } notif_res = notifs.recv() => { let notif = match notif_res { Ok(notif) => notif, From a98f49e0ce65e78af7bc5dde683ff3cd8be1f0fe Mon Sep 17 00:00:00 2001 From: James Date: Sun, 15 Feb 2026 15:34:39 -0500 Subject: [PATCH 28/31] refactor: move orphaned types to signet-node and remove journal hash tracking Move NodeStatus from signet-node-types to signet-node, move ServeConfig/RpcServerGuard from signet-rpc to signet-node, and remove the journal hash tracking system (JournalHashes table, journal module, GENESIS_JOURNAL_HASH, and all related parameters/methods). 
Co-Authored-By: Claude Opus 4.6 --- crates/block-processor/Cargo.toml | 1 - crates/block-processor/src/v1/processor.rs | 33 +-- crates/db/Cargo.toml | 3 - crates/db/README.md | 2 +- crates/db/src/journal/ingestor.rs | 72 ------ crates/db/src/journal/mod.rs | 10 - crates/db/src/journal/provider.rs | 80 ------- crates/db/src/journal/trait.rs | 55 ----- crates/db/src/lib.rs | 5 +- crates/db/src/provider.rs | 34 +-- crates/db/src/tables.rs | 16 -- crates/db/src/traits.rs | 17 +- crates/db/tests/db.rs | 8 +- crates/node-tests/src/context.rs | 4 +- crates/node/Cargo.toml | 4 + crates/node/src/builder.rs | 9 +- crates/node/src/lib.rs | 7 +- crates/node/src/node.rs | 11 +- crates/node/src/rpc.rs | 7 +- crates/node/src/serve.rs | 249 +++++++++++++++++++++ crates/node/src/status.rs | 8 + crates/rpc/Cargo.toml | 4 - crates/rpc/src/config.rs | 121 ---------- crates/rpc/src/lib.rs | 38 +--- crates/rpc/src/utils.rs | 134 +---------- 25 files changed, 295 insertions(+), 637 deletions(-) delete mode 100644 crates/db/src/journal/ingestor.rs delete mode 100644 crates/db/src/journal/mod.rs delete mode 100644 crates/db/src/journal/provider.rs delete mode 100644 crates/db/src/journal/trait.rs create mode 100644 crates/node/src/serve.rs create mode 100644 crates/node/src/status.rs delete mode 100644 crates/rpc/src/config.rs diff --git a/crates/block-processor/Cargo.toml b/crates/block-processor/Cargo.toml index f1ac842..b7785c8 100644 --- a/crates/block-processor/Cargo.toml +++ b/crates/block-processor/Cargo.toml @@ -13,7 +13,6 @@ repository.workspace = true signet-constants.workspace = true signet-evm.workspace = true signet-extract.workspace = true -signet-journal.workspace = true signet-types.workspace = true init4-bin-base.workspace = true diff --git a/crates/block-processor/src/v1/processor.rs b/crates/block-processor/src/v1/processor.rs index 154986e..cf76ca5 100644 --- a/crates/block-processor/src/v1/processor.rs +++ b/crates/block-processor/src/v1/processor.rs @@ -1,7 +1,7 @@ 
use crate::{AliasOracle, AliasOracleFactory, Chain, metrics}; use alloy::{ consensus::BlockHeader, - primitives::{Address, B256, map::HashSet}, + primitives::{Address, map::HashSet}, }; use core::fmt; use eyre::ContextCompat; @@ -21,10 +21,9 @@ use signet_constants::SignetSystemConstants; use signet_db::{DataCompat, DbProviderExt, RuChain, RuRevmState, RuWriter}; use signet_evm::{BlockResult, EvmNeedsCfg, SignetDriver}; use signet_extract::{Extractor, Extracts}; -use signet_journal::HostJournal; use signet_node_types::{NodeTypesDbTrait, SignetNodeTypes}; use std::{collections::VecDeque, sync::Arc}; -use tracing::{Instrument, debug, error, info, info_span, instrument}; +use tracing::{Instrument, error, info, info_span, instrument}; use trevm::revm::primitives::hardfork::SpecId; /// A block processor that listens to host chain commits and processes @@ -145,7 +144,6 @@ where let mut start = None; let mut current = 0; let last_ru_height = self.ru_provider.last_block_number()?; - let mut prev_block_journal = self.ru_provider.provider_rw()?.latest_journal_hash()?; let mut net_outcome = ExecutionOutcome::default(); @@ -191,10 +189,8 @@ where metrics::record_block_result(&block_result, &start_time); let _ = span.enter(); - let journal = - self.commit_evm_results(&block_extracts, &block_result, prev_block_journal)?; + self.commit_evm_results(&block_extracts, &block_result)?; - prev_block_journal = journal.journal_hash(); net_outcome.extend(block_result.execution_outcome.convert()); } info!("committed blocks"); @@ -308,36 +304,21 @@ where /// Commit the outputs of a zenith block to the database. 
#[instrument(skip_all)] - fn commit_evm_results<'a>( + fn commit_evm_results( &self, extracts: &Extracts<'_, ExtractableChainShim<'_>>, - block_result: &'a BlockResult, - prev_block_journal: B256, - ) -> eyre::Result> { - let journal = block_result.make_host_journal(prev_block_journal); - let time = std::time::Instant::now(); - let jh = journal.journal_hash(); - - debug!( - target: "signet::journal::serialize", - bytes = journal.serialized().len(), - hash = %jh, - elapsed_micros = %time.elapsed().as_micros(), - "journal produced" - ); - + block_result: &BlockResult, + ) -> eyre::Result<()> { self.ru_provider.provider_rw()?.update(|writer| { - // add execution results to database writer.append_host_block( extracts.ru_header(), extracts.transacts().cloned(), extracts.enters(), extracts.enter_tokens(), block_result, - jh, )?; Ok(()) })?; - Ok(journal) + Ok(()) } } diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 784f3b0..199954a 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -12,7 +12,6 @@ repository.workspace = true signet-node-types.workspace = true signet-evm.workspace = true -signet-journal.workspace = true signet-types.workspace = true signet-zenith.workspace = true @@ -28,8 +27,6 @@ reth-stages-types.workspace = true itertools.workspace = true serde.workspace = true tracing.workspace = true -futures-util = "0.3.31" -tokio.workspace = true auto_impl = "1.3.0" [dev-dependencies] diff --git a/crates/db/README.md b/crates/db/README.md index cf95ed3..cfab334 100644 --- a/crates/db/README.md +++ b/crates/db/README.md @@ -6,7 +6,7 @@ Node. 
This library contains the following: - Traits for reading and writing Signet events -- Table definitions for Signet Events, Headers, and JournalHashes +- Table definitions for Signet Events and Headers - Helpers for reading, writing, reverting, Signet EVM blocks and headers ## Significant Traits diff --git a/crates/db/src/journal/ingestor.rs b/crates/db/src/journal/ingestor.rs deleted file mode 100644 index 481e5a3..0000000 --- a/crates/db/src/journal/ingestor.rs +++ /dev/null @@ -1,72 +0,0 @@ -use crate::{SignetDbRw, journal::JournalDb}; -use futures_util::StreamExt; -use reth::providers::ProviderResult; -use signet_journal::{Journal, JournalStream}; -use signet_node_types::NodeTypesDbTrait; -use std::sync::Arc; -use tokio::task::JoinHandle; - -/// A task that ingests journals into a reth database. -#[derive(Debug)] -pub struct JournalIngestor { - db: Arc>, -} - -impl From> for JournalIngestor { - fn from(value: SignetDbRw) -> Self { - Self::new(value.into()) - } -} - -impl From>> for JournalIngestor { - fn from(value: Arc>) -> Self { - Self::new(value) - } -} - -impl JournalIngestor { - /// Create a new `JournalIngestor` with the given database provider. - pub const fn new(db: Arc>) -> Self { - Self { db } - } - - async fn task_future(self, mut stream: S) -> ProviderResult<()> - where - S: JournalStream<'static> + Send + Unpin + 'static, - { - while let Some(Journal::V1(journal)) = stream.next().await { - // FUTURE: Sanity check that the header height matches the update - // height. Sanity check that both heights are 1 greater than the - // last height in the database. - - let db = self.db.clone(); - - // DB interaction is sync, so we spawn a blocking task for it. We - // immediately await that task. 
This prevents blocking the worker - // thread - tokio::task::spawn_blocking(move || db.ingest(journal)) - .await - .expect("ingestion should not panic")?; - } - // Stream has ended, return Ok - Ok(()) - } - - /// Spawn a task to ingest journals from the provided stream. - pub fn spawn(self, stream: S) -> JoinHandle> - where - S: JournalStream<'static> + Send + Unpin + 'static, - { - tokio::spawn(self.task_future(stream)) - } -} - -/// Ingest journals from a stream into a reth database. -pub async fn ingest_journals(db: Arc>, stream: S) -> ProviderResult<()> -where - Db: NodeTypesDbTrait, - S: JournalStream<'static> + Send + Unpin + 'static, -{ - let ingestor = JournalIngestor::new(db); - ingestor.task_future(stream).await -} diff --git a/crates/db/src/journal/mod.rs b/crates/db/src/journal/mod.rs deleted file mode 100644 index 2e898db..0000000 --- a/crates/db/src/journal/mod.rs +++ /dev/null @@ -1,10 +0,0 @@ -//! Utilities for working with Signet journals in a reth database. - -mod r#trait; -pub use r#trait::JournalDb; - -mod provider; -pub use provider::JournalProviderTask; - -mod ingestor; -pub use ingestor::{JournalIngestor, ingest_journals}; diff --git a/crates/db/src/journal/provider.rs b/crates/db/src/journal/provider.rs deleted file mode 100644 index 31cad05..0000000 --- a/crates/db/src/journal/provider.rs +++ /dev/null @@ -1,80 +0,0 @@ -use crate::journal::JournalDb; -use futures_util::StreamExt; -use reth::{ - primitives::SealedHeader, - providers::{ - CanonChainTracker, DatabaseProviderFactory, DatabaseProviderRW, ProviderResult, - providers::BlockchainProvider, - }, - rpc::types::engine::ForkchoiceState, -}; -use signet_journal::{Journal, JournalStream}; -use signet_node_types::{NodeTypesDbTrait, SignetNodeTypes}; -use tokio::task::JoinHandle; - -/// A task that processes journal updates for a specific database, and calls -/// the appropriate methods on a [`BlockchainProvider`] to update the in-memory -/// chain view. 
-#[derive(Debug, Clone)] -pub struct JournalProviderTask { - provider: BlockchainProvider>, -} - -impl JournalProviderTask { - /// Instantiate a new task. - pub const fn new(provider: BlockchainProvider>) -> Self { - Self { provider } - } - - /// Get a reference to the provider. - pub const fn provider(&self) -> &BlockchainProvider> { - &self.provider - } - - /// Deconstruct the task into its provider. - pub fn into_inner(self) -> BlockchainProvider> { - self.provider - } - - /// Create a future for the task, suitable for [`tokio::spawn`] or another - /// task-spawning system. - pub async fn task_future(self, mut journals: S) -> ProviderResult<()> - where - S: JournalStream<'static> + Send + Unpin + 'static, - { - loop { - let Some(Journal::V1(journal)) = journals.next().await else { break }; - - let rw = self.provider.database_provider_rw().map(DatabaseProviderRW); - - let r_header = SealedHeader::new_unhashed(journal.header().clone()); - let block_hash = r_header.hash(); - - // DB interaction is sync, so we spawn a blocking task for it. We - // immediately await that task. This prevents blocking the worker - // thread - tokio::task::spawn_blocking(move || rw?.ingest(journal)) - .await - .expect("ingestion should not panic")?; - - self.provider.set_canonical_head(r_header.clone()); - self.provider.set_safe(r_header.clone()); - self.provider.set_finalized(r_header); - self.provider.on_forkchoice_update_received(&ForkchoiceState { - head_block_hash: block_hash, - safe_block_hash: block_hash, - finalized_block_hash: block_hash, - }); - } - - Ok(()) - } - - /// Spawn the journal provider task. 
- pub fn spawn(self, journals: S) -> JoinHandle> - where - S: JournalStream<'static> + Send + Unpin + 'static, - { - tokio::spawn(self.task_future(journals)) - } -} diff --git a/crates/db/src/journal/trait.rs b/crates/db/src/journal/trait.rs deleted file mode 100644 index 014714f..0000000 --- a/crates/db/src/journal/trait.rs +++ /dev/null @@ -1,55 +0,0 @@ -use crate::RuWriter; -use alloy::consensus::{BlockHeader, Header}; -use reth::{providers::ProviderResult, revm::db::BundleState}; -use signet_evm::{BlockResult, ExecutionOutcome}; -use signet_journal::HostJournal; -use signet_types::primitives::{RecoveredBlock, SealedBlock, SealedHeader, TransactionSigned}; - -/// A database that can be updated with journals. -pub trait JournalDb: RuWriter { - /// Ingest a journal into the database. - /// - /// This will create a [`BlockResult`] from the provided header and update, - /// and append it to the database using [`RuWriter::append_host_block`]. - /// - /// This DOES NOT update tables containing historical transactions, - /// receipts, events, etc. It only updates tables related to headers, - /// and state. - /// - /// This is intended to be used for tx simulation, and other purposes that - /// need fast state access WITHTOUT needing to retrieve historical data. - fn ingest(&self, journal: HostJournal<'static>) -> ProviderResult<()> { - let journal_hash = journal.journal_hash(); - - let (meta, bsi) = journal.into_parts(); - let (host_height, _, header) = meta.into_parts(); - - // TODO: remove the clone in future versions. This can be achieved by - // _NOT_ making a `BlockResult` and instead manually updating relevan - // tables. However, this means diverging more fro the underlying reth - // logic that we are currently re-using. 
- let bundle_state: BundleState = bsi.into(); - let execution_outcome = ExecutionOutcome::new(bundle_state, vec![], header.number()); - - let block: SealedBlock = - SealedBlock { header: SealedHeader::new(header), body: Default::default() }; - let block_result = BlockResult { - sealed_block: RecoveredBlock::new(block, vec![]), - execution_outcome, - host_height, - }; - - self.append_host_block( - None, - std::iter::empty(), - std::iter::empty(), - std::iter::empty(), - &block_result, - journal_hash, - )?; - - Ok(()) - } -} - -impl JournalDb for T where T: RuWriter {} diff --git a/crates/db/src/lib.rs b/crates/db/src/lib.rs index 7c0458c..74b7c11 100644 --- a/crates/db/src/lib.rs +++ b/crates/db/src/lib.rs @@ -23,14 +23,11 @@ pub use consistency::ProviderConsistencyExt; mod convert; pub use convert::DataCompat; -pub mod journal; - mod provider; mod tables; pub use tables::{ - DbEnter, DbEnterToken, DbSignetEvent, DbTransact, DbZenithHeader, JournalHashes, SignetEvents, - ZenithHeaders, + DbEnter, DbEnterToken, DbSignetEvent, DbTransact, DbZenithHeader, SignetEvents, ZenithHeaders, }; mod traits; diff --git a/crates/db/src/provider.rs b/crates/db/src/provider.rs index f2cd7da..ec32a71 100644 --- a/crates/db/src/provider.rs +++ b/crates/db/src/provider.rs @@ -1,6 +1,6 @@ use crate::{ DataCompat, DbZenithHeader, RuChain, SignetDbRw, ZenithHeaders, - tables::{DbSignetEvent, JournalHashes, SignetEvents}, + tables::{DbSignetEvent, SignetEvents}, traits::RuWriter, }; use alloy::{ @@ -41,28 +41,6 @@ where BlockNumReader::last_block_number(&self.0) } - fn insert_journal_hash(&self, ru_height: u64, hash: B256) -> ProviderResult<()> { - self.tx_ref().put::(ru_height, hash)?; - Ok(()) - } - - fn remove_journal_hash(&self, ru_height: u64) -> ProviderResult<()> { - self.tx_ref().delete::(ru_height, None)?; - Ok(()) - } - - fn get_journal_hash(&self, ru_height: u64) -> ProviderResult> { - self.tx_ref().get::(ru_height).map_err(Into::into) - } - - #[track_caller] - fn 
latest_journal_hash(&self) -> ProviderResult { - let latest_height = self.last_block_number()?; - Ok(self - .get_journal_hash(latest_height)? - .expect("DB in corrupt state. Missing Journal Hash for latest height")) - } - /// Insert an enter into the DB /// This is a signet-specific function that inserts an enter event into the /// [`SignetEvents`] table. @@ -118,7 +96,6 @@ where } /// Inserts the zenith block into the database, always modifying the following tables: - /// * [`JournalHashes`] /// * [`CanonicalHeaders`](tables::CanonicalHeaders) /// * [`Headers`](tables::Headers) /// * [`HeaderTerminalDifficulties`](tables::HeaderTerminalDifficulties) @@ -144,7 +121,6 @@ where &self, header: Option, block: &RecoveredBlock, - journal_hash: B256, ) -> ProviderResult { // Implementation largely copied from // `BlockWriter::insert_block` @@ -154,14 +130,9 @@ where // Last reviewed at tag v1.9.0 let block_number = block.number(); - // SIGNET-SPECIFIC - // Put journal hash into the DB if let Some(header) = header { self.insert_signet_header(header, block_number)?; } - // SIGNET-SPECIFIC - // Put journal hash into the DB - self.tx_ref().put::(block_number, journal_hash)?; let block_hash = block.block.header.hash(); let block_header = block.block.header.header(); @@ -366,7 +337,6 @@ where enters: impl IntoIterator, enter_tokens: impl IntoIterator, block_result: &BlockResult, - journal_hash: B256, ) -> ProviderResult<()> { // Implementation largely copied from // `BlockWriter::append_blocks_with_state` @@ -378,7 +348,7 @@ where let BlockResult { sealed_block: block, execution_outcome, .. } = block_result; let ru_height = block.number(); - self.insert_signet_block(header, block, journal_hash)?; + self.insert_signet_block(header, block)?; // Write the state and match the storage location that Reth uses. 
self.ru_write_state(execution_outcome, OriginalValuesKnown::No)?; diff --git a/crates/db/src/tables.rs b/crates/db/src/tables.rs index 3f5359d..b090ac3 100644 --- a/crates/db/src/tables.rs +++ b/crates/db/src/tables.rs @@ -346,22 +346,6 @@ impl Decompress for DbSignetEvent { } } -/// Table that maps rollup block heights to post-block journal hashes. -#[derive(Debug, Clone, Copy)] -pub struct JournalHashes { - _private: (), -} - -impl Table for JournalHashes { - const NAME: &'static str = ::NAME; - - const DUPSORT: bool = ::DUPSORT; - - type Key = u64; - - type Value = B256; -} - #[cfg(test)] mod test { use super::*; diff --git a/crates/db/src/traits.rs b/crates/db/src/traits.rs index 53499d6..ccb85bf 100644 --- a/crates/db/src/traits.rs +++ b/crates/db/src/traits.rs @@ -1,5 +1,5 @@ use crate::{DbExtractionResults, DbSignetEvent, RuChain, SignetDbRw}; -use alloy::primitives::{B256, BlockNumber}; +use alloy::primitives::BlockNumber; use itertools::Itertools; #[cfg(doc)] use reth::providers::DatabaseProviderRW; @@ -18,18 +18,6 @@ pub trait RuWriter { /// Get the last block number fn last_block_number(&self) -> ProviderResult; - /// Insert a journal hash into the DB. - fn insert_journal_hash(&self, rollup_height: u64, hash: B256) -> ProviderResult<()>; - - /// Remove a journal hash from the DB. - fn remove_journal_hash(&self, rollup_height: u64) -> ProviderResult<()>; - - /// Get a journal hash from the DB. - fn get_journal_hash(&self, rollup_height: u64) -> ProviderResult>; - - /// Get the latest journal hash from the DB. - fn latest_journal_hash(&self) -> ProviderResult; - /// Store a zenith header in the DB fn insert_signet_header( &self, @@ -45,7 +33,6 @@ pub trait RuWriter { &self, header: Option, block: &RecoveredBlock, - journal_hash: B256, ) -> ProviderResult; /// Append a zenith block body to the DB. @@ -225,7 +212,6 @@ pub trait RuWriter { } /// Add the output of a host block to the DB. 
- #[allow(clippy::too_many_arguments)] fn append_host_block( &self, header: Option, @@ -233,7 +219,6 @@ pub trait RuWriter { enters: impl IntoIterator, enter_tokens: impl IntoIterator, block_result: &BlockResult, - journal_hash: B256, ) -> ProviderResult<()>; /// Take the block and execution range from the DB, reverting the blocks diff --git a/crates/db/tests/db.rs b/crates/db/tests/db.rs index 6d4f769..b377d08 100644 --- a/crates/db/tests/db.rs +++ b/crates/db/tests/db.rs @@ -26,7 +26,6 @@ fn test_insert_signet_block() { let factory = test_common::create_test_provider_factory(); let writer = factory.provider_rw().unwrap(); - let journal_hash = B256::repeat_byte(0x55); let header = Some(Zenith::BlockHeader { rollupChainId: U256::from(RU_CHAIN_ID), hostBlockNumber: U256::from(DEPLOY_HEIGHT), @@ -55,15 +54,13 @@ fn test_insert_signet_block() { senders: std::iter::repeat_n(Address::repeat_byte(0x33), 10).collect(), }; - writer.insert_signet_block(header, &block, journal_hash).unwrap(); + writer.insert_signet_block(header, &block).unwrap(); writer.commit().unwrap(); let reader = factory.provider_rw().unwrap(); // Check basic updates assert_eq!(reader.last_block_number().unwrap(), block.number()); - assert_eq!(reader.latest_journal_hash().unwrap(), journal_hash); - assert_eq!(reader.get_journal_hash(block.number()).unwrap(), Some(journal_hash)); // This tests resolving `BlockId::Latest` assert_eq!(reader.best_block_number().unwrap(), block.number()); @@ -90,7 +87,6 @@ fn test_transaction_hash_indexing() { let factory = test_common::create_test_provider_factory(); let writer = factory.provider_rw().unwrap(); - let journal_hash = B256::repeat_byte(0x55); let header = Some(Zenith::BlockHeader { rollupChainId: U256::from(RU_CHAIN_ID), hostBlockNumber: U256::from(DEPLOY_HEIGHT), @@ -119,7 +115,7 @@ fn test_transaction_hash_indexing() { senders: std::iter::repeat_n(Address::repeat_byte(0x33), 5).collect(), }; - writer.insert_signet_block(header, &block, 
journal_hash).unwrap(); + writer.insert_signet_block(header, &block).unwrap(); writer.commit().unwrap(); let reader = factory.provider_rw().unwrap(); diff --git a/crates/node-tests/src/context.rs b/crates/node-tests/src/context.rs index cded0bd..b9da811 100644 --- a/crates/node-tests/src/context.rs +++ b/crates/node-tests/src/context.rs @@ -24,9 +24,9 @@ use reth_db::{PlainAccountState, transaction::DbTxMut}; use reth_exex_test_utils::{Adapter, TestExExHandle, TmpDB as TmpDb}; use reth_node_api::FullNodeComponents; use signet_db::DbProviderExt; -use signet_node::SignetNodeBuilder; +use signet_node::{NodeStatus, SignetNodeBuilder}; use signet_node_config::test_utils::test_config; -use signet_node_types::{NodeStatus, SignetNodeTypes}; +use signet_node_types::SignetNodeTypes; use signet_test_utils::contracts::counter::COUNTER_DEPLOY_CODE; use signet_types::constants::{HostPermitted, RollupPermitted, SignetSystemConstants}; use signet_zenith::{HostOrders::OrdersInstance, RollupPassage::RollupPassageInstance}; diff --git a/crates/node/Cargo.toml b/crates/node/Cargo.toml index d1219d0..3a6d7d7 100644 --- a/crates/node/Cargo.toml +++ b/crates/node/Cargo.toml @@ -19,7 +19,10 @@ signet-blobber.workspace = true signet-tx-cache.workspace = true signet-types.workspace = true +ajj.workspace = true alloy.workspace = true +axum = "0.8.1" +interprocess = { version = "2.2.2", features = ["tokio"] } reth.workspace = true reth-chainspec.workspace = true @@ -33,4 +36,5 @@ futures-util.workspace = true metrics.workspace = true reqwest.workspace = true tokio.workspace = true +tower-http = { version = "0.6.2", features = ["cors"] } tracing.workspace = true diff --git a/crates/node/src/builder.rs b/crates/node/src/builder.rs index 1cc4205..183b05b 100644 --- a/crates/node/src/builder.rs +++ b/crates/node/src/builder.rs @@ -1,6 +1,6 @@ #![allow(clippy::type_complexity)] -use crate::{GENESIS_JOURNAL_HASH, SignetNode}; +use crate::{NodeStatus, SignetNode}; use eyre::OptionExt; use reth::{ 
primitives::EthPrimitives, @@ -13,7 +13,7 @@ use reth_node_api::{FullNodeComponents, NodeTypes}; use signet_block_processor::AliasOracleFactory; use signet_db::DbProviderExt; use signet_node_config::SignetNodeConfig; -use signet_node_types::{NodeStatus, NodeTypesDbTrait, SignetNodeTypes}; +use signet_node_types::{NodeTypesDbTrait, SignetNodeTypes}; use std::sync::Arc; /// A type that does not implement [`AliasOracleFactory`]. @@ -199,11 +199,6 @@ where writer.tx_mut().clear::()?; writer.tx_mut().clear::()?; - writer.tx_ref().put::(0, GENESIS_JOURNAL_HASH)?; - // we do not need to pre-populate the `ZenithHeaders` or - // `SignetEvents` tables, as missing data is legal in those - // tables - Ok(()) }, )?; diff --git a/crates/node/src/lib.rs b/crates/node/src/lib.rs index 92b97f1..7b5aa9e 100644 --- a/crates/node/src/lib.rs +++ b/crates/node/src/lib.rs @@ -17,6 +17,11 @@ pub use builder::SignetNodeBuilder; mod metrics; mod node; -pub use node::{GENESIS_JOURNAL_HASH, SignetNode}; +pub use node::SignetNode; mod rpc; + +mod serve; + +mod status; +pub use status::NodeStatus; diff --git a/crates/node/src/node.rs b/crates/node/src/node.rs index 1839aa4..7d3d43a 100644 --- a/crates/node/src/node.rs +++ b/crates/node/src/node.rs @@ -1,8 +1,8 @@ -use crate::metrics; +use crate::{NodeStatus, metrics, serve::RpcServerGuard}; use alloy::{ consensus::BlockHeader, eips::NumHash, - primitives::{B256, BlockNumber, b256}, + primitives::{B256, BlockNumber}, }; use eyre::Context; use futures_util::StreamExt; @@ -22,17 +22,12 @@ use signet_blobber::BlobFetcher; use signet_block_processor::{AliasOracleFactory, SignetBlockProcessorV1}; use signet_db::{DbProviderExt, ProviderConsistencyExt, RuChain, RuWriter}; use signet_node_config::SignetNodeConfig; -use signet_node_types::{NodeStatus, NodeTypesDbTrait, SignetNodeTypes}; -use signet_rpc::RpcServerGuard; +use signet_node_types::{NodeTypesDbTrait, SignetNodeTypes}; use signet_types::{PairedHeights, constants::SignetSystemConstants}; use 
std::{fmt, mem::MaybeUninit, sync::Arc}; use tokio::sync::watch; use tracing::{debug, info, instrument}; -/// The genesis journal hash for the signet chain. -pub const GENESIS_JOURNAL_HASH: B256 = - b256!("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"); - /// Make it easier to write some args type PrimitivesOf = <::Types as NodeTypes>::Primitives; type ExExNotification = reth_exex::ExExNotification>; diff --git a/crates/node/src/rpc.rs b/crates/node/src/rpc.rs index 80d79c2..b963685 100644 --- a/crates/node/src/rpc.rs +++ b/crates/node/src/rpc.rs @@ -1,9 +1,12 @@ -use crate::SignetNode; +use crate::{ + SignetNode, + serve::{RpcServerGuard, ServeConfig}, +}; use reth::{primitives::EthPrimitives, rpc::builder::config::RethRpcServerConfig}; use reth_node_api::{FullNodeComponents, NodeTypes}; use signet_block_processor::AliasOracleFactory; use signet_node_types::NodeTypesDbTrait; -use signet_rpc::{RpcCtx, RpcServerGuard, ServeConfig}; +use signet_rpc::RpcCtx; use signet_tx_cache::TxCache; use tracing::info; diff --git a/crates/node/src/serve.rs b/crates/node/src/serve.rs new file mode 100644 index 0000000..bfd07da --- /dev/null +++ b/crates/node/src/serve.rs @@ -0,0 +1,249 @@ +use ajj::{ + Router, + pubsub::{Connect, ServerShutdown}, +}; +use axum::http::HeaderValue; +use interprocess::local_socket as ls; +use reqwest::Method; +use reth::{args::RpcServerArgs, rpc::builder::CorsDomainError, tasks::TaskExecutor}; +use std::{future::IntoFuture, net::SocketAddr}; +use tokio::task::JoinHandle; +use tower_http::cors::{AllowOrigin, Any, CorsLayer}; +use tracing::error; + +/// Guard to shutdown the RPC servers. 
When dropped, this will shutdown all +/// running servers +#[derive(Default)] +pub(crate) struct RpcServerGuard { + http: Option>, + ws: Option>, + ipc: Option, +} + +impl core::fmt::Debug for RpcServerGuard { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("RpcServerGuard") + .field("http", &self.http.is_some()) + .field("ipc", &self.ipc.is_some()) + .field("ws", &self.ws.is_some()) + .finish() + } +} + +impl Drop for RpcServerGuard { + fn drop(&mut self) { + if let Some(http) = self.http.take() { + http.abort(); + } + if let Some(ws) = self.ws.take() { + ws.abort(); + } + // IPC is handled by its own drop guards. + } +} + +/// Configuration for the RPC server. +#[derive(Clone, Debug)] +pub(crate) struct ServeConfig { + /// HTTP server addresses. + pub http: Vec, + /// CORS header to be used for HTTP (if any). + pub http_cors: Option, + /// WS server addresses. + pub ws: Vec, + /// CORS header to be used for WS (if any). + pub ws_cors: Option, + /// IPC name info. + pub ipc: Option, +} + +impl From for ServeConfig { + fn from(args: RpcServerArgs) -> Self { + let http = if args.http { + vec![SocketAddr::from((args.http_addr, args.http_port))] + } else { + vec![] + }; + let ws = + if args.ws { vec![SocketAddr::from((args.ws_addr, args.ws_port))] } else { vec![] }; + + let http_cors = args.http_corsdomain; + let ws_cors = args.ws_allowed_origins; + + let ipc = if !args.ipcdisable { Some(args.ipcpath) } else { None }; + + Self { http, http_cors, ws, ws_cors, ipc } + } +} + +impl ServeConfig { + /// Serve the router on the given addresses. + async fn serve_http( + &self, + tasks: &TaskExecutor, + router: Router<()>, + ) -> eyre::Result>> { + if self.http.is_empty() { + return Ok(None); + } + serve_axum(tasks, router, &self.http, self.http_cors.as_deref()).await.map(Some) + } + + /// Serve the router on the given addresses. 
+ async fn serve_ws( + &self, + tasks: &TaskExecutor, + router: Router<()>, + ) -> eyre::Result>> { + if self.ws.is_empty() { + return Ok(None); + } + serve_ws(tasks, router, &self.ws, self.ws_cors.as_deref()).await.map(Some) + } + + /// Serve the router on the given ipc path. + async fn serve_ipc( + &self, + tasks: &TaskExecutor, + router: &Router<()>, + ) -> eyre::Result> { + let Some(endpoint) = &self.ipc else { return Ok(None) }; + let shutdown = serve_ipc(tasks, router, endpoint).await?; + Ok(Some(shutdown)) + } + + /// Serve the router. + pub(crate) async fn serve( + &self, + tasks: &TaskExecutor, + router: Router<()>, + ) -> eyre::Result { + let (http, ws, ipc) = tokio::try_join!( + self.serve_http(tasks, router.clone()), + self.serve_ws(tasks, router.clone()), + self.serve_ipc(tasks, &router), + )?; + Ok(RpcServerGuard { http, ws, ipc }) + } +} + +fn make_cors(cors: Option<&str>) -> Result { + let origins = match cors { + None | Some("*") => AllowOrigin::any(), + Some(cors) => { + if cors.split(',').any(|o| o == "*") { + return Err(CorsDomainError::WildCardNotAllowed { input: cors.to_string() }); + } + cors.split(',') + .map(|domain| { + domain + .parse::() + .map_err(|_| CorsDomainError::InvalidHeader { domain: domain.to_string() }) + }) + .collect::, _>>()? + .into() + } + }; + + Ok(CorsLayer::new() + .allow_methods([Method::GET, Method::POST]) + .allow_origin(origins) + .allow_headers(Any)) +} + +/// Serve the axum router on the specified addresses. +async fn serve( + tasks: &TaskExecutor, + addrs: &[SocketAddr], + service: axum::Router, +) -> Result, eyre::Error> { + let listener = tokio::net::TcpListener::bind(addrs).await?; + + let fut = async move { + match axum::serve(listener, service).into_future().await { + Ok(_) => (), + Err(err) => error!(%err, "Error serving RPC via axum"), + } + }; + + Ok(tasks.spawn(fut)) +} + +/// Serve the router on the given addresses using axum. 
+async fn serve_axum( + tasks: &TaskExecutor, + router: Router<()>, + addrs: &[SocketAddr], + cors: Option<&str>, +) -> eyre::Result> { + let handle = tasks.handle().clone(); + let cors = make_cors(cors)?; + + let service = router.into_axum_with_handle("/", handle).layer(cors); + + serve(tasks, addrs, service).await +} + +/// Serve the router on the given address using a Websocket. +async fn serve_ws( + tasks: &TaskExecutor, + router: Router<()>, + addrs: &[SocketAddr], + cors: Option<&str>, +) -> eyre::Result> { + let handle = tasks.handle().clone(); + let cors = make_cors(cors)?; + + let service = router.into_axum_with_ws_and_handle("/rpc", "/", handle).layer(cors); + + serve(tasks, addrs, service).await +} + +fn to_name(path: &std::ffi::OsStr) -> std::io::Result> { + if cfg!(windows) && !path.as_encoded_bytes().starts_with(br"\\.\pipe\") { + ls::ToNsName::to_ns_name::(path) + } else { + ls::ToFsName::to_fs_name::(path) + } +} + +/// Serve the router on the given address using IPC. +async fn serve_ipc( + tasks: &TaskExecutor, + router: &Router<()>, + endpoint: &str, +) -> eyre::Result { + let name = std::ffi::OsStr::new(endpoint); + let name = to_name(name).expect("invalid name"); + ls::ListenerOptions::new() + .name(name) + .serve_with_handle(router.clone(), tasks.handle().clone()) + .await + .map_err(Into::into) +} + +// Some code in this file has been copied and modified from reth +// +// The original license is included below: +// +// The MIT License (MIT) +// +// Copyright (c) 2022-2025 Reth Contributors +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: 
+//. +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. diff --git a/crates/node/src/status.rs b/crates/node/src/status.rs new file mode 100644 index 0000000..0aff564 --- /dev/null +++ b/crates/node/src/status.rs @@ -0,0 +1,8 @@ +/// Items that can be sent via the status channel. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum NodeStatus { + /// Node is booting. + Booting, + /// Node's current height. + AtHeight(u64), +} diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index 6594a41..f0b0b95 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -30,16 +30,12 @@ reth-db-common.workspace = true reth-node-api.workspace = true reth-rpc-eth-api.workspace = true -axum = "0.8.1" dashmap = "6.1.0" eyre.workspace = true -interprocess = { version = "2.2.2", features = ["tokio"] } -reqwest.workspace = true serde.workspace = true thiserror.workspace = true tokio = { workspace = true, features = ["macros"] } tokio-util = "0.7.13" -tower-http = { version = "0.6.2", features = ["cors"] } tracing.workspace = true serde_json.workspace = true futures-util = "0.3.31" diff --git a/crates/rpc/src/config.rs b/crates/rpc/src/config.rs deleted file mode 100644 index 7dbc579..0000000 --- a/crates/rpc/src/config.rs +++ /dev/null @@ -1,121 +0,0 @@ -use crate::utils::{serve_axum, serve_ipc, serve_ws}; -use ajj::{Router, pubsub::ServerShutdown}; -use reth::{args::RpcServerArgs, 
tasks::TaskExecutor}; -use std::net::SocketAddr; -use tokio::task::JoinHandle; - -/// Guard to shutdown the RPC servers. When dropped, this will shutdown all -/// running servers -#[derive(Default)] -pub struct RpcServerGuard { - http: Option>, - ws: Option>, - ipc: Option, -} - -impl core::fmt::Debug for RpcServerGuard { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("RpcServerGuard") - .field("http", &self.http.is_some()) - .field("ipc", &self.ipc.is_some()) - .field("ws", &self.ws.is_some()) - .finish() - } -} - -impl Drop for RpcServerGuard { - fn drop(&mut self) { - if let Some(http) = self.http.take() { - http.abort(); - } - if let Some(ws) = self.ws.take() { - ws.abort(); - } - // IPC is handled by its own drop guards. - } -} - -/// Configuration for the RPC server. -#[derive(Clone, Debug)] -pub struct ServeConfig { - /// HTTP server addresses. - pub http: Vec, - /// CORS header to be used for HTTP (if any). - pub http_cors: Option, - /// WS server addresses. - pub ws: Vec, - /// CORS header to be used for WS (if any). - pub ws_cors: Option, - /// IPC name info. - pub ipc: Option, -} - -impl From for ServeConfig { - fn from(args: RpcServerArgs) -> Self { - let http = if args.http { - vec![SocketAddr::from((args.http_addr, args.http_port))] - } else { - vec![] - }; - let ws = - if args.ws { vec![SocketAddr::from((args.ws_addr, args.ws_port))] } else { vec![] }; - - let http_cors = args.http_corsdomain; - let ws_cors = args.ws_allowed_origins; - - let ipc = if !args.ipcdisable { Some(args.ipcpath) } else { None }; - - Self { http, http_cors, ws, ws_cors, ipc } - } -} - -impl ServeConfig { - /// Serve the router on the given addresses. - async fn serve_http( - &self, - tasks: &TaskExecutor, - router: Router<()>, - ) -> eyre::Result>> { - if self.http.is_empty() { - return Ok(None); - } - serve_axum(tasks, router, &self.http, self.http_cors.as_deref()).await.map(Some) - } - - /// Serve the router on the given addresses. 
- async fn serve_ws( - &self, - tasks: &TaskExecutor, - router: Router<()>, - ) -> eyre::Result>> { - if self.ws.is_empty() { - return Ok(None); - } - serve_ws(tasks, router, &self.ws, self.ws_cors.as_deref()).await.map(Some) - } - - /// Serve the router on the given ipc path. - async fn serve_ipc( - &self, - tasks: &TaskExecutor, - router: &Router<()>, - ) -> eyre::Result> { - let Some(endpoint) = &self.ipc else { return Ok(None) }; - let shutdown = serve_ipc(tasks, router, endpoint).await?; - Ok(Some(shutdown)) - } - - /// Serve the router. - pub async fn serve( - &self, - tasks: &TaskExecutor, - router: Router<()>, - ) -> eyre::Result { - let (http, ws, ipc) = tokio::try_join!( - self.serve_http(tasks, router.clone()), - self.serve_ws(tasks, router.clone()), - self.serve_ipc(tasks, &router), - )?; - Ok(RpcServerGuard { http, ws, ipc }) - } -} diff --git a/crates/rpc/src/lib.rs b/crates/rpc/src/lib.rs index 0a5ff8a..ea1a5aa 100644 --- a/crates/rpc/src/lib.rs +++ b/crates/rpc/src/lib.rs @@ -5,37 +5,6 @@ //! integrated with `reth`, and expects a variety of `reth`-specific types to be //! passed in. As such, it is mostly useful within the context of a `signet` //! node. -//! -//! ## Usage Example -//! -//! ```rust -//! # use signet_rpc::{RpcCtx}; -//! # use signet_node_types::Pnt; -//! # use reth_node_api::FullNodeComponents; -//! # use reth::tasks::TaskExecutor; -//! use signet_rpc::{router, ServeConfig}; -//! -//! # pub async fn f(ctx: RpcCtx, tasks: &TaskExecutor) -> eyre::Result<()> -//! # where -//! # Host: FullNodeComponents, -//! # Signet: Pnt, -//! # { -//! let router = signet_rpc::router().with_state(ctx); -//! -//! let cfg = ServeConfig { -//! http: vec!["localhost:8080".parse()?], -//! http_cors: None, -//! ws: vec![], -//! ws_cors: None, -//! ipc: None, -//! }; -//! -//! // Spawn the server on the given addresses, the shutdown guard -//! // will shutdown the server(s) when dropped. -//! let shutdown_guard = cfg.serve(tasks, router).await?; -//! 
# Ok(()) -//! # } -//! ``` #![warn( missing_copy_implementations, @@ -49,10 +18,6 @@ #![deny(unused_must_use, rust_2018_idioms)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -mod config; - -pub use config::{RpcServerGuard, ServeConfig}; - mod ctx; pub use ctx::{LoadState, RpcCtx, SignetCtx}; @@ -72,8 +37,7 @@ mod interest; pub mod receipts; -/// Utils and simple serve functions. -pub mod utils; +mod utils; /// Re-exported for convenience pub use ::ajj; diff --git a/crates/rpc/src/utils.rs b/crates/rpc/src/utils.rs index a20d561..0092d8e 100644 --- a/crates/rpc/src/utils.rs +++ b/crates/rpc/src/utils.rs @@ -1,15 +1,4 @@ -use ajj::{ - Router, - pubsub::{Connect, ServerShutdown}, -}; -use axum::http::HeaderValue; -use interprocess::local_socket as ls; -use reqwest::Method; -use reth::{rpc::builder::CorsDomainError, tasks::TaskExecutor}; -use std::{future::IntoFuture, iter::StepBy, net::SocketAddr, ops::RangeInclusive}; -use tokio::task::JoinHandle; -use tower_http::cors::{AllowOrigin, Any, CorsLayer}; -use tracing::error; +use std::{iter::StepBy, ops::RangeInclusive}; macro_rules! await_handler { ($h:expr) => { @@ -114,124 +103,3 @@ impl Iterator for BlockRangeInclusiveIter { Some((start, end)) } } - -fn make_cors(cors: Option<&str>) -> Result { - let origins = match cors { - None | Some("*") => AllowOrigin::any(), - Some(cors) => { - if cors.split(',').any(|o| o == "*") { - return Err(CorsDomainError::WildCardNotAllowed { input: cors.to_string() }); - } - cors.split(',') - .map(|domain| { - domain - .parse::() - .map_err(|_| CorsDomainError::InvalidHeader { domain: domain.to_string() }) - }) - .collect::, _>>()? - .into() - } - }; - - Ok(CorsLayer::new() - .allow_methods([Method::GET, Method::POST]) - .allow_origin(origins) - .allow_headers(Any)) -} - -/// Serve the axum router on the specified addresses. 
-async fn serve( - tasks: &TaskExecutor, - addrs: &[SocketAddr], - service: axum::Router, -) -> Result, eyre::Error> { - let listener = tokio::net::TcpListener::bind(addrs).await?; - - let fut = async move { - match axum::serve(listener, service).into_future().await { - Ok(_) => (), - Err(err) => error!(%err, "Error serving RPC via axum"), - } - }; - - Ok(tasks.spawn(fut)) -} - -/// Serve the router on the given addresses using axum. -pub async fn serve_axum( - tasks: &TaskExecutor, - router: Router<()>, - addrs: &[SocketAddr], - cors: Option<&str>, -) -> eyre::Result> { - let handle = tasks.handle().clone(); - let cors = make_cors(cors)?; - - let service = router.into_axum_with_handle("/", handle).layer(cors); - - serve(tasks, addrs, service).await -} - -/// Serve the router on the given address using a Websocket. -pub async fn serve_ws( - tasks: &TaskExecutor, - router: Router<()>, - addrs: &[SocketAddr], - cors: Option<&str>, -) -> eyre::Result> { - let handle = tasks.handle().clone(); - let cors = make_cors(cors)?; - - let service = router.into_axum_with_ws_and_handle("/rpc", "/", handle).layer(cors); - - serve(tasks, addrs, service).await -} - -fn to_name(path: &std::ffi::OsStr) -> std::io::Result> { - if cfg!(windows) && !path.as_encoded_bytes().starts_with(br"\\.\pipe\") { - ls::ToNsName::to_ns_name::(path) - } else { - ls::ToFsName::to_fs_name::(path) - } -} - -/// Serve the router on the given address using IPC. 
-pub async fn serve_ipc( - tasks: &TaskExecutor, - router: &Router<()>, - endpoint: &str, -) -> eyre::Result { - let name = std::ffi::OsStr::new(endpoint); - let name = to_name(name).expect("invalid name"); - ls::ListenerOptions::new() - .name(name) - .serve_with_handle(router.clone(), tasks.handle().clone()) - .await - .map_err(Into::into) -} - -// Some code in this file has been copied and modified from reth -// -// The original license is included below: -// -// The MIT License (MIT) -// -// Copyright (c) 2022-2025 Reth Contributors -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -//. -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
From f105e1b4d2e363f6d0583df32937cb3127e94959 Mon Sep 17 00:00:00 2001 From: James Date: Mon, 16 Feb 2026 06:31:53 -0500 Subject: [PATCH 29/31] =?UTF-8?q?refactor:=20address=20PR=20review=20?= =?UTF-8?q?=E2=80=94=20fix=20bugs,=20unify=20types,=20and=20clean=20up?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Fix SubCleanerTask thread leak (missing break on Weak upgrade failure) - Fix missing tasks.insert in subscribe() preventing unsubscribe - Implement eth_syncing with concrete SyncingResponse type - Fix reward percentile cursor overflow in fee_history - Switch cold-storage endpoints from spawn_blocking to spawn - Add StorageRpcConfigBuilder for 11-field config struct - Unify FilterOutput/SubscriptionBuffer into generic EventBuffer - Replace hard-coded 12s block time with slot_duration() from constants - Add EthError::TransactionMissing variant - Remove dead code (Either enums, unused methods, allow(dead_code)) - Fix license comment typos, min() style, mut rebinding nits Co-Authored-By: Claude Opus 4.6 --- crates/rpc-storage/src/config/ctx.rs | 8 +- crates/rpc-storage/src/config/rpc_config.rs | 98 ++++++++++++++++ crates/rpc-storage/src/debug/endpoints.rs | 4 +- crates/rpc-storage/src/debug/tracer.rs | 2 +- crates/rpc-storage/src/eth/endpoints.rs | 76 ++++++++---- crates/rpc-storage/src/eth/error.rs | 3 + crates/rpc-storage/src/eth/mod.rs | 4 +- crates/rpc-storage/src/interest/buffer.rs | 124 ++++++++++++++++++++ crates/rpc-storage/src/interest/filters.rs | 114 +----------------- crates/rpc-storage/src/interest/kind.rs | 6 - crates/rpc-storage/src/interest/mod.rs | 1 + crates/rpc-storage/src/interest/subs.rs | 99 ++-------------- crates/rpc-storage/src/signet/endpoints.rs | 2 +- crates/rpc-storage/tests/eth_rpc.rs | 23 +++- 14 files changed, 331 insertions(+), 233 deletions(-) create mode 100644 crates/rpc-storage/src/interest/buffer.rs diff --git a/crates/rpc-storage/src/config/ctx.rs 
b/crates/rpc-storage/src/config/ctx.rs index fb0cc95..e46324a 100644 --- a/crates/rpc-storage/src/config/ctx.rs +++ b/crates/rpc-storage/src/config/ctx.rs @@ -40,9 +40,9 @@ pub(crate) struct EvmBlockContext { /// /// # Construction /// -/// ```ignore -/// let ctx = StorageRpcCtx::new(storage, constants, tags, Some(tx_cache), StorageRpcConfig::default()); -/// ``` +/// Call [`StorageRpcCtx::new`] with unified storage, system constants, +/// block tags, an optional [`TxCache`], [`StorageRpcConfig`], and a +/// broadcast sender for [`NewBlockNotification`]s. #[derive(Debug)] pub struct StorageRpcCtx { inner: Arc>, @@ -265,7 +265,7 @@ impl StorageRpcCtx { if pending { header.parent_hash = parent_hash; header.number += 1; - header.timestamp += 12; + header.timestamp += self.constants().host().slot_duration(); header.base_fee_per_gas = header.next_block_base_fee(alloy::eips::eip1559::BaseFeeParams::ethereum()); header.gas_limit = self.config().rpc_gas_cap; diff --git a/crates/rpc-storage/src/config/rpc_config.rs b/crates/rpc-storage/src/config/rpc_config.rs index b881deb..64f562a 100644 --- a/crates/rpc-storage/src/config/rpc_config.rs +++ b/crates/rpc-storage/src/config/rpc_config.rs @@ -15,6 +15,16 @@ use std::time::Duration; /// // Use defaults (matches reth defaults). /// let config = StorageRpcConfig::default(); /// assert_eq!(config.rpc_gas_cap, 30_000_000); +/// +/// // Use the builder to customise individual fields. +/// let config = StorageRpcConfig::builder() +/// .rpc_gas_cap(50_000_000) +/// .max_blocks_per_filter(5_000) +/// .build(); +/// assert_eq!(config.rpc_gas_cap, 50_000_000); +/// assert_eq!(config.max_blocks_per_filter, 5_000); +/// // Other fields retain their defaults. 
+/// assert_eq!(config.max_logs_per_response, 20_000); /// ``` #[derive(Debug, Clone, Copy)] pub struct StorageRpcConfig { @@ -83,6 +93,13 @@ pub struct StorageRpcConfig { pub default_bundle_timeout_ms: u64, } +impl StorageRpcConfig { + /// Create a new builder with all fields set to their defaults. + pub fn builder() -> StorageRpcConfigBuilder { + StorageRpcConfigBuilder::default() + } +} + impl Default for StorageRpcConfig { fn default() -> Self { Self { @@ -100,3 +117,84 @@ impl Default for StorageRpcConfig { } } } + +/// Builder for [`StorageRpcConfig`]. +/// +/// All fields default to the same values as [`StorageRpcConfig::default`]. +#[derive(Debug, Clone, Copy, Default)] +pub struct StorageRpcConfigBuilder { + inner: StorageRpcConfig, +} + +impl StorageRpcConfigBuilder { + /// Set the maximum gas for `eth_call` and `eth_estimateGas`. + pub const fn rpc_gas_cap(mut self, cap: u64) -> Self { + self.inner.rpc_gas_cap = cap; + self + } + + /// Set the maximum block range per `eth_getLogs` query. + pub const fn max_blocks_per_filter(mut self, max: u64) -> Self { + self.inner.max_blocks_per_filter = max; + self + } + + /// Set the maximum number of logs returned per response. + pub const fn max_logs_per_response(mut self, max: usize) -> Self { + self.inner.max_logs_per_response = max; + self + } + + /// Set the maximum wall-clock time for a single log query. + pub const fn max_log_query_deadline(mut self, deadline: Duration) -> Self { + self.inner.max_log_query_deadline = deadline; + self + } + + /// Set the maximum concurrent tracing/debug requests. + pub const fn max_tracing_requests(mut self, max: usize) -> Self { + self.inner.max_tracing_requests = max; + self + } + + /// Set the time-to-live for stale filters and subscriptions. + pub const fn stale_filter_ttl(mut self, ttl: Duration) -> Self { + self.inner.stale_filter_ttl = ttl; + self + } + + /// Set the number of recent blocks for gas price suggestions. 
+ pub const fn gas_oracle_block_count(mut self, count: u64) -> Self { + self.inner.gas_oracle_block_count = count; + self + } + + /// Set the percentile of effective tips for gas price suggestions. + pub const fn gas_oracle_percentile(mut self, percentile: f64) -> Self { + self.inner.gas_oracle_percentile = percentile; + self + } + + /// Set the maximum header history for `eth_feeHistory`. + pub const fn max_header_history(mut self, max: u64) -> Self { + self.inner.max_header_history = max; + self + } + + /// Set the maximum block history for `eth_feeHistory`. + pub const fn max_block_history(mut self, max: u64) -> Self { + self.inner.max_block_history = max; + self + } + + /// Set the default bundle simulation timeout in milliseconds. + pub const fn default_bundle_timeout_ms(mut self, ms: u64) -> Self { + self.inner.default_bundle_timeout_ms = ms; + self + } + + /// Build the configuration. + pub const fn build(self) -> StorageRpcConfig { + self.inner + } +} diff --git a/crates/rpc-storage/src/debug/endpoints.rs b/crates/rpc-storage/src/debug/endpoints.rs index b140284..d72fdf0 100644 --- a/crates/rpc-storage/src/debug/endpoints.rs +++ b/crates/rpc-storage/src/debug/endpoints.rs @@ -110,7 +110,7 @@ where } .instrument(span); - await_handler!(@response_option hctx.spawn_blocking(fut)) + await_handler!(@response_option hctx.spawn(fut)) } /// `debug_traceTransaction` handler. 
@@ -202,5 +202,5 @@ where } .instrument(span); - await_handler!(@response_option hctx.spawn_blocking(fut)) + await_handler!(@response_option hctx.spawn(fut)) } diff --git a/crates/rpc-storage/src/debug/tracer.rs b/crates/rpc-storage/src/debug/tracer.rs index 52ed546..fa24073 100644 --- a/crates/rpc-storage/src/debug/tracer.rs +++ b/crates/rpc-storage/src/debug/tracer.rs @@ -211,7 +211,7 @@ where // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: -//. +// // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // diff --git a/crates/rpc-storage/src/eth/endpoints.rs b/crates/rpc-storage/src/eth/endpoints.rs index ac886ab..e00f2c2 100644 --- a/crates/rpc-storage/src/eth/endpoints.rs +++ b/crates/rpc-storage/src/eth/endpoints.rs @@ -3,7 +3,7 @@ use crate::{ config::{EvmBlockContext, StorageRpcCtx, gas_oracle}, eth::{ - error::CallErrorData, + error::{CallErrorData, EthError}, helpers::{ AddrWithBlock, BlockParams, CfgFiller, FeeHistoryArgs, StorageAtArgs, SubscribeArgs, TxParams, await_handler, build_receipt, build_rpc_transaction, hot_reader_at_block, @@ -29,6 +29,7 @@ use alloy::{ rpc::types::{FeeHistory, Filter, Log}, }; use revm_inspectors::access_list::AccessListInspector; +use serde::Serialize; use signet_cold::{HeaderSpecifier, ReceiptSpecifier}; use signet_hot::{HistoryRead, HotKv, db::HotDbRead, model::HotKvRead}; use tracing::{Instrument, debug, trace_span}; @@ -44,6 +45,38 @@ pub(crate) async fn not_supported() -> ResponsePayload<(), ()> { ResponsePayload::method_not_found() } +/// Response for `eth_syncing`. +/// +/// Returns `false` when the node is fully synced, or a sync-status +/// object when it is still catching up. 
+#[derive(Debug, Clone, Serialize)] +#[serde(untagged)] +pub(crate) enum SyncingResponse { + /// Node is fully synced. + NotSyncing(bool), + /// Node is still syncing. + Syncing { + /// Block number the node started syncing from. + starting_block: U64, + /// Current block the node has synced to. + current_block: U64, + /// Highest known block number on the network. + highest_block: U64, + }, +} + +/// `eth_syncing` — returns sync status or `false` when fully synced. +pub(crate) async fn syncing(ctx: StorageRpcCtx) -> Result { + match ctx.tags().sync_status() { + Some(status) => Ok(SyncingResponse::Syncing { + starting_block: U64::from(status.starting_block), + current_block: U64::from(status.current_block), + highest_block: U64::from(status.highest_block), + }), + None => Ok(SyncingResponse::NotSyncing(false)), + } +} + /// Uncle count is always zero — Signet has no uncle blocks. pub(crate) async fn uncle_count() -> Result { Ok(U64::ZERO) @@ -96,7 +129,7 @@ where Ok(tip + U256::from(base_fee)) }; - await_handler!(@option hctx.spawn_blocking(task)) + await_handler!(@option hctx.spawn(task)) } /// `eth_maxPriorityFeePerGas` — suggests priority fee from recent block tips. @@ -115,7 +148,7 @@ where .map_err(|e| e.to_string()) }; - await_handler!(@option hctx.spawn_blocking(task)) + await_handler!(@option hctx.spawn(task)) } /// `eth_feeHistory` — returns base fee and reward percentile data. @@ -143,11 +176,12 @@ where block_count = block_count.min(max_fee_history); - let mut newest = newest; - if newest.is_pending() { - newest = BlockNumberOrTag::Latest; + let newest = if newest.is_pending() { block_count = block_count.saturating_sub(1); - } + BlockNumberOrTag::Latest + } else { + newest + }; let end_block = ctx.resolve_block_tag(newest); let end_block_plus = end_block + 1; @@ -224,7 +258,7 @@ where }) }; - await_handler!(@option hctx.spawn_blocking(task)) + await_handler!(@option hctx.spawn(task)) } /// Calculate reward percentiles for a single block. 
@@ -262,7 +296,7 @@ fn calculate_reward_percentiles( for &percentile in percentiles { let threshold = (gas_used as f64 * percentile / 100.0) as u64; - while tx_idx < tx_gas_and_tip.len() { + while tx_idx < tx_gas_and_tip.len() - 1 { cumulative_gas += tx_gas_and_tip[tx_idx].0; if cumulative_gas >= threshold { break; @@ -270,7 +304,7 @@ fn calculate_reward_percentiles( tx_idx += 1; } - result.push(tx_gas_and_tip.get(tx_idx).map(|&(_, tip)| tip).unwrap_or_default()); + result.push(tx_gas_and_tip[tx_idx].1); } result @@ -330,7 +364,7 @@ where })) }; - await_handler!(@option hctx.spawn_blocking(task)) + await_handler!(@option hctx.spawn(task)) } /// `eth_getBlockTransactionCount*` — transaction count in a block. @@ -356,7 +390,7 @@ where .map_err(|e| e.to_string()) }; - await_handler!(@option hctx.spawn_blocking(task)) + await_handler!(@option hctx.spawn(task)) } /// `eth_getBlockReceipts` — all receipts in a block. @@ -389,7 +423,7 @@ where Ok(Some(LazyReceipts { txs, receipts, base_fee })) }; - await_handler!(@option hctx.spawn_blocking(task)) + await_handler!(@option hctx.spawn(task)) } /// `eth_getBlockHeaderByHash` / `eth_getBlockHeaderByNumber`. @@ -454,7 +488,7 @@ where Ok(Some(build_rpc_transaction(&tx, &meta, base_fee))) }; - await_handler!(@option hctx.spawn_blocking(task)) + await_handler!(@option hctx.spawn(task)) } /// `eth_getRawTransactionByHash` — RLP-encoded transaction bytes. @@ -475,7 +509,7 @@ where .map_err(|e| e.to_string()) }; - await_handler!(@option hctx.spawn_blocking(task)) + await_handler!(@option hctx.spawn(task)) } /// `eth_getTransactionByBlock*AndIndex` — transaction by position in block. @@ -511,7 +545,7 @@ where Ok(Some(build_rpc_transaction(&tx, &meta, base_fee))) }; - await_handler!(@option hctx.spawn_blocking(task)) + await_handler!(@option hctx.spawn(task)) } /// `eth_getRawTransactionByBlock*AndIndex` — raw RLP bytes by position. 
@@ -537,7 +571,7 @@ where .map_err(|e| e.to_string()) }; - await_handler!(@option hctx.spawn_blocking(task)) + await_handler!(@option hctx.spawn(task)) } /// `eth_getTransactionReceipt` — receipt by tx hash. Fetches the receipt, @@ -566,13 +600,13 @@ where ) .map_err(|e| e.to_string())?; - let tx = tx.ok_or("receipt found but transaction missing")?.into_inner(); + let tx = tx.ok_or(EthError::TransactionMissing).map_err(|e| e.to_string())?.into_inner(); let base_fee = header.and_then(|h| h.base_fee_per_gas); Ok(Some(build_receipt(&cr, &tx, base_fee))) }; - await_handler!(@option hctx.spawn_blocking(task)) + await_handler!(@option hctx.spawn(task)) } // --------------------------------------------------------------------------- @@ -977,7 +1011,7 @@ where collect_log_stream(stream).await.map_err(|e| e.to_string()) }; - await_handler!(@option hctx.spawn_blocking(task)) + await_handler!(@option hctx.spawn(task)) } // --------------------------------------------------------------------------- @@ -1087,7 +1121,7 @@ where } }; - await_handler!(@option hctx.spawn_blocking(task)) + await_handler!(@option hctx.spawn(task)) } // --------------------------------------------------------------------------- diff --git a/crates/rpc-storage/src/eth/error.rs b/crates/rpc-storage/src/eth/error.rs index 52b4617..135b6b8 100644 --- a/crates/rpc-storage/src/eth/error.rs +++ b/crates/rpc-storage/src/eth/error.rs @@ -21,6 +21,9 @@ pub enum EthError { /// Block not found. #[error("block not found: {0}")] BlockNotFound(BlockId), + /// Receipt found but the corresponding transaction is missing. + #[error("receipt found but transaction missing")] + TransactionMissing, /// EVM execution error. 
     #[error("evm: {0}")]
     Evm(String),
diff --git a/crates/rpc-storage/src/eth/mod.rs b/crates/rpc-storage/src/eth/mod.rs
index 190c0fb..43aa594 100644
--- a/crates/rpc-storage/src/eth/mod.rs
+++ b/crates/rpc-storage/src/eth/mod.rs
@@ -6,7 +6,7 @@ use endpoints::{
     code_at, create_access_list, estimate_gas, fee_history, gas_price, get_filter_changes,
     get_logs, header_by, max_priority_fee_per_gas, new_block_filter, new_filter, not_supported,
     raw_transaction_by_block_and_index, raw_transaction_by_hash, send_raw_transaction, storage_at,
-    subscribe, transaction_by_block_and_index, transaction_by_hash, transaction_receipt,
+    subscribe, syncing, transaction_by_block_and_index, transaction_by_hash, transaction_receipt,
     uncle_block, uncle_count, uninstall_filter, unsubscribe,
 };
@@ -66,7 +66,7 @@ where
         // Unsupported methods
         // ---
         .route("protocolVersion", not_supported)
-        .route("syncing", not_supported)
+        .route("syncing", syncing::)
         .route("gasPrice", gas_price::)
         .route("maxPriorityFeePerGas", max_priority_fee_per_gas::)
         .route("feeHistory", fee_history::)
diff --git a/crates/rpc-storage/src/interest/buffer.rs b/crates/rpc-storage/src/interest/buffer.rs
new file mode 100644
index 0000000..0485714
--- /dev/null
+++ b/crates/rpc-storage/src/interest/buffer.rs
@@ -0,0 +1,124 @@
+//! Unified event buffer for filters and subscriptions.
+//!
+//! [`EventBuffer`] is a generic buffer over the block representation.
+//! Filters use `EventBuffer<B256>` (block hashes), while subscriptions
+//! use `EventBuffer<Header>` (full headers). Both variants share a
+//! common log-event arm.
+
+use alloy::{
+    primitives::B256,
+    rpc::types::{Header, Log},
+};
+use serde::Serialize;
+use std::collections::VecDeque;
+
+/// Buffer of chain events, parameterized by the block representation.
+///
+/// Filters use `EventBuffer<B256>` (block hashes), while subscriptions
+/// use `EventBuffer<Header>` (full headers).
+#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
+#[serde(untagged)]
+pub(crate) enum EventBuffer<B> {
+    /// Log entries.
+    Log(VecDeque<Log>),
+    /// Block events.
+    Block(VecDeque<B>),
+}
+
+impl<B> EventBuffer<B> {
+    /// True if the buffer contains no events.
+    pub(crate) fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// Number of events in the buffer.
+    pub(crate) fn len(&self) -> usize {
+        match self {
+            Self::Log(logs) => logs.len(),
+            Self::Block(blocks) => blocks.len(),
+        }
+    }
+
+    /// Extend this buffer with events from another buffer of the same kind.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the buffers are of different kinds (log vs. block).
+    pub(crate) fn extend(&mut self, other: Self) {
+        match (self, other) {
+            (Self::Log(a), Self::Log(b)) => a.extend(b),
+            (Self::Block(a), Self::Block(b)) => a.extend(b),
+            _ => panic!("attempted to extend with mismatched buffer kinds"),
+        }
+    }
+
+    /// Pop the next event from the front of the buffer.
+    pub(crate) fn pop_front(&mut self) -> Option<EventItem<B>> {
+        match self {
+            Self::Log(logs) => logs.pop_front().map(|l| EventItem::Log(Box::new(l))),
+            Self::Block(blocks) => blocks.pop_front().map(EventItem::Block),
+        }
+    }
+}
+
+/// A single event popped from an [`EventBuffer`].
+#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
+#[serde(untagged)]
+pub(crate) enum EventItem<B> {
+    /// A log entry.
+    Log(Box<Log>),
+    /// A block event.
+    Block(B),
+}
+
+// --- FilterOutput (EventBuffer<B256>) conversions ---
+
+impl From<Vec<B256>> for EventBuffer<B256> {
+    fn from(hashes: Vec<B256>) -> Self {
+        Self::Block(hashes.into())
+    }
+}
+
+impl From<Vec<Log>> for EventBuffer<B256> {
+    fn from(logs: Vec<Log>) -> Self {
+        Self::Log(logs.into())
+    }
+}
+
+impl FromIterator<Log> for EventBuffer<B256> {
+    fn from_iter<T: IntoIterator<Item = Log>>(iter: T) -> Self {
+        Self::Log(iter.into_iter().collect())
+    }
+}
+
+impl FromIterator<B256> for EventBuffer<B256> {
+    fn from_iter<T: IntoIterator<Item = B256>>(iter: T) -> Self {
+        Self::Block(iter.into_iter().collect())
+    }
+}
+
+// --- SubscriptionBuffer (EventBuffer<Header>) conversions ---
+
+impl From<Vec<Log>> for EventBuffer<Header> {
+    fn from(logs: Vec<Log>) -> Self {
+        Self::Log(logs.into())
+    }
+}
+
+impl From<Vec<Header>> for EventBuffer<Header> {
+    fn from(headers: Vec<Header>) -> Self {
+        Self::Block(headers.into())
+    }
+}
+
+impl FromIterator<Log> for EventBuffer<Header> {
+    fn from_iter<T: IntoIterator<Item = Log>>(iter: T) -> Self {
+        Self::Log(iter.into_iter().collect())
+    }
+}
+
+impl FromIterator<Header> for EventBuffer<Header> {
+    fn from_iter<T: IntoIterator<Item = Header>>(iter: T) -> Self {
+        Self::Block(iter.into_iter().collect())
+    }
+}
diff --git a/crates/rpc-storage/src/interest/filters.rs b/crates/rpc-storage/src/interest/filters.rs
index 6d98fd0..ae09367 100644
--- a/crates/rpc-storage/src/interest/filters.rs
+++ b/crates/rpc-storage/src/interest/filters.rs
@@ -1,13 +1,12 @@
 //! Filter management for `eth_newFilter` / `eth_getFilterChanges`.
 
-use crate::interest::InterestKind;
+use crate::interest::{InterestKind, buffer::EventBuffer};
 use alloy::{
     primitives::{B256, U64},
-    rpc::types::{Filter, Log},
+    rpc::types::Filter,
 };
 use dashmap::{DashMap, mapref::one::RefMut};
 use std::{
-    collections::VecDeque,
     sync::{
         Arc, Weak,
         atomic::{AtomicU64, Ordering},
@@ -18,105 +17,8 @@
 use tracing::trace;
 
 type FilterId = U64;
 
-/// Either type for filter outputs.
-#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize)]
-#[serde(untagged)]
-#[allow(dead_code)]
-pub(crate) enum Either {
-    /// Log
-    Log(Log),
-    /// Block hash
-    Block(B256),
-}
-
-/// The output of a filter.
-///
-/// This will be either a list of logs or a list of block hashes. Pending tx
-/// filters are not supported by Signet. For convenience, there is a special
-/// variant for empty results.
-#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize)]
-#[serde(untagged)]
-pub(crate) enum FilterOutput {
-    /// Empty output. Holds a `[(); 0]` to make sure it serializes as an empty
-    /// array.
-    Empty([(); 0]),
-    /// Logs
-    Log(VecDeque<Log>),
-    /// Block hashes
-    Block(VecDeque<B256>),
-}
-
-#[allow(dead_code)]
-impl FilterOutput {
-    /// Create an empty filter output.
-    pub(crate) const fn empty() -> Self {
-        Self::Empty([])
-    }
-
-    /// True if this is an empty filter output.
-    pub(crate) fn is_empty(&self) -> bool {
-        self.len() == 0
-    }
-
-    /// The length of this filter output.
-    pub(crate) fn len(&self) -> usize {
-        match self {
-            Self::Empty(_) => 0,
-            Self::Log(logs) => logs.len(),
-            Self::Block(blocks) => blocks.len(),
-        }
-    }
-
-    /// Extend this filter output with another.
-    ///
-    /// # Panics
-    ///
-    /// If the two filter outputs are of different types.
-    pub(crate) fn extend(&mut self, other: Self) {
-        match (self, other) {
-            (Self::Log(logs), Self::Log(other_logs)) => logs.extend(other_logs),
-            (Self::Block(blocks), Self::Block(other_blocks)) => blocks.extend(other_blocks),
-            (_, Self::Empty(_)) => (),
-            (this @ Self::Empty(_), other) => *this = other,
-            _ => panic!("attempted to mix log and block outputs"),
-        }
-    }
-
-    /// Pop a value from the front of the filter output.
-    pub(crate) fn pop_front(&mut self) -> Option<Either> {
-        match self {
-            Self::Log(logs) => logs.pop_front().map(Either::Log),
-            Self::Block(blocks) => blocks.pop_front().map(Either::Block),
-            Self::Empty(_) => None,
-        }
-    }
-}
-
-impl From<Vec<B256>> for FilterOutput {
-    fn from(block_hashes: Vec<B256>) -> Self {
-        Self::Block(block_hashes.into())
-    }
-}
-
-impl From<Vec<Log>> for FilterOutput {
-    fn from(logs: Vec<Log>) -> Self {
-        Self::Log(logs.into())
-    }
-}
-
-impl FromIterator<Log> for FilterOutput {
-    fn from_iter<T: IntoIterator<Item = Log>>(iter: T) -> Self {
-        let inner: VecDeque<_> = iter.into_iter().collect();
-        if inner.is_empty() { Self::empty() } else { Self::Log(inner) }
-    }
-}
-
-impl FromIterator<B256> for FilterOutput {
-    fn from_iter<T: IntoIterator<Item = B256>>(iter: T) -> Self {
-        let inner: VecDeque<_> = iter.into_iter().collect();
-        if inner.is_empty() { Self::empty() } else { Self::Block(inner) }
-    }
-}
+/// Output of a polled filter: log entries or block hashes.
+pub(crate) type FilterOutput = EventBuffer<B256>;
 
 /// An active filter.
 ///
@@ -142,12 +44,6 @@ impl core::fmt::Display for ActiveFilter {
 }
 
 impl ActiveFilter {
-    /// True if this is a log filter.
-    #[allow(dead_code)]
-    pub(crate) const fn is_filter(&self) -> bool {
-        self.kind.is_filter()
-    }
-
     /// True if this is a block filter.
pub(crate) const fn is_block(&self) -> bool { self.kind.is_block() @@ -313,7 +209,7 @@ impl FilterCleanTask { // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: -//. +// // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // diff --git a/crates/rpc-storage/src/interest/kind.rs b/crates/rpc-storage/src/interest/kind.rs index ff2392e..ed4f49c 100644 --- a/crates/rpc-storage/src/interest/kind.rs +++ b/crates/rpc-storage/src/interest/kind.rs @@ -16,12 +16,6 @@ pub(crate) enum InterestKind { } impl InterestKind { - /// True if this is a log filter. - #[allow(dead_code)] - pub(crate) const fn is_filter(&self) -> bool { - matches!(self, Self::Log(_)) - } - /// True if this is a block filter. pub(crate) const fn is_block(&self) -> bool { matches!(self, Self::Block) diff --git a/crates/rpc-storage/src/interest/mod.rs b/crates/rpc-storage/src/interest/mod.rs index 9193af8..644d64b 100644 --- a/crates/rpc-storage/src/interest/mod.rs +++ b/crates/rpc-storage/src/interest/mod.rs @@ -35,6 +35,7 @@ //! [`DashMap`]: dashmap::DashMap //! [`DashMap::retain`]: dashmap::DashMap::retain +mod buffer; mod filters; pub(crate) use filters::{FilterManager, FilterOutput}; mod kind; diff --git a/crates/rpc-storage/src/interest/subs.rs b/crates/rpc-storage/src/interest/subs.rs index bf42797..3df1a00 100644 --- a/crates/rpc-storage/src/interest/subs.rs +++ b/crates/rpc-storage/src/interest/subs.rs @@ -1,12 +1,13 @@ //! Subscription management for `eth_subscribe` / `eth_unsubscribe`. 
-use crate::interest::{InterestKind, NewBlockNotification};
+use crate::interest::{
+    InterestKind, NewBlockNotification,
+    buffer::{EventBuffer, EventItem},
+};
 use ajj::HandlerCtx;
-use alloy::{primitives::U64, rpc::types::Log};
+use alloy::primitives::U64;
 use dashmap::DashMap;
 use std::{
-    cmp::min,
-    collections::VecDeque,
     future::pending,
     sync::{
         Arc, Weak,
@@ -18,15 +19,8 @@
 use tokio::sync::broadcast::{self, error::RecvError};
 use tokio_util::sync::{CancellationToken, WaitForCancellationFutureOwned};
 use tracing::{Instrument, debug, debug_span, enabled, trace};
 
-/// Either type for subscription outputs.
-#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize)]
-#[serde(untagged)]
-pub(crate) enum Either {
-    /// A log entry.
-    Log(Box<Log>),
-    /// A block header.
-    Block(Box<Header>),
-}
+/// Buffer for subscription outputs: log entries or block headers.
+pub(crate) type SubscriptionBuffer = EventBuffer<Header>;
 
 /// JSON-RPC subscription notification envelope.
 #[derive(serde::Serialize)]
@@ -39,79 +33,10 @@
 /// Params field of a subscription notification.
 #[derive(serde::Serialize)]
 struct SubscriptionParams<'a> {
-    result: &'a Either,
+    result: &'a EventItem<Header>,
     subscription: U64,
 }
 
-/// Buffer for subscription outputs.
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub(crate) enum SubscriptionBuffer {
-    /// Log buffer.
-    Log(VecDeque<Log>),
-    /// Block header buffer.
-    Block(VecDeque<Header>),
-}
-
-impl SubscriptionBuffer {
-    /// True if the buffer is empty.
-    pub(crate) fn is_empty(&self) -> bool {
-        self.len() == 0
-    }
-
-    /// Get the number of items in the buffer.
-    pub(crate) fn len(&self) -> usize {
-        match self {
-            Self::Log(buf) => buf.len(),
-            Self::Block(buf) => buf.len(),
-        }
-    }
-
-    /// Extend this buffer with another buffer.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the buffers are of different types.
-    pub(crate) fn extend(&mut self, other: Self) {
-        match (self, other) {
-            (Self::Log(buf), Self::Log(other)) => buf.extend(other),
-            (Self::Block(buf), Self::Block(other)) => buf.extend(other),
-            _ => panic!("mismatched buffer types"),
-        }
-    }
-
-    /// Pop the front of the buffer.
-    pub(crate) fn pop_front(&mut self) -> Option<Either> {
-        match self {
-            Self::Log(buf) => buf.pop_front().map(|log| Either::Log(Box::new(log))),
-            Self::Block(buf) => buf.pop_front().map(|header| Either::Block(Box::new(header))),
-        }
-    }
-}
-
-impl From<Vec<Log>> for SubscriptionBuffer {
-    fn from(logs: Vec<Log>) -> Self {
-        Self::Log(logs.into())
-    }
-}
-
-impl FromIterator<Log> for SubscriptionBuffer {
-    fn from_iter<T: IntoIterator<Item = Log>>(iter: T) -> Self {
-        Self::Log(iter.into_iter().collect())
-    }
-}
-
-impl From<Vec<Header>> for SubscriptionBuffer {
-    fn from(headers: Vec<Header>) -> Self {
-        Self::Block(headers.into())
-    }
-}
-
-impl FromIterator<Header> for SubscriptionBuffer {
-    fn from_iter<T: IntoIterator<Item = Header>>(iter: T) -> Self {
-        Self::Block(iter.into_iter().collect())
-    }
-}
-
 /// Tracks ongoing subscription tasks.
/// /// Performs the following functions: @@ -193,6 +118,7 @@ impl SubscriptionManagerInner { let id = self.next_id(); let token = CancellationToken::new(); + self.tasks.insert(id, token.clone()); let task = SubscriptionTask { id, filter, @@ -240,7 +166,7 @@ impl SubscriptionTask { let permit_fut = async { if !notif_buffer.is_empty() { ajj_ctx - .permit_many(min(ajj_ctx.notification_capacity() / 2, notif_buffer.len())) + .permit_many((ajj_ctx.notification_capacity() / 2).min(notif_buffer.len())) .await } else { pending().await @@ -330,8 +256,9 @@ impl SubCleanerTask { std::thread::spawn(move || { loop { std::thread::sleep(self.interval); - if let Some(inner) = self.inner.upgrade() { - inner.tasks.retain(|_, task| !task.is_cancelled()); + match self.inner.upgrade() { + Some(inner) => inner.tasks.retain(|_, task| !task.is_cancelled()), + None => break, } } }); diff --git a/crates/rpc-storage/src/signet/endpoints.rs b/crates/rpc-storage/src/signet/endpoints.rs index 5776522..7294073 100644 --- a/crates/rpc-storage/src/signet/endpoints.rs +++ b/crates/rpc-storage/src/signet/endpoints.rs @@ -91,5 +91,5 @@ where } }; - await_handler!(@response_option hctx.spawn_blocking(task)) + await_handler!(@response_option hctx.spawn(task)) } diff --git a/crates/rpc-storage/tests/eth_rpc.rs b/crates/rpc-storage/tests/eth_rpc.rs index e9557cf..24d7d8b 100644 --- a/crates/rpc-storage/tests/eth_rpc.rs +++ b/crates/rpc-storage/tests/eth_rpc.rs @@ -605,12 +605,33 @@ async fn test_get_logs_empty() { #[tokio::test] async fn test_not_supported() { let h = TestHarness::new(0).await; - let resp = rpc_call_raw(&h.app, "eth_syncing", json!([])).await; + let resp = rpc_call_raw(&h.app, "eth_protocolVersion", json!([])).await; assert!(resp.get("error").is_some()); let msg = resp["error"]["message"].as_str().unwrap(); assert!(msg.contains("not found"), "unexpected error: {msg}"); } +#[tokio::test] +async fn test_syncing_not_syncing() { + let h = TestHarness::new(0).await; + let result = 
rpc_call(&h.app, "eth_syncing", json!([])).await; + assert_eq!(result, json!(false)); +} + +#[tokio::test] +async fn test_syncing_in_progress() { + let h = TestHarness::new(0).await; + h.tags.set_sync_status(signet_rpc_storage::SyncStatus { + starting_block: 0, + current_block: 50, + highest_block: 100, + }); + let result = rpc_call(&h.app, "eth_syncing", json!([])).await; + assert_eq!(result["starting_block"], json!("0x0")); + assert_eq!(result["current_block"], json!("0x32")); + assert_eq!(result["highest_block"], json!("0x64")); +} + #[tokio::test] async fn test_send_raw_tx_no_cache() { let h = TestHarness::new(0).await; From 0acb3824123f896a0d3f0242de8a2096b46f6dfe Mon Sep 17 00:00:00 2001 From: James Prestwich Date: Mon, 16 Feb 2026 07:32:37 -0500 Subject: [PATCH 30/31] refactor: rewrite block processor and node to use signet-storage (#79) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * refactor: rewrite block processor to use signet-hot storage Replace the reth ProviderFactory-based storage with HotKv for rollup state reads. The processor becomes a stateless executor that reads from hot storage, runs the EVM, and returns ExecutedBlock. Extraction is moved to the node (PR3) to avoid lifetime issues with borrowed Extracts. - Replace Db: NodeTypesDbTrait generic with H: HotKv - Replace state_provider_database() with revm_state() using RevmRead - Remove on_host_commit() and commit_evm_results() - Add process_block() returning ExecutedBlock - Add build_executed_block() for type conversion - Remove signet-db, signet-node-types, reth-exex, reth-node-api deps - Add signet-hot, signet-storage-types deps - Remove Chain/PrimitivesOf/ExExNotification type aliases from lib.rs Co-Authored-By: Claude Opus 4.6 * refactor: rewrite signet-node to use unified storage and rpc-storage Replace reth's ProviderFactory/BlockchainProvider with UnifiedStorage and swap signet-rpc for signet-rpc-storage. 
The node now holds Arc> and shares state with the RPC context through BlockTags (atomic block tag tracking) and broadcast::Sender (new block notifications). Key changes: - StorageRpcCtx accepts Arc> for shared ownership - Node struct uses HotKv generic instead of NodeTypesDbTrait - Block processing uses signet-extract's Extractor + ExtractableChainShim - Genesis loading via HistoryWrite::load_genesis + UnsafeDbWrite::commit - Fix metrics bug: record_notification_received now correctly increments the received counter instead of the processed counter Co-Authored-By: Claude Opus 4.6 * refactor: rewrite node-tests to use unified storage and align gas oracle Replace TmpDb/ProviderFactory with MemKv/UnifiedStorage in all node test infrastructure. Update signet-storage crates to v0.6.2 to fix MemKv intra-transaction read visibility. Align the cold-storage gas oracle with reth's GasPriceOracle by adding default_gas_price (1 Gwei), ignore_price (2 wei), and max_price (500 Gwei) to StorageRpcConfig. Co-Authored-By: Claude Opus 4.6 * refactor: delete signet-db, signet-node-types, and signet-rpc These crates are fully replaced by signet-storage, signet-hot, and signet-rpc-storage respectively. Remove 6,700+ lines of dead code and clean up 22 unused workspace dependencies. Replace reth-db tempdir_path with tempfile in signet-node-config. Co-Authored-By: Claude Opus 4.6 * refactor: rename signet-rpc-storage to signet-rpc Reclaim the signet-rpc name now that the old reth-backed crate is deleted. Rename directory, package, and all import paths. Co-Authored-By: Claude Opus 4.6 * refactor: restore RUN EVM ascii art banner Co-Authored-By: Claude Opus 4.6 * fix: resolve clippy empty_line_after_doc_comments warning in ascii banner Co-Authored-By: Claude Opus 4.6 * fix: address PR review — bugs, style, and test infra - Fix log_index per-receipt → per-block (kind.rs) - Fix hash/height mismatch in update_highest_processed_height (node.rs) - Fix serve_ipc panic: expect → ? 
(serve.rs) - Use seal_unchecked to avoid redundant header re-hashing (processor.rs) - Refactor imperative event loop → functional chain (processor.rs) - Add #[instrument(skip_all)] to run_evm and build_executed_block - Add TODO for two-reader consistency risk (processor.rs) - Group signet_hot imports across 6 RPC files - Move supported methods above "Unsupported" comment (eth/mod.rs) - Hoist function-scoped imports to module level (eth_rpc.rs) - Fix stale signet_rpc_storage reference (eth_rpc.rs) - Differentiate HTTP/WS doc comments, fix Result type (serve.rs) - Wrap cold storage polling loop in 30s timeout (context.rs) - Remove dead signet_events_in_range method (context.rs) - Update README to reflect new API types Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- Cargo.toml | 36 +- crates/block-processor/Cargo.toml | 6 +- crates/block-processor/README.md | 14 +- crates/block-processor/src/lib.rs | 10 - crates/block-processor/src/metrics.rs | 13 - crates/block-processor/src/v1/processor.rs | 307 ++-- crates/db/Cargo.toml | 36 - crates/db/README.md | 19 - crates/db/src/aliases.rs | 15 - crates/db/src/chain.rs | 46 - crates/db/src/consistency.rs | 303 ---- crates/db/src/convert.rs | 109 -- crates/db/src/lib.rs | 34 - crates/db/src/provider.rs | 638 --------- crates/db/src/tables.rs | 408 ------ crates/db/src/traits.rs | 271 ---- crates/db/tests/common/mod.rs | 39 - crates/db/tests/db.rs | 167 --- crates/node-config/Cargo.toml | 6 +- crates/node-config/src/test_utils.rs | 4 +- crates/node-tests/Cargo.toml | 9 +- crates/node-tests/src/context.rs | 209 ++- crates/node-tests/src/lib.rs | 2 +- crates/node-tests/src/utils.rs | 29 +- crates/node-tests/tests/db.rs | 31 +- crates/node-tests/tests/host_events.rs | 94 +- crates/node-tests/tests/multiple-blocks.rs | 728 ++++------ crates/node-tests/tests/rpc.rs | 55 +- crates/node-tests/tests/rpc_debug.rs | 10 +- crates/node-types/Cargo.toml | 21 - crates/node-types/README.md | 10 - 
crates/node-types/src/block.rs | 26 - crates/node-types/src/lib.rs | 174 --- crates/node-types/src/utils.rs | 24 - crates/node/Cargo.toml | 11 +- crates/node/src/builder.rs | 220 +-- crates/node/src/metrics.rs | 35 +- crates/node/src/node.rs | 541 +++---- crates/node/src/rpc.rs | 30 +- crates/node/src/serve.rs | 10 +- crates/rpc-storage/Cargo.toml | 47 - crates/rpc-storage/README.md | 16 - crates/rpc-storage/src/debug/endpoints.rs | 206 --- crates/rpc-storage/src/debug/error.rs | 50 - crates/rpc-storage/src/debug/mod.rs | 26 - crates/rpc-storage/src/debug/tracer.rs | 224 --- crates/rpc-storage/src/eth/endpoints.rs | 1160 --------------- crates/rpc-storage/src/eth/error.rs | 61 - crates/rpc-storage/src/eth/helpers.rs | 239 ---- crates/rpc-storage/src/eth/mod.rs | 99 -- crates/rpc-storage/src/interest/filters.rs | 222 --- crates/rpc-storage/src/interest/kind.rs | 100 -- crates/rpc-storage/src/interest/mod.rs | 58 - crates/rpc-storage/src/interest/subs.rs | 266 ---- crates/rpc-storage/src/lib.rs | 40 - crates/rpc-storage/src/signet/endpoints.rs | 95 -- crates/rpc-storage/src/signet/error.rs | 27 - crates/rpc-storage/src/signet/mod.rs | 19 - crates/rpc/Cargo.toml | 57 +- crates/rpc/README.md | 58 +- crates/{rpc-storage => rpc}/src/config/ctx.rs | 15 +- .../src/config/gas_oracle.rs | 25 +- crates/{rpc-storage => rpc}/src/config/mod.rs | 0 .../src/config/resolve.rs | 2 +- .../src/config/rpc_config.rs | 25 +- crates/rpc/src/ctx/fee_hist.rs | 150 -- crates/rpc/src/ctx/full.rs | 282 ---- crates/rpc/src/ctx/mod.rs | 8 - crates/rpc/src/ctx/signet.rs | 928 ------------ crates/rpc/src/debug/endpoints.rs | 208 +-- crates/rpc/src/debug/error.rs | 76 +- crates/rpc/src/debug/mod.rs | 29 +- crates/rpc/src/debug/tracer.rs | 130 +- .../{rpc-storage => rpc}/src/debug/types.rs | 0 crates/rpc/src/eth/endpoints.rs | 1261 +++++++++++------ crates/rpc/src/eth/error.rs | 69 +- crates/rpc/src/eth/helpers.rs | 254 +++- crates/rpc/src/eth/mod.rs | 110 +- crates/{rpc-storage => 
rpc}/src/eth/types.rs | 0 crates/rpc/src/inspect/db.rs | 164 --- crates/rpc/src/inspect/endpoints.rs | 37 - crates/rpc/src/inspect/mod.rs | 16 - .../src/interest/buffer.rs | 0 crates/rpc/src/interest/filters.rs | 134 +- crates/rpc/src/interest/kind.rs | 118 +- crates/rpc/src/interest/mod.rs | 56 +- crates/rpc/src/interest/subs.rs | 173 +-- crates/rpc/src/lib.rs | 64 +- crates/rpc/src/receipts.rs | 134 -- crates/rpc/src/signet/endpoints.rs | 78 +- crates/rpc/src/signet/error.rs | 38 +- crates/rpc/src/signet/mod.rs | 19 +- crates/rpc/src/utils.rs | 105 -- crates/{rpc-storage => rpc}/tests/eth_rpc.rs | 26 +- 94 files changed, 2701 insertions(+), 9853 deletions(-) delete mode 100644 crates/db/Cargo.toml delete mode 100644 crates/db/README.md delete mode 100644 crates/db/src/aliases.rs delete mode 100644 crates/db/src/chain.rs delete mode 100644 crates/db/src/consistency.rs delete mode 100644 crates/db/src/convert.rs delete mode 100644 crates/db/src/lib.rs delete mode 100644 crates/db/src/provider.rs delete mode 100644 crates/db/src/tables.rs delete mode 100644 crates/db/src/traits.rs delete mode 100644 crates/db/tests/common/mod.rs delete mode 100644 crates/db/tests/db.rs delete mode 100644 crates/node-types/Cargo.toml delete mode 100644 crates/node-types/README.md delete mode 100644 crates/node-types/src/block.rs delete mode 100644 crates/node-types/src/lib.rs delete mode 100644 crates/node-types/src/utils.rs delete mode 100644 crates/rpc-storage/Cargo.toml delete mode 100644 crates/rpc-storage/README.md delete mode 100644 crates/rpc-storage/src/debug/endpoints.rs delete mode 100644 crates/rpc-storage/src/debug/error.rs delete mode 100644 crates/rpc-storage/src/debug/mod.rs delete mode 100644 crates/rpc-storage/src/debug/tracer.rs delete mode 100644 crates/rpc-storage/src/eth/endpoints.rs delete mode 100644 crates/rpc-storage/src/eth/error.rs delete mode 100644 crates/rpc-storage/src/eth/helpers.rs delete mode 100644 crates/rpc-storage/src/eth/mod.rs delete mode 
100644 crates/rpc-storage/src/interest/filters.rs delete mode 100644 crates/rpc-storage/src/interest/kind.rs delete mode 100644 crates/rpc-storage/src/interest/mod.rs delete mode 100644 crates/rpc-storage/src/interest/subs.rs delete mode 100644 crates/rpc-storage/src/lib.rs delete mode 100644 crates/rpc-storage/src/signet/endpoints.rs delete mode 100644 crates/rpc-storage/src/signet/error.rs delete mode 100644 crates/rpc-storage/src/signet/mod.rs rename crates/{rpc-storage => rpc}/src/config/ctx.rs (97%) rename crates/{rpc-storage => rpc}/src/config/gas_oracle.rs (68%) rename crates/{rpc-storage => rpc}/src/config/mod.rs (100%) rename crates/{rpc-storage => rpc}/src/config/resolve.rs (99%) rename crates/{rpc-storage => rpc}/src/config/rpc_config.rs (88%) delete mode 100644 crates/rpc/src/ctx/fee_hist.rs delete mode 100644 crates/rpc/src/ctx/full.rs delete mode 100644 crates/rpc/src/ctx/mod.rs delete mode 100644 crates/rpc/src/ctx/signet.rs rename crates/{rpc-storage => rpc}/src/debug/types.rs (100%) rename crates/{rpc-storage => rpc}/src/eth/types.rs (100%) delete mode 100644 crates/rpc/src/inspect/db.rs delete mode 100644 crates/rpc/src/inspect/endpoints.rs delete mode 100644 crates/rpc/src/inspect/mod.rs rename crates/{rpc-storage => rpc}/src/interest/buffer.rs (100%) delete mode 100644 crates/rpc/src/receipts.rs delete mode 100644 crates/rpc/src/utils.rs rename crates/{rpc-storage => rpc}/tests/eth_rpc.rs (97%) diff --git a/Cargo.toml b/Cargo.toml index 87a8610..af7af2e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,14 +36,11 @@ incremental = false [workspace.dependencies] signet-blobber = { version = "0.16.0-rc.7", path = "crates/blobber" } signet-block-processor = { version = "0.16.0-rc.7", path = "crates/block-processor" } -signet-db = { version = "0.16.0-rc.7", path = "crates/db" } signet-genesis = { version = "0.16.0-rc.7", path = "crates/genesis" } signet-node = { version = "0.16.0-rc.7", path = "crates/node" } signet-node-config = { version = 
"0.16.0-rc.7", path = "crates/node-config" } signet-node-tests = { version = "0.16.0-rc.7", path = "crates/node-tests" } -signet-node-types = { version = "0.16.0-rc.7", path = "crates/node-types" } signet-rpc = { version = "0.16.0-rc.7", path = "crates/rpc" } -signet-rpc-storage = { version = "0.16.0-rc.7", path = "crates/rpc-storage" } init4-bin-base = { version = "0.18.0-rc.8", features = ["alloy"] } @@ -56,10 +53,10 @@ signet-tx-cache = "0.16.0-rc.8" signet-types = "0.16.0-rc.8" signet-zenith = "0.16.0-rc.8" signet-journal = "0.16.0-rc.8" -signet-storage = "0.6.0" -signet-cold = "0.6.0" -signet-hot = "0.6.0" -signet-storage-types = "0.6.0" +signet-storage = "0.6.2" +signet-cold = "0.6.2" +signet-hot = "0.6.2" +signet-storage-types = "0.6.2" # ajj ajj = { version = "0.6.0" } @@ -76,28 +73,13 @@ alloy = { version = "1.4.0", features = [ "genesis", "arbitrary", ] } -alloy-contract = { version = "1.4.0", features = ["pubsub"] } # Reth reth = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } reth-chainspec = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-codecs = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-db-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-db-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-eth-wire-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-evm-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } reth-exex = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } reth-exex-test-utils = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-libmdbx = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-network-api = { git = "https://github.com/paradigmxyz/reth", tag = 
"v1.10.2" } -reth-network-peers = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } reth-node-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-prune-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-rpc-eth-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } -reth-stages-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } reth-transaction-pool = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" } # Foundry periphery @@ -105,14 +87,8 @@ foundry-blob-explorers = "0.17" # Async tokio = { version = "1.43.0", features = ["macros"] } -async-trait = "0.1.87" - -# Pinned for compatibility with reth -parking_lot = "0.12" # Misc -chrono = "0.4.38" -clap = "4" eyre = "0.6.12" futures-util = "0.3.31" hex = { package = "const-hex", version = "1.10", default-features = false, features = [ @@ -121,19 +97,15 @@ hex = { package = "const-hex", version = "1.10", default-features = false, featu itertools = "0.14.0" metrics = "0.24.2" openssl = { version = "0.10", features = ["vendored"] } -proptest = "1.6.0" reqwest = "0.12.9" serde = { version = "1.0.217", features = ["derive"] } serde_json = "1.0.137" -smallvec = "1.15.1" tracing = "0.1.41" tracing-subscriber = "0.3.19" thiserror = "2.0.12" url = "2.5.4" -uuid = "1.16.0" # Test Utils -alloy-rlp = "0.3.11" tempfile = "3.17.0" # [patch.crates-io] diff --git a/crates/block-processor/Cargo.toml b/crates/block-processor/Cargo.toml index b7785c8..ce5eae9 100644 --- a/crates/block-processor/Cargo.toml +++ b/crates/block-processor/Cargo.toml @@ -20,14 +20,12 @@ init4-bin-base.workspace = true trevm.workspace = true signet-blobber.workspace = true -signet-db.workspace = true -signet-node-types.workspace = true +signet-hot.workspace = true +signet-storage-types.workspace = true alloy.workspace = true reth.workspace = true 
-reth-exex.workspace = true -reth-node-api.workspace = true reth-chainspec.workspace = true eyre.workspace = true diff --git a/crates/block-processor/README.md b/crates/block-processor/README.md index c239a6d..6a49851 100644 --- a/crates/block-processor/README.md +++ b/crates/block-processor/README.md @@ -1,13 +1,11 @@ # Signet Block Processor -Block processing logic for the Signet Node. This crate takes a reth `Chain`, -runs the Signet EVM, and commits the results to a database. +Block processing logic for the Signet Node. This crate extracts and processes +Signet blocks from host chain commits using the EVM, reading rollup state from +hot storage. # Significant Types -- A few convenience type aliases: - - `PrimitivesOf` - The primitives type used by the host. - - `Chain` - A reth `Chain` using the host's primitives. - - `ExExNotification` - A reth `ExExNotification` using the host's - primitives. -- `SignetBlockProcessorV1` - The first version of the block processor. +- `SignetBlockProcessor` — The block processor. Reads state from + `HotKv` storage, runs the EVM via `signet_evm`, and returns an + `ExecutedBlock`. diff --git a/crates/block-processor/src/lib.rs b/crates/block-processor/src/lib.rs index d836442..a2c957b 100644 --- a/crates/block-processor/src/lib.rs +++ b/crates/block-processor/src/lib.rs @@ -21,13 +21,3 @@ pub use utils::revm_spec; mod v1; pub use v1::SignetBlockProcessor as SignetBlockProcessorV1; - -/// Primitives used by the host. -pub type PrimitivesOf = - <::Types as reth_node_api::NodeTypes>::Primitives; - -/// A [`reth::providers::Chain`] using the host primitives. -pub type Chain = reth::providers::Chain>; - -/// A [`reth_exex::ExExNotification`] using the host primitives. 
-pub type ExExNotification = reth_exex::ExExNotification>; diff --git a/crates/block-processor/src/metrics.rs b/crates/block-processor/src/metrics.rs index 5288d11..67fe4a7 100644 --- a/crates/block-processor/src/metrics.rs +++ b/crates/block-processor/src/metrics.rs @@ -57,9 +57,6 @@ const ENTER_TOKEN_PROCESSED_HELP: &str = const TRANSACT_PROCESSED: &str = "signet.block_processor.transact_events.processed"; const TRANSACT_PROCESSED_HELP: &str = "Histogram of number of transact events processed per block"; -const EXTRACTION_TIME: &str = "signet.block_processor.extraction.time"; -const EXTRACTION_TIME_HELP: &str = "Time taken to extract signet outputs from a host notification. Note: sometimes the extraction includes multiple blocks."; - const PROCESSING_TIME: &str = "signet.block_processor.processing.time"; const PROCESSING_TIME_HELP: &str = "Time taken to process a single signet block from extracts, in milliseconds."; @@ -80,7 +77,6 @@ static DESCRIBE: LazyLock<()> = LazyLock::new(|| { describe_histogram!(ENTER_PROCESSED, ENTER_PROCESSED_HELP); describe_histogram!(ENTER_TOKEN_PROCESSED, ENTER_TOKEN_PROCESSED_HELP); describe_histogram!(TRANSACT_PROCESSED, TRANSACT_PROCESSED_HELP); - describe_histogram!(EXTRACTION_TIME, EXTRACTION_TIME_HELP); describe_histogram!(PROCESSING_TIME, PROCESSING_TIME_HELP); describe_histogram!(BLOCK_GAS_USED, BLOCK_GAS_USED_HELP); }); @@ -184,15 +180,6 @@ fn record_transacts_processed(value: u64) { transacts_processed().record(value as f64); } -fn extraction_time() -> Histogram { - LazyLock::force(&DESCRIBE); - histogram!(EXTRACTION_TIME) -} - -pub(crate) fn record_extraction_time(started_at: &std::time::Instant) { - extraction_time().record(started_at.elapsed().as_millis() as f64); -} - fn processing_time() -> Histogram { LazyLock::force(&DESCRIBE); histogram!(PROCESSING_TIME) diff --git a/crates/block-processor/src/v1/processor.rs b/crates/block-processor/src/v1/processor.rs index cf76ca5..93b580c 100644 --- 
a/crates/block-processor/src/v1/processor.rs +++ b/crates/block-processor/src/v1/processor.rs @@ -1,47 +1,53 @@ -use crate::{AliasOracle, AliasOracleFactory, Chain, metrics}; +use crate::{AliasOracle, AliasOracleFactory, metrics}; use alloy::{ consensus::BlockHeader, - primitives::{Address, map::HashSet}, + primitives::{Address, Sealable, map::HashSet}, }; use core::fmt; use eyre::ContextCompat; use init4_bin_base::utils::calc::SlotCalculator; -use reth::{ - primitives::EthPrimitives, - providers::{ - BlockNumReader, BlockReader, ExecutionOutcome, HeaderProvider, ProviderFactory, - StateProviderFactory, - }, - revm::{database::StateProviderDatabase, db::StateBuilder}, -}; +use reth::{providers::StateProviderFactory, revm::db::StateBuilder}; use reth_chainspec::ChainSpec; -use reth_node_api::{FullNodeComponents, NodeTypes}; use signet_blobber::{CacheHandle, ExtractableChainShim}; use signet_constants::SignetSystemConstants; -use signet_db::{DataCompat, DbProviderExt, RuChain, RuRevmState, RuWriter}; use signet_evm::{BlockResult, EvmNeedsCfg, SignetDriver}; -use signet_extract::{Extractor, Extracts}; -use signet_node_types::{NodeTypesDbTrait, SignetNodeTypes}; +use signet_extract::Extracts; +use signet_hot::{ + db::HotDbRead, + model::{HotKv, HotKvRead, RevmRead}, +}; +use signet_storage_types::{DbSignetEvent, DbZenithHeader, ExecutedBlock, ExecutedBlockBuilder}; use std::{collections::VecDeque, sync::Arc}; -use tracing::{Instrument, error, info, info_span, instrument}; -use trevm::revm::primitives::hardfork::SpecId; +use tracing::{error, instrument}; +use trevm::revm::{ + database::{DBErrorMarker, State}, + primitives::hardfork::SpecId, +}; -/// A block processor that listens to host chain commits and processes -/// Signet blocks accordingly. -pub struct SignetBlockProcessor> +/// The revm state type backed by hot storage. +type HotRevmState = State::RoTx>>; + +/// A block processor that extracts and processes Signet blocks from host +/// chain commits. 
+/// +/// The processor is a stateless executor: it reads state from hot storage, +/// runs the EVM, and returns an [`ExecutedBlock`]. The caller (node) handles +/// extraction, persistence, and orchestrates the per-block loop. +pub struct SignetBlockProcessor> where - Db: NodeTypesDbTrait, + H: HotKv, { - /// Signet System Constants + /// Signet System Constants. constants: SignetSystemConstants, /// The chain specification, used to determine active hardforks. chain_spec: Arc, - /// A [`ProviderFactory`] instance to allow RU database access. - ru_provider: ProviderFactory>, + /// Hot storage handle for rollup state reads. + hot: H, - /// A [`ProviderFactory`] instance to allow Host database access. + /// An oracle for determining whether addresses should be aliased. + /// Reads HOST (L1) state, not rollup state. alias_oracle: Alias, /// The slot calculator. @@ -51,30 +57,32 @@ where blob_cacher: CacheHandle, } -impl fmt::Debug for SignetBlockProcessor +impl fmt::Debug for SignetBlockProcessor where - Db: NodeTypesDbTrait, + H: HotKv, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("SignetBlockProcessor").finish() } } -impl SignetBlockProcessor +impl SignetBlockProcessor where - Db: NodeTypesDbTrait, + H: HotKv, + H::RoTx: 'static, + ::Error: DBErrorMarker, Alias: AliasOracleFactory, { /// Create a new [`SignetBlockProcessor`]. pub const fn new( constants: SignetSystemConstants, chain_spec: Arc, - ru_provider: ProviderFactory>, + hot: H, alias_oracle: Alias, slot_calculator: SlotCalculator, blob_cacher: CacheHandle, ) -> Self { - Self { constants, chain_spec, ru_provider, alias_oracle, slot_calculator, blob_cacher } + Self { constants, chain_spec, hot, alias_oracle, slot_calculator, blob_cacher } } /// Get the active spec id at the given timestamp. @@ -82,27 +90,23 @@ where crate::revm_spec(&self.chain_spec, timestamp) } - /// Make a [`StateProviderDatabase`] from the read-write provider, suitable - /// for use with Trevm. 
- fn state_provider_database(&self, height: u64) -> eyre::Result { - // Get the state provider for the block number - let sp = self.ru_provider.history_by_block_number(height)?; - - // Wrap in Revm comatibility layer - let spd = StateProviderDatabase::new(sp); - let builder = StateBuilder::new_with_database(spd); - - Ok(builder.with_bundle_update().build()) + /// Build a revm [`State`] backed by hot storage at the given parent + /// height. + fn revm_state(&self, parent_height: u64) -> eyre::Result> { + let reader = self.hot.reader()?; + let db = RevmRead::at_height(reader, parent_height); + Ok(StateBuilder::new_with_database(db).with_bundle_update().build()) } /// Make a new Trevm instance, building on the given height. - fn trevm(&self, parent_height: u64, spec_id: SpecId) -> eyre::Result> { - let db = self.state_provider_database(parent_height)?; - + fn trevm( + &self, + parent_height: u64, + spec_id: SpecId, + ) -> eyre::Result>> { + let db = self.revm_state(parent_height)?; let mut trevm = signet_evm::signet_evm(db, self.constants.clone()); - trevm.set_spec_id(spec_id); - Ok(trevm) } @@ -111,110 +115,30 @@ where self.alias_oracle.create()?.should_alias(address) } - /// Called when the host chain has committed a block or set of blocks. - #[instrument(skip_all, fields(count = chain.len(), first = chain.first().number(), tip = chain.tip().number()))] - pub async fn on_host_commit(&self, chain: &Chain) -> eyre::Result> - where - Host: FullNodeComponents, - Host::Types: NodeTypes, - { - let highest = chain.tip().number(); - if highest < self.constants.host_deploy_height() { - return Ok(None); - } - - // this should never happen but we want to handle it anyway - if chain.is_empty() { - return Ok(None); - } - + /// Process a single extracted block, returning an [`ExecutedBlock`]. + /// + /// The caller is responsible for driving extraction (via [`Extractor`]) + /// and persisting the result to storage between calls. 
+ /// + /// [`Extractor`]: signet_extract::Extractor + #[instrument(skip_all, fields( + ru_height = block_extracts.ru_height, + host_height = block_extracts.host_block.number(), + has_ru_block = block_extracts.submitted.is_some(), + ))] + pub async fn process_block( + &self, + block_extracts: &Extracts<'_, ExtractableChainShim<'_>>, + ) -> eyre::Result { let start_time = std::time::Instant::now(); + let spec_id = self.spec_id(block_extracts.host_block.timestamp()); - let extractor = Extractor::new(self.constants.clone()); - let shim = ExtractableChainShim::new(chain); - let outputs = extractor.extract_signet(&shim); - - metrics::record_extraction_time(&start_time); - - // TODO: ENG-481 Inherit prune modes from Reth configuration. - // https://linear.app/initiates/issue/ENG-481/inherit-prune-modes-from-reth-node - - // The extractor will filter out blocks at or before the deployment - // height, so we don't need compute the start from the notification. - let mut start = None; - let mut current = 0; - let last_ru_height = self.ru_provider.last_block_number()?; - - let mut net_outcome = ExecutionOutcome::default(); - - // There might be a case where we can get a notification that starts - // "lower" than our last processed block, - // but contains new information beyond one point. In this case, we - // should simply skip the block. - for block_extracts in outputs.skip_while(|extract| extract.ru_height <= last_ru_height) { - // If we haven't set the start yet, set it to the first block. - if start.is_none() { - let new_ru_height = block_extracts.ru_height; - - // If the above condition passes, we should always be - // committing without skipping a range of blocks. 
- if new_ru_height != last_ru_height + 1 { - error!( - %new_ru_height, - %last_ru_height, - "missing range of DB blocks" - ); - eyre::bail!("missing range of DB blocks"); - } - start = Some(new_ru_height); - } - - metrics::record_extracts(&block_extracts); - - let start_time = std::time::Instant::now(); - current = block_extracts.ru_height; - let spec_id = self.spec_id(block_extracts.host_block.timestamp()); + metrics::record_extracts(block_extracts); - let span = info_span!( - "signet::handle_zenith_outputs::block_processing", - start = start.unwrap(), - ru_height = block_extracts.ru_height, - host_height = block_extracts.host_block.number(), - has_ru_block = block_extracts.submitted.is_some(), - height_before_notification = last_ru_height, - ); - - let block_result = - self.run_evm(&block_extracts, spec_id).instrument(span.clone()).await?; - metrics::record_block_result(&block_result, &start_time); - - let _ = span.enter(); - self.commit_evm_results(&block_extracts, &block_result)?; - - net_outcome.extend(block_result.execution_outcome.convert()); - } - info!("committed blocks"); + let block_result = self.run_evm(block_extracts, spec_id).await?; + metrics::record_block_result(&block_result, &start_time); - // If we didn't process any blocks, we don't need to return anything. - // In practice, this should never happen, as we should always have at - // least one block to process. 
- if start.is_none() { - return Ok(None); - } - let start = start.expect("checked by early return"); - - // Return the range of blocks we processed - let provider = self.ru_provider.provider_rw()?; - - let ru_info = provider.get_extraction_results(start..=current)?; - - let inner = Chain::::new( - provider.recovered_block_range(start..=current)?, - net_outcome, - Default::default(), - ); - - Ok(Some(RuChain { inner, ru_info })) + self.build_executed_block(block_extracts, block_result) } /// ========================== @@ -233,6 +157,9 @@ where /// ███████ ████ ██ ██ /// =========================== /// =========================== + /// + /// Run the EVM for a single block extraction. + #[instrument(skip_all)] async fn run_evm( &self, block_extracts: &Extracts<'_, ExtractableChainShim<'_>>, @@ -242,11 +169,17 @@ where let host_height = block_extracts.host_block.number(); let timestamp = block_extracts.host_block.timestamp(); + // TODO: this opens a second reader (revm_state opens the first). If a write + // occurs between the two, they could see different snapshots. Consider + // opening one reader at the start and threading it through. let parent_header = self - .ru_provider - .sealed_header(block_extracts.ru_height.saturating_sub(1))? + .hot + .reader()? + .get_header(ru_height.saturating_sub(1)) + .map_err(Into::::into)? .wrap_err("parent ru block not present in DB") .inspect_err(|e| error!(%e))?; + let parent_header = signet_types::primitives::SealedHeader::new(parent_header.into_inner()); let txns = match &block_extracts.submitted { Some(submitted) => { @@ -281,7 +214,7 @@ where block_extracts, to_alias, txns, - parent_header.convert(), + parent_header, self.constants.clone(), ); @@ -302,23 +235,69 @@ where }) } - /// Commit the outputs of a zenith block to the database. + /// Build an [`ExecutedBlock`] from processor outputs. 
#[instrument(skip_all)] - fn commit_evm_results( + fn build_executed_block( &self, extracts: &Extracts<'_, ExtractableChainShim<'_>>, - block_result: &BlockResult, - ) -> eyre::Result<()> { - self.ru_provider.provider_rw()?.update(|writer| { - writer.append_host_block( - extracts.ru_header(), - extracts.transacts().cloned(), - extracts.enters(), - extracts.enter_tokens(), - block_result, - )?; - Ok(()) - })?; - Ok(()) + block_result: BlockResult, + ) -> eyre::Result { + let BlockResult { sealed_block, execution_outcome, .. } = block_result; + + // Header from the sealed block. Re-use the known hash to avoid + // recomputing it. + let hash = sealed_block.block.header.hash(); + let header = sealed_block.block.header.header().clone().seal_unchecked(hash); + + // Bundle and receipts from execution outcome. + let (bundle, receipt_vecs, _) = execution_outcome.into_parts(); + + // Flatten receipts (one block → one inner vec) and convert to + // storage Receipt type. + let receipts = receipt_vecs + .into_iter() + .flatten() + .map(|envelope| { + let tx_type = envelope.tx_type(); + signet_storage_types::Receipt { tx_type, inner: envelope.into_receipt() } + }) + .collect(); + + // Transactions: zip txs + senders → Vec. + let transactions = sealed_block + .block + .body + .transactions + .into_iter() + .zip(sealed_block.senders) + .map(|(tx, sender)| signet_storage_types::Recovered::new_unchecked(tx, sender)) + .collect(); + + // Signet events with a single incrementing index across all types. 
+ let signet_events: Vec<_> = extracts + .enters() + .map(|e| DbSignetEvent::Enter(0, e)) + .chain(extracts.enter_tokens().map(|e| DbSignetEvent::EnterToken(0, e))) + .chain(extracts.transacts().map(|t| DbSignetEvent::Transact(0, t.clone()))) + .enumerate() + .map(|(i, e)| match e { + DbSignetEvent::Enter(_, v) => DbSignetEvent::Enter(i as u64, v), + DbSignetEvent::EnterToken(_, v) => DbSignetEvent::EnterToken(i as u64, v), + DbSignetEvent::Transact(_, v) => DbSignetEvent::Transact(i as u64, v), + }) + .collect(); + + // Zenith header from extracts. + let zenith_header = extracts.ru_header().map(DbZenithHeader::from); + + ExecutedBlockBuilder::new() + .header(header) + .bundle(bundle) + .transactions(transactions) + .receipts(receipts) + .signet_events(signet_events) + .zenith_header(zenith_header) + .build() + .map_err(|e| eyre::eyre!("failed to build ExecutedBlock: {e}")) } } diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml deleted file mode 100644 index 199954a..0000000 --- a/crates/db/Cargo.toml +++ /dev/null @@ -1,36 +0,0 @@ -[package] -name = "signet-db" -version.workspace = true -edition.workspace = true -rust-version.workspace = true -authors.workspace = true -license.workspace = true -homepage.workspace = true -repository.workspace = true - -[dependencies] -signet-node-types.workspace = true - -signet-evm.workspace = true -signet-types.workspace = true -signet-zenith.workspace = true - -trevm.workspace = true - -alloy.workspace = true - -reth.workspace = true -reth-db.workspace = true -reth-prune-types.workspace = true -reth-stages-types.workspace = true - -itertools.workspace = true -serde.workspace = true -tracing.workspace = true -auto_impl = "1.3.0" - -[dev-dependencies] -serde_json.workspace = true -reth-db = { workspace = true, features = ["test-utils"] } -reth-exex-test-utils.workspace = true -signet-constants = { workspace = true, features = ["test-utils"] } diff --git a/crates/db/README.md b/crates/db/README.md deleted file mode 100644 
index cfab334..0000000 --- a/crates/db/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# Signet Database - -Extensions and modifications to reth's Database system for use in the Signet -Node. - -This library contains the following: - -- Traits for reading and writing Signet events -- Table definitions for Signet Events and Headers -- Helpers for reading, writing, reverting, Signet EVM blocks and headers - -## Significant Traits - -- `RuWriter` - Encapsulates logic for reading and writing Signet events, state, - headers, etc. -- `DbProviderExt` - Extends the reth `DatabaseProviderRW` with a scope-guarded - `update` method. -- `DataCompat` - Provides methods for converting between Signet and reth data - structures, such as `ExecutionOutcome` and `Receipt`. diff --git a/crates/db/src/aliases.rs b/crates/db/src/aliases.rs deleted file mode 100644 index db880fb..0000000 --- a/crates/db/src/aliases.rs +++ /dev/null @@ -1,15 +0,0 @@ -use reth::{ - providers::{DatabaseProviderRW, StateProviderBox}, - revm::database::StateProviderDatabase, -}; -use signet_node_types::SignetNodeTypes; -use trevm::revm::database::State; - -/// A Convenience alias for a [`DatabaseProviderRW`] using [`SignetNodeTypes`]. -pub type SignetDbRw = DatabaseProviderRW>; - -/// Type alias for EVMs using a [`StateProviderBox`] as the `DB` type for -/// trevm. -/// -/// [`StateProviderBox`]: reth::providers::StateProviderBox -pub type RuRevmState = State>; diff --git a/crates/db/src/chain.rs b/crates/db/src/chain.rs deleted file mode 100644 index f517502..0000000 --- a/crates/db/src/chain.rs +++ /dev/null @@ -1,46 +0,0 @@ -use alloy::primitives::BlockNumber; -use reth::providers::Chain; -use signet_zenith::{Passage, Transactor, Zenith}; -use std::collections::BTreeMap; - -/// Host extraction contrents for a single block. -/// -/// This is a container type for DB operations. It is held by the [`RuChain`] -/// struct, and constructed during the [`RuWriter::get_extraction_results`] -/// method. 
-/// -/// [`RuWriter::get_extraction_results`]: -/// crate::traits::RuWriter::get_extraction_results -#[derive(Debug, Clone, Default, PartialEq, Eq)] -pub struct DbExtractionResults { - /// The zenith header for the block. - pub header: Option, - /// The enters for the block. - pub enters: Vec, - /// The transacts for the block. - pub transacts: Vec, - /// The enter tokens for the block. - pub enter_tokens: Vec, -} - -/// Equivalent of [`Chain`] but also containing [`DbExtractionResults`] for -/// each block. -#[derive(Debug, Clone, Default, PartialEq, Eq)] -pub struct RuChain { - /// Inner chain of RU blocks. - pub inner: Chain, - /// Zenith headers for each block. - pub ru_info: BTreeMap, -} - -impl RuChain { - /// Get the length of the chain in blocks. - pub fn len(&self) -> usize { - self.inner.len() - } - - /// Check if the chain is empty. - pub fn is_empty(&self) -> bool { - self.inner.is_empty() - } -} diff --git a/crates/db/src/consistency.rs b/crates/db/src/consistency.rs deleted file mode 100644 index 5c81f97..0000000 --- a/crates/db/src/consistency.rs +++ /dev/null @@ -1,303 +0,0 @@ -use alloy::primitives::BlockNumber; -use reth::{ - api::NodePrimitives, - primitives::EthPrimitives, - providers::{ - BlockBodyIndicesProvider, ProviderFactory, ProviderResult, StageCheckpointReader, - StaticFileProviderFactory, StaticFileSegment, StaticFileWriter, - }, -}; -use reth_db::{cursor::DbCursorRO, table::Table, tables, transaction::DbTx}; -use reth_stages_types::StageId; -use signet_node_types::{NodeTypesDbTrait, SignetNodeTypes}; -use tracing::{debug, info, info_span, instrument, warn}; - -/// Extension trait that provides consistency checking for the RU database -/// provider. Consistency checks are MANDATORY on node startup to ensure that -/// the static file segments and database are in sync. -/// -/// In general, this should not be implemented outside this crate. 
-pub trait ProviderConsistencyExt { - /// Check the consistency of the static file segments and return the last - /// known-good block number. - fn ru_check_consistency(&self) -> ProviderResult>; -} - -impl ProviderConsistencyExt for ProviderFactory> -where - Db: NodeTypesDbTrait, -{ - /// Check the consistency of the static file segments and return the last - /// known good block number. - #[instrument(skip(self), fields(read_only = self.static_file_provider().is_read_only()))] - fn ru_check_consistency(&self) -> ProviderResult> { - // Based on `StaticFileProvider::check_consistency` in - // `reth/crates/storage/provider/src/providers/static_file/manager.rs` - // with modifications for RU-specific logic. - // - // Comments are largely reproduced from the original source for context. - // - // Last updated @ reth@1.9.1 - let prune_modes = self.provider_rw()?.prune_modes_ref().clone(); - let sfp = self.static_file_provider(); - - debug!("Checking static file consistency."); - - let mut last_good_height: Option = None; - - let mut update_last_good_height = |new_height: BlockNumber| { - last_good_height = - last_good_height.map(|current| current.min(new_height)).or(Some(new_height)); - }; - - for segment in StaticFileSegment::iter() { - let initial_highest_block = sfp.get_highest_static_file_block(segment); - - if prune_modes.has_receipts_pruning() && segment.is_receipts() { - // Pruned nodes (including full node) do not store receipts as static files. - continue; - } - - let span = info_span!( - "checking_segment", - ?segment, - initial_highest_block, - highest_block = tracing::field::Empty, - highest_tx = tracing::field::Empty - ); - let _guard = span.enter(); - - // File consistency is broken if: - // - // * appending data was interrupted before a config commit, then - // data file will be truncated according to the config. - // - // * pruning data was interrupted before a config commit, then we - // have deleted data that we are expected to still have. 
We need - // to check the Database and unwind everything accordingly. - if sfp.is_read_only() { - sfp.check_segment_consistency(segment)?; - } else { - // Fetching the writer will attempt to heal any file level - // inconsistency. - sfp.latest_writer(segment)?; - } - - // Only applies to block-based static files. (Headers) - // - // The updated `highest_block` may have decreased if we healed from a pruning - // interruption. - let mut highest_block = sfp.get_highest_static_file_block(segment); - span.record("highest_block", highest_block); - - if initial_highest_block != highest_block { - update_last_good_height(highest_block.unwrap_or_default()); - } - - // Only applies to transaction-based static files. (Receipts & Transactions) - // - // Make sure the last transaction matches the last block from its indices, since a heal - // from a pruning interruption might have decreased the number of transactions without - // being able to update the last block of the static file segment. - let highest_tx = sfp.get_highest_static_file_tx(segment); - if let Some(highest_tx) = highest_tx { - span.record("highest_tx", highest_tx); - let mut last_block = highest_block.unwrap_or_default(); - loop { - if let Some(indices) = self.block_body_indices(last_block)? { - if indices.last_tx_num() <= highest_tx { - break; - } - } else { - // If the block body indices can not be found, then it means that static - // files is ahead of database, and the `ensure_invariants` check will fix - // it by comparing with stage checkpoints. - break; - } - if last_block == 0 { - break; - } - last_block -= 1; - - highest_block = Some(last_block); - update_last_good_height(last_block); - } - } - - if let Some(unwind) = match segment { - StaticFileSegment::Headers => { - ensure_invariants::< - _, - tables::Headers<::BlockHeader>, - >(self, segment, highest_block, highest_block)? 
- } - StaticFileSegment::Transactions => { - ensure_invariants::< - _, - tables::Transactions<::SignedTx>, - >(self, segment, highest_tx, highest_block)? - } - StaticFileSegment::Receipts => { - ensure_invariants::< - _, - tables::Receipts<::Receipt>, - >(self, segment, highest_tx, highest_block)? - } - StaticFileSegment::TransactionSenders => { - ensure_invariants::<_, tables::TransactionSenders>( - self, - segment, - highest_tx, - highest_block, - )? - } - StaticFileSegment::AccountChangeSets => ensure_invariants::< - _, - tables::AccountChangeSets, - >( - self, segment, highest_tx, highest_block - )?, - } { - update_last_good_height(unwind); - } - } - - Ok(last_good_height) - } -} - -/// Check invariants for each corresponding table and static file segment: -/// -/// 1. The corresponding database table should overlap or have continuity in -/// their keys ([`TxNumber`] or [`BlockNumber`]). -/// 2. Its highest block should match the stage checkpoint block number if it's -/// equal or higher than the corresponding database table last entry. -/// * If the checkpoint block is higher, then request a pipeline unwind to -/// the static file block. This is expressed by returning [`Some`] with the -/// requested pipeline unwind target. -/// * If the checkpoint block is lower, then heal by removing rows from the -/// static file. In this case, the rows will be removed and [`None`] will -/// be returned. -/// 3. If the database tables overlap with static files and have contiguous -/// keys, or the checkpoint block matches the highest static files block, -/// then [`None`] will be returned. 
-/// -/// [`TxNumber`]: alloy::primitives::TxNumber -#[instrument(skip(this, segment), fields(table = T::NAME))] -fn ensure_invariants>( - this: &ProviderFactory>, - segment: StaticFileSegment, - highest_static_file_entry: Option, - highest_static_file_block: Option, -) -> ProviderResult> -where - Db: NodeTypesDbTrait, -{ - let provider = this.provider_rw()?; - let sfp = this.static_file_provider(); - - let mut db_cursor = provider.tx_ref().cursor_read::()?; - - if let Some((db_first_entry, _)) = db_cursor.first()? { - if let (Some(highest_entry), Some(highest_block)) = - (highest_static_file_entry, highest_static_file_block) - { - // If there is a gap between the entry found in static file and - // database, then we have most likely lost static file data and - // need to unwind so we can load it again - if !(db_first_entry <= highest_entry || highest_entry + 1 == db_first_entry) { - info!(unwind_target = highest_block, "Setting unwind target."); - return Ok(Some(highest_block)); - } - } - - if let Some((db_last_entry, _)) = db_cursor.last()? - && highest_static_file_entry.is_none_or(|highest_entry| db_last_entry > highest_entry) - { - return Ok(None); - } - } - - let highest_static_file_entry = highest_static_file_entry.unwrap_or_default(); - let highest_static_file_block = highest_static_file_block.unwrap_or_default(); - - // If static file entry is ahead of the database entries, then ensure the - // checkpoint block number matches. - let checkpoint_block_number = provider - .get_stage_checkpoint(match segment { - StaticFileSegment::Headers => StageId::Headers, - StaticFileSegment::Transactions => StageId::Bodies, - StaticFileSegment::Receipts | StaticFileSegment::AccountChangeSets => { - StageId::Execution - } - StaticFileSegment::TransactionSenders => StageId::SenderRecovery, - })? - .unwrap_or_default() - .block_number; - - // If the checkpoint is ahead, then we lost static file data. May be data corruption. 
- if checkpoint_block_number > highest_static_file_block { - info!( - checkpoint_block_number, - unwind_target = highest_static_file_block, - "Setting unwind target." - ); - return Ok(Some(highest_static_file_block)); - } - - // If the checkpoint is behind, then we failed to do a database commit - // **but committed** to static files on executing a stage, or the reverse - // on unwinding a stage. - // - // All we need to do is to prune the extra static file rows. - if checkpoint_block_number < highest_static_file_block { - info!( - from = highest_static_file_block, - to = checkpoint_block_number, - "Unwinding static file segment." - ); - - let mut writer = sfp.latest_writer(segment)?; - if segment.is_headers() { - // TODO(joshie): is_block_meta - writer.prune_headers(highest_static_file_block - checkpoint_block_number)?; - } else if let Some(block) = provider.block_body_indices(checkpoint_block_number)? { - // todo joshie: is querying block_body_indices a potential issue - // once bbi is moved to sf as well - let number = highest_static_file_entry - block.last_tx_num(); - if segment.is_receipts() { - writer.prune_receipts(number, checkpoint_block_number)?; - } else { - writer.prune_transactions(number, checkpoint_block_number)?; - } - } - writer.commit()?; - } - - Ok(None) -} - -// Some code in this file is adapted from reth. It is used under the terms of -// the MIT License. 
-// -// The MIT License (MIT) -// -// Copyright (c) 2022-2025 Reth Contributors -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. diff --git a/crates/db/src/convert.rs b/crates/db/src/convert.rs deleted file mode 100644 index 7737b84..0000000 --- a/crates/db/src/convert.rs +++ /dev/null @@ -1,109 +0,0 @@ -//! Type conversion traits and implementations for converting between Reth, Alloy, and Signet types. -//! -//! This module provides a set of conversion traits that enable seamless -//! interoperability between different type systems used in the Ethereum -//! ecosystem: -//! -//! - **Reth types**: Core primitives from the Reth Ethereum client -//! - **Alloy types**: Modern Ethereum types from the Alloy framework -//! - **Signet types**: Custom types specific to the Signet protocol -use alloy::consensus::TxReceipt; - -/// Trait for types that can be converted into other types as they're already compatible. 
-/// Used for converting between alloy/reth/signet types. -pub trait DataCompat>: Sized { - /// Convert `self` into the target type. - fn convert(self) -> Other; - - /// Convert `self` into the target type by cloning. - fn clone_convert(&self) -> Other - where - Self: Clone, - { - self.clone().convert() - } -} - -impl DataCompat> for Vec -where - U: DataCompat, - T: DataCompat, -{ - fn convert(self) -> Vec { - self.into_iter().map(|item| item.convert()).collect() - } -} - -impl DataCompat for signet_evm::ExecutionOutcome { - fn convert(self) -> reth::providers::ExecutionOutcome { - let (bundle, receipts, first_block) = self.into_parts(); - - reth::providers::ExecutionOutcome { - bundle, - receipts: receipts.convert(), - first_block, - requests: Default::default(), // Requests are not present in Signet's ExecutionOutcome - } - } -} - -impl DataCompat for reth::providers::ExecutionOutcome { - fn convert(self) -> signet_evm::ExecutionOutcome { - signet_evm::ExecutionOutcome::new( - self.bundle, - self.receipts.into_iter().map(DataCompat::convert).collect(), - self.first_block, - ) - } -} - -impl DataCompat for alloy::consensus::ReceiptEnvelope { - fn convert(self) -> reth::primitives::Receipt { - reth::primitives::Receipt { - tx_type: self.tx_type(), - success: self.is_success(), - cumulative_gas_used: self.cumulative_gas_used(), - logs: self.logs().to_owned(), - } - } -} - -impl DataCompat for reth::primitives::Receipt { - fn convert(self) -> alloy::consensus::ReceiptEnvelope { - let receipt = alloy::consensus::Receipt { - status: self.status().into(), - cumulative_gas_used: self.cumulative_gas_used, - logs: self.logs.to_owned(), - }; - - match self.tx_type { - reth::primitives::TxType::Legacy => { - alloy::consensus::ReceiptEnvelope::Legacy(receipt.into()) - } - reth::primitives::TxType::Eip2930 => { - alloy::consensus::ReceiptEnvelope::Eip2930(receipt.into()) - } - reth::primitives::TxType::Eip1559 => { - alloy::consensus::ReceiptEnvelope::Eip1559(receipt.into()) - 
} - reth::primitives::TxType::Eip4844 => { - alloy::consensus::ReceiptEnvelope::Eip4844(receipt.into()) - } - reth::primitives::TxType::Eip7702 => { - alloy::consensus::ReceiptEnvelope::Eip7702(receipt.into()) - } - } - } -} - -impl DataCompat for reth::primitives::SealedHeader { - fn convert(self) -> signet_types::primitives::SealedHeader { - signet_types::primitives::SealedHeader::new(self.into_header()) - } -} - -impl DataCompat for signet_types::primitives::SealedHeader { - fn convert(self) -> reth::primitives::SealedHeader { - reth::primitives::SealedHeader::new_unhashed(self.header().to_owned()) - } -} diff --git a/crates/db/src/lib.rs b/crates/db/src/lib.rs deleted file mode 100644 index 74b7c11..0000000 --- a/crates/db/src/lib.rs +++ /dev/null @@ -1,34 +0,0 @@ -#![doc = include_str!("../README.md")] -#![warn( - missing_copy_implementations, - missing_debug_implementations, - missing_docs, - unreachable_pub, - clippy::missing_const_for_fn, - rustdoc::all -)] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![deny(unused_must_use, rust_2018_idioms)] -#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] - -mod aliases; -pub use aliases::{RuRevmState, SignetDbRw}; - -mod chain; -pub use chain::{DbExtractionResults, RuChain}; - -mod consistency; -pub use consistency::ProviderConsistencyExt; - -mod convert; -pub use convert::DataCompat; - -mod provider; - -mod tables; -pub use tables::{ - DbEnter, DbEnterToken, DbSignetEvent, DbTransact, DbZenithHeader, SignetEvents, ZenithHeaders, -}; - -mod traits; -pub use traits::{DbProviderExt, RuWriter}; diff --git a/crates/db/src/provider.rs b/crates/db/src/provider.rs deleted file mode 100644 index ec32a71..0000000 --- a/crates/db/src/provider.rs +++ /dev/null @@ -1,638 +0,0 @@ -use crate::{ - DataCompat, DbZenithHeader, RuChain, SignetDbRw, ZenithHeaders, - tables::{DbSignetEvent, SignetEvents}, - traits::RuWriter, -}; -use alloy::{ - consensus::BlockHeader, - primitives::{Address, B256, BlockNumber}, -}; -use 
reth::{ - primitives::StaticFileSegment, - providers::{ - BlockBodyIndicesProvider, BlockNumReader, BlockReader, BlockWriter, Chain, DBProvider, - HistoryWriter, OriginalValuesKnown, ProviderError, ProviderResult, StageCheckpointWriter, - StateWriteConfig, StateWriter, StaticFileProviderFactory, StaticFileWriter, - }, -}; -use reth_db::{ - cursor::{DbCursorRO, DbCursorRW}, - models::{BlockNumberAddress, StoredBlockBodyIndices}, - tables, - transaction::{DbTx, DbTxMut}, -}; -use reth_prune_types::{MINIMUM_PRUNING_DISTANCE, PruneMode}; -use signet_evm::BlockResult; -use signet_node_types::NodeTypesDbTrait; -use signet_types::primitives::RecoveredBlock; -use signet_zenith::{ - Passage::{self, Enter, EnterToken}, - Transactor::Transact, - Zenith, -}; -use std::ops::{Not, RangeInclusive}; -use tracing::{debug, instrument, trace, warn}; - -impl RuWriter for SignetDbRw -where - Db: NodeTypesDbTrait, -{ - fn last_block_number(&self) -> ProviderResult { - BlockNumReader::last_block_number(&self.0) - } - - /// Insert an enter into the DB - /// This is a signet-specific function that inserts an enter event into the - /// [`SignetEvents`] table. - fn insert_enter(&self, ru_height: u64, index: u64, enter: Enter) -> ProviderResult<()> { - self.tx_ref() - .put::(ru_height, DbSignetEvent::Enter(index, enter)) - .map_err(Into::into) - } - - /// Insert an enter token event into the DB - /// This is a signet-specific function that inserts an enter token event - /// into the [`SignetEvents`] table. - fn insert_enter_token( - &self, - ru_height: u64, - index: u64, - enter_token: EnterToken, - ) -> ProviderResult<()> { - self.tx_ref() - .put::(ru_height, DbSignetEvent::EnterToken(index, enter_token))?; - Ok(()) - } - - /// Insert a Transact into the DB - /// This is a signet-specific function that inserts a transact event into the - /// [`SignetEvents`] table. 
- fn insert_transact( - &self, - ru_height: u64, - index: u64, - transact: &Transact, - ) -> ProviderResult<()> { - // this is unfortunate, but probably fine because the large part is the - // shared Bytes object. - let t = transact.clone(); - self.tx_ref() - .put::(ru_height, DbSignetEvent::Transact(index, t)) - .map_err(Into::into) - } - - fn insert_signet_header( - &self, - header: Zenith::BlockHeader, - ru_height: u64, - ) -> ProviderResult<()> { - self.tx_ref().put::(ru_height, header.into())?; - - Ok(()) - } - - fn get_signet_header(&self, ru_height: u64) -> ProviderResult> { - self.tx_ref().get::(ru_height).map(|h| h.map(Into::into)).map_err(Into::into) - } - - /// Inserts the zenith block into the database, always modifying the following tables: - /// * [`CanonicalHeaders`](tables::CanonicalHeaders) - /// * [`Headers`](tables::Headers) - /// * [`HeaderTerminalDifficulties`](tables::HeaderTerminalDifficulties) - /// * [`HeaderNumbers`](tables::HeaderNumbers) - /// * [`BlockBodyIndices`](tables::BlockBodyIndices) (through - /// [`RuWriter::append_signet_block_body`]) - /// - /// If there are transactions in the block, the following tables will be - /// modified: - /// * [`Transactions`](tables::Transactions) (through - /// [`RuWriter::append_signet_block_body`]) - /// * [`TransactionBlocks`](tables::TransactionBlocks) (through - /// [`RuWriter::append_signet_block_body`]) - /// - /// If the provider has __not__ configured full sender pruning, this will - /// modify [`TransactionSenders`](tables::TransactionSenders). - /// - /// If the provider has __not__ configured full transaction lookup pruning, - /// this will modify [`TransactionHashNumbers`](tables::TransactionHashNumbers). - /// - /// Ommers and withdrawals are not inserted, as Signet does not use them. 
- fn insert_signet_block( - &self, - header: Option, - block: &RecoveredBlock, - ) -> ProviderResult { - // Implementation largely copied from - // `BlockWriter::insert_block` - // in `reth/crates/storage/provider/src/providers/database/provider.rs` - // duration metrics have been removed - // - // Last reviewed at tag v1.9.0 - let block_number = block.number(); - - if let Some(header) = header { - self.insert_signet_header(header, block_number)?; - } - - let block_hash = block.block.header.hash(); - let block_header = block.block.header.header(); - - self.static_file_provider() - .get_writer(block_number, StaticFileSegment::Headers)? - .append_header(block_header, &block_hash)?; - - self.tx_ref().put::(block_hash, block_number)?; - - let mut next_tx_num = self - .tx_ref() - .cursor_read::()? - .last()? - .map(|(n, _)| n + 1) - .unwrap_or_default(); - let first_tx_num = next_tx_num; - let tx_count = block.block.body.transactions.len() as u64; - - for (sender, transaction) in block.senders.iter().zip(block.block.body.transactions()) { - let hash = *transaction.hash(); - debug_assert_ne!(hash, B256::ZERO, "transaction hash is zero"); - - if self.prune_modes_ref().sender_recovery.as_ref().is_none_or(|m| !m.is_full()) { - self.tx_ref().put::(next_tx_num, *sender)?; - } - - if self.prune_modes_ref().transaction_lookup.is_none_or(|m| !m.is_full()) { - self.tx_ref().put::(hash, next_tx_num)?; - } - - next_tx_num += 1; - } - - self.append_signet_block_body((block_number, block))?; - - debug!(?block_number, "Inserted block"); - - Ok(StoredBlockBodyIndices { first_tx_num, tx_count }) - } - - /// Appends the body of a signet block to the database. 
- fn append_signet_block_body(&self, body: (BlockNumber, &RecoveredBlock)) -> ProviderResult<()> { - // Implementation largely copied from - // `DatabaseProvider::append_block_bodies` - // in `reth/crates/storage/provider/src/providers/database/provider.rs` - // duration metrics have been removed, and the implementation has been - // modified to work with a single signet block. - // - // last reviewed at tag v1.10.1 - - let from_block = body.0; - let sf = self.static_file_provider(); - - // Initialize writer if we will be writing transactions to staticfiles - let mut tx_writer = sf.get_writer(from_block, StaticFileSegment::Transactions)?; - - let mut block_indices_cursor = self.tx_ref().cursor_write::()?; - let mut tx_block_cursor = self.tx_ref().cursor_write::()?; - - let block_number = body.0; - let block = body.1; - - // Get id for the next tx_num or zero if there are no transactions. - let mut next_tx_num = tx_block_cursor.last()?.map(|(n, _)| n + 1).unwrap_or_default(); - - // Increment block on static file header - tx_writer.increment_block(block_number)?; - - let tx_count = block.block.body.transactions.len() as u64; - let block_indices = StoredBlockBodyIndices { first_tx_num: next_tx_num, tx_count }; - - // insert block meta - block_indices_cursor.append(block_number, &block_indices)?; - - // write transaction block index - if tx_count != 0 { - tx_block_cursor.append(block_indices.last_tx_num(), &block_number)?; - } - - // Write transactions - for transaction in block.block.body.transactions() { - tx_writer.append_transaction(next_tx_num, transaction)?; - - // Increment transaction id for each transaction - next_tx_num += 1; - } - - debug!( - target: "signet_db_lifecycle", - ?block_number, - "Inserted block body" - ); - - // NB: Here we'd usually write ommers and withdrawals, via - // `write_block_bodies` (which does not write txns, as you might - // expect). Signet doesn't have ommers or withdrawals. Therefore we're - // able to just return. 
- Ok(()) - } - - fn get_signet_headers( - &self, - range: RangeInclusive, - ) -> ProviderResult> { - // Implementation largely copied from - // `DatabaseProvider::get_or_take` - // in `reth/crates/storage/provider/src/providers/database/provider.rs` - // which after 1.1.3, has been removed and its functionality inlined. - // - // We have to customize the impl to unwrap the DbZenithHeader - let mut items = Vec::new(); - - trace!(target: "signet_db_lifecycle", "getting zenith headers"); - let mut cursor = self.tx_ref().cursor_read::()?; - let mut walker = cursor.walk_range(range)?; - - while let Some((k, DbZenithHeader(e))) = walker.next().transpose()? { - items.push((k, e)) - } - - Ok(items) - } - - /// Take zenith headers from the DB. - fn take_signet_headers_above( - &self, - target: BlockNumber, - ) -> ProviderResult> { - // Implementation largely copied from - // `DatabaseProvider::get_or_take` - // in `reth/crates/storage/provider/src/providers/database/provider.rs` - // which after 1.1.3, has been removed and the functionality inlined. - - // We have to customize the impl to unwrap the DB enters - let mut items = Vec::new(); - trace!(target: "signet_db_lifecycle", "taking zenith headers"); - let mut cursor_write = self.tx_ref().cursor_write::()?; - let mut walker = cursor_write.walk_range(target + 1..)?; - while let Some((k, DbZenithHeader(e))) = walker.next().transpose()? { - walker.delete_current()?; - items.push((k, e)) - } - - Ok(items) - } - - /// Remove [`Zenith::BlockHeader`] objects above the specified height from the DB. - fn remove_signet_headers_above(&self, target: BlockNumber) -> ProviderResult<()> { - self.remove::(target + 1..)?; - Ok(()) - } - - /// Get [`Passage::EnterToken`], [`Passage::Enter`] and - /// [`Transactor::Transact`] events. 
- /// - /// [`Transactor::Transact`]: signet_zenith::Transactor::Transact - fn get_signet_events( - &self, - range: RangeInclusive, - ) -> ProviderResult> { - let mut cursor = self.tx_ref().cursor_read::()?; - let walker = cursor.walk_range(range)?; - walker.collect::, _>>().map_err(Into::into) - } - - /// Take [`Passage::EnterToken`]s from the DB. - fn take_signet_events_above( - &self, - target: BlockNumber, - ) -> ProviderResult> { - let range = target + 1..=self.last_block_number()?; - let items = self.get_signet_events(range)?; - self.remove_signet_events_above(target)?; - Ok(items) - } - - /// Remove [`Passage::EnterToken`], [`Passage::Enter`] and - /// [`Transactor::Transact`] events above the specified height from the DB. - /// - /// [`Transactor::Transact`]: signet_zenith::Transactor::Transact - fn remove_signet_events_above(&self, target: BlockNumber) -> ProviderResult<()> { - self.remove::(target + 1..)?; - Ok(()) - } - - /// Appends the signet-related contents of a host block to the DB: - /// (RU block, state, enters, enter tokens, transactions) - /// The contents MUST be appended in the following order: - /// - The Signet Block (through `RuWriter::insert_signet_block`) - /// - The state modified by the block (through `RuWriter::ru_write_state`) - /// - The enters, if any (through `RuWriter::insert_enter`) - /// - The enter tokens, if any (through `RuWriter::insert_enter_token`) - /// - The force-included transactions, if any (through `RuWriter::insert_transact`) - /// - /// Several DB tables are affected throughout this process. For a detailed breakdown, - /// see the documentation for each function. 
- fn append_host_block( - &self, - header: Option, - transacts: impl IntoIterator, - enters: impl IntoIterator, - enter_tokens: impl IntoIterator, - block_result: &BlockResult, - ) -> ProviderResult<()> { - // Implementation largely copied from - // `BlockWriter::append_blocks_with_state` - // in `reth/crates/storage/provider/src/providers/database/provider.rs` - // duration metrics have been removed - // - // last reviewed at tag v1.9.0 - - let BlockResult { sealed_block: block, execution_outcome, .. } = block_result; - - let ru_height = block.number(); - self.insert_signet_block(header, block)?; - - // Write the state and match the storage location that Reth uses. - self.ru_write_state(execution_outcome, OriginalValuesKnown::No)?; - - // NB: At this point, reth writes hashed state and trie updates. Signet - // skips this. We re-use these tables to write the enters, enter tokens, - // and transact events. - - let mut index: u64 = 0; - for enter in enters.into_iter() { - self.insert_enter(ru_height, index, enter)?; - debug!(ru_height, index, "inserted enter"); - index += 1; - } - - for enter_token in enter_tokens.into_iter() { - self.insert_enter_token(ru_height, index, enter_token)?; - debug!(ru_height, index, "inserted enter token"); - index += 1; - } - - for transact in transacts.into_iter() { - self.insert_transact(ru_height, index, &transact)?; - debug!(ru_height, index, "inserted transact"); - index += 1; - } - - self.update_history_indices(ru_height..=ru_height)?; - - self.update_pipeline_stages(ru_height, false)?; - - debug!(target: "signet_db_lifecycle", ru_height, "Appended blocks"); - - Ok(()) - } - - #[instrument(skip(self))] - fn ru_take_blocks_and_execution_above(&self, target: BlockNumber) -> ProviderResult { - // Implementation largely copied from - // `BlockExecutionWriter::take_block_and_execution_above` - // in `reth/crates/storage/provider/src/providers/database/provider.rs` - // - // last reviewed at tag v1.9.0 - - let range = target + 
1..=self.last_block_number()?; - - // This block is copied from `unwind_trie_state_range` - // - // last reviewed at tag v1.9.0 - { - let changed_accounts = self - .tx_ref() - .cursor_read::()? - .walk_range(range.clone())? - .collect::, _>>()?; - // There's no need to also unwind account hashes, since that is - // only useful for filling intermediate tables that deal with state - // root calculation, which we don't use. - self.unwind_account_history_indices(changed_accounts.iter())?; - - let storage_start = BlockNumberAddress((target, Address::ZERO)); - - // Unwind storage history indices. Similarly, we don't need to - // unwind storage hashes, since we don't use them. - let changed_storages = self - .tx_ref() - .cursor_read::()? - .walk_range(storage_start..)? - .collect::, _>>()?; - - self.unwind_storage_history_indices(changed_storages.iter().copied())?; - - // We also skip calculating the reverted root here. - } - - trace!("trie state unwound"); - - let execution_state = self.take_state_above(target)?; - - trace!("state taken"); - - // get blocks - let blocks = self.recovered_block_range(range.clone())?; - - trace!(count = blocks.len(), "blocks loaded"); - - // remove block bodies it is needed for both get block range and get block execution results - // that is why it is deleted afterwards. - self.remove_blocks_above(target)?; - - trace!("blocks removed"); - - // SIGNET-SPECIFIC - // This is a Signet-specific addition that removes the enters, - // entertokens, zenith headers, and transact events. 
- let ru_info = self.take_extraction_results_above(target)?.into_iter().collect(); - - trace!("extraction results taken"); - - // Update pipeline stages - self.update_pipeline_stages(target, true)?; - - let chain = Chain::new(blocks, execution_state, Default::default()); - - debug!("Succesfully reverted blocks and updated pipeline stages"); - - Ok(RuChain { inner: chain, ru_info }) - } - - #[instrument(skip(self))] - fn ru_remove_blocks_and_execution_above(&self, target: BlockNumber) -> ProviderResult<()> { - // Implementation largely copied from - // `BlockExecutionWriter::remove_block_and_execution_above` - // in `reth/crates/storage/provider/src/providers/database/provider.rs` - // duration metrics have been removed - // - // last reviewed at tag v1.9.0 - - let range = target + 1..=self.last_block_number()?; - - // This block is copied from `unwind_trie_state_range` - // - // last reviewed at tag v1.9.0 - { - let changed_accounts = self - .tx_ref() - .cursor_read::()? - .walk_range(range.clone())? - .collect::, _>>()?; - // There's no need to also unwind account hashes, since that is - // only useful for filling intermediate tables that deal with state - // root calculation, which we don't use. - self.unwind_account_history_indices(changed_accounts.iter())?; - - let storage_start = BlockNumberAddress((target, Address::ZERO)); - - // Unwind storage history indices. Similarly, we don't need to - // unwind storage hashes, since we don't use them. - let changed_storages = self - .tx_ref() - .cursor_read::()? - .walk_range(storage_start..)? - .collect::, _>>()?; - - self.unwind_storage_history_indices(changed_storages.iter().copied())?; - - // We also skip calculating the reverted root here. 
- } - - self.remove_state_above(target)?; - self.remove_blocks_above(target)?; - - // Signet specific: - self.remove_extraction_results_above(target)?; - - // Update pipeline stages - self.update_pipeline_stages(target, true)?; - - Ok(()) - } - - fn ru_write_state( - &self, - execution_outcome: &signet_evm::ExecutionOutcome, - is_value_known: OriginalValuesKnown, - ) -> ProviderResult<()> { - // Implementation largely copied from - // `StateWriter::write_state` for `DatabaseProvider` - // in `reth/crates/storage/provider/src/providers/database/provider.rs` - // - // Last reviewed at tag v1.9.0 - let first_block = execution_outcome.first_block(); - let block_count = execution_outcome.len() as u64; - let last_block = execution_outcome.last_block(); - let block_range = first_block..=last_block; - - let tip = self.last_block_number()?.max(last_block); - - let (plain_state, reverts) = - execution_outcome.bundle().to_plain_state_and_reverts(is_value_known); - - self.write_state_reverts(reverts, first_block, StateWriteConfig::default())?; - self.write_state_changes(plain_state)?; - - // Fetch the first transaction number for each block in the range - let block_indices: Vec<_> = self - .block_body_indices_range(block_range)? - .into_iter() - .map(|b| b.first_tx_num) - .collect(); - - // Ensure all expected blocks are present. - if block_indices.len() < block_count as usize { - let missing_blocks = block_count - block_indices.len() as u64; - return Err(ProviderError::BlockBodyIndicesNotFound( - last_block.saturating_sub(missing_blocks - 1), - )); - } - - let has_receipts_pruning = self.prune_modes_ref().has_receipts_pruning(); - - // Prepare receipts cursor if we are going to write receipts to the database - // - // We are writing to database if requested or if there's any kind of receipt pruning - // configured - let mut receipts_cursor = - self.tx_ref().cursor_write::>()?; - - // SIGNET: This is a departure from Reth's implementation. 
Becuase their - // impl is on `DatabaseProvider`, it has access to the static file - // provider which is its own prop, and has access to its private field. - // We are implementing this on `DatabaseProviderRW`, and are not able - // to borrow from the inner, only to clone it. So we break up the - // static file provider into a separate variable, and then use it to - // create the static file writer. - let sfp = self.0.static_file_provider(); - - let mut receipts_static_writer = has_receipts_pruning - .not() - .then(|| sfp.get_writer(first_block, StaticFileSegment::Receipts)) - .transpose()?; - - // All receipts from the last 128 blocks are required for blockchain tree, even with - // [`PruneSegment::ContractLogs`]. - let prunable_receipts = - PruneMode::Distance(MINIMUM_PRUNING_DISTANCE).should_prune(first_block, tip); - - for (idx, (receipts, first_tx_index)) in - execution_outcome.receipts().iter().zip(block_indices).enumerate() - { - let block_number = first_block + idx as u64; - - // Increment block number for receipts static file writer - if let Some(writer) = receipts_static_writer.as_mut() { - writer.increment_block(block_number)?; - } - - // Skip writing receipts if pruning configuration requires us to. 
- if prunable_receipts - && self - .prune_modes_ref() - .receipts - .is_some_and(|mode| mode.should_prune(block_number, tip)) - { - continue; - } - - for (idx, receipt) in receipts.iter().map(DataCompat::clone_convert).enumerate() { - let receipt_idx = first_tx_index + idx as u64; - - if let Some(writer) = &mut receipts_static_writer { - writer.append_receipt(receipt_idx, &receipt)?; - } else { - receipts_cursor.append(receipt_idx, &receipt)?; - } - } - } - - Ok(()) - } -} - -// Some code in this file has been copied and modified from reth -// -// The original license is included below: -// -// The MIT License (MIT) -// -// Copyright (c) 2022-2024 Reth Contributors -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -//. -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
diff --git a/crates/db/src/tables.rs b/crates/db/src/tables.rs deleted file mode 100644 index b090ac3..0000000 --- a/crates/db/src/tables.rs +++ /dev/null @@ -1,408 +0,0 @@ -use alloy::{ - primitives::{Address, B256, BlockNumber, Bytes, U256, bytes::BufMut}, - rlp::Buf, -}; -use reth_db::{ - DatabaseError, - table::{Compress, Decompress, DupSort, Table}, - tables, -}; -use signet_zenith::{ - Passage::{Enter, EnterToken}, - Transactor::Transact, - Zenith::{self, BlockHeader}, -}; - -const FLAG_TRANSACT: u8 = 0; -const FLAG_ENTER: u8 = 1; -const FLAG_ENTER_TOKEN: u8 = 2; - -/// Table that maps heights numbers to zenith headers. We reuse a table from -/// reth's existing schema for this. -#[derive(Debug, Clone, Copy)] -pub struct ZenithHeaders { - _private: (), -} - -impl Table for ZenithHeaders { - const NAME: &'static str = ::NAME; - - const DUPSORT: bool = ::DUPSORT; - - type Key = u64; - - type Value = DbZenithHeader; -} - -/// Newtype for [`BlockHeader`] that implements [`Compress`] and [`Decompress`]. -/// -/// This is an implementation detail of the [`ZenithHeaders`] table, and should -/// not be used outside the DB module. 
-#[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize)] -pub struct DbZenithHeader(pub BlockHeader); - -impl From for DbZenithHeader { - fn from(header: BlockHeader) -> Self { - Self(header) - } -} - -impl From for BlockHeader { - fn from(header: DbZenithHeader) -> Self { - header.0 - } -} - -impl Compress for DbZenithHeader { - type Compressed = as Compress>::Compressed; - - fn compress_to_buf>(&self, buf: &mut B) { - let DbZenithHeader(Zenith::BlockHeader { - rollupChainId, - hostBlockNumber, - gasLimit, - rewardAddress, - blockDataHash, - }) = self; - buf.put_slice(&rollupChainId.to_le_bytes::<32>()); - buf.put_slice(&hostBlockNumber.to::().to_le_bytes()); - buf.put_slice(&gasLimit.to::().to_le_bytes()); - buf.put_slice(rewardAddress.as_ref()); - buf.put_slice(blockDataHash.as_ref()); - } -} - -impl Decompress for DbZenithHeader { - fn decompress(value: &[u8]) -> Result { - if value.len() < 32 + 8 + 8 + 20 + 32 { - tracing::error!(target: "signet", "decoding error"); - return Err(DatabaseError::Decode); - } - - Ok(Self(Zenith::BlockHeader { - rollupChainId: U256::from_le_slice(&value[0..32]), - hostBlockNumber: U256::from(u64::from_le_bytes(value[32..40].try_into().unwrap())), - gasLimit: U256::from(u64::from_le_bytes(value[40..48].try_into().unwrap())), - rewardAddress: Address::from_slice(&value[48..68]), - blockDataHash: B256::from_slice(&value[68..100]), - })) - } -} - -/// Newtype for extracted Signet events that implements [`Compress`] and -/// [`Decompress`]. -/// -/// This is an implementation detail of the [`SignetEvents`] table, and should -/// not be used outside the DB module. -/// -/// Each event is stored as a separate entry in the same table. -/// The first element of each event tuple is the event's order within all -/// events in the block. -/// -/// The second element is the event itself. -/// -/// We reuse a table from reth's existing schema for this table. 
-#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, PartialEq)] -pub enum DbSignetEvent { - /// Each Transact event is stored as a separate entry in the same table. - Transact(u64, Transact), - /// Each Enter event is stored as a separate entry in the same table. - Enter(u64, Enter), - /// Each EnterToken event is stored as a separate entry in the same table. - EnterToken(u64, EnterToken), -} - -/// Table that maps block number and index number to signet events. We reuse a -/// table from reth's existing schema for this. The key is the rollup block -/// number, and the subkey is the index of the event within the block. -#[derive(Debug, Clone, Copy)] -pub struct SignetEvents { - _private: (), -} - -impl Table for SignetEvents { - const NAME: &'static str = ::NAME; - - const DUPSORT: bool = ::DUPSORT; - - type Key = BlockNumber; - - type Value = DbSignetEvent; -} - -impl DupSort for SignetEvents { - type SubKey = u64; -} - -/// Newtype for [`Transactor::Transact`] that implements [`Compress`] and -/// [`Decompress`]. -/// -/// This is an implementation detail of the [`SignetEvents`] table, and -/// should not be used outside the DB module. -/// -/// The two fields are the index of the transact within the set of all -/// transacts within the block, and the transact itself. The index is used as a -/// subkey in the table. I.e. if this is the first transact in the block, the -/// index would be 0. 
-/// -/// [`Transactor::Transact`]: signet_zenith::Transactor::Transact -#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] -pub struct DbTransact(pub u64, pub Transact); - -impl From for Transact { - fn from(transact: DbTransact) -> Self { - transact.1 - } -} - -// TODO: ENG-484 - Consider using CompactU256 -// https://linear.app/initiates/issue/ENG-484/consider-using-compactu256-in-compress-impls -impl Compress for DbTransact { - type Compressed = as Compress>::Compressed; - - fn compress_to_buf>(&self, buf: &mut B) { - let Transact { rollupChainId, sender, to, data, value, gas, maxFeePerGas } = &self.1; - - buf.put_slice(&self.0.to_be_bytes()); - buf.put_slice(&rollupChainId.to_le_bytes::<32>()); - buf.put_slice(sender.as_ref()); - buf.put_slice(to.as_ref()); - buf.put_slice(&value.to_le_bytes::<32>()); - buf.put_slice(&gas.to_le_bytes::<32>()); - buf.put_slice(&maxFeePerGas.to_le_bytes::<32>()); - // variable element last - buf.put_slice(data.as_ref()); - } -} - -impl Decompress for DbTransact { - fn decompress(value: &[u8]) -> Result { - if value.len() < 176 { - return Err(DatabaseError::Decode); - } - - let data = if value.len() >= 176 { - Bytes::copy_from_slice(&value[176..]) - } else { - Default::default() - }; - - Ok(Self( - u64::from_be_bytes(value[0..8].try_into().unwrap()), - Transact { - rollupChainId: U256::from_le_slice(&value[8..40]), - sender: Address::from_slice(&value[40..60]), - to: Address::from_slice(&value[60..80]), - data, - value: U256::from_le_slice(&value[80..112]), - gas: U256::from_le_slice(&value[112..144]), - maxFeePerGas: U256::from_le_slice(&value[144..176]), - }, - )) - } -} - -/// Newtype for [`Passage::Enter`] that implements [`Compress`] and -/// [`Decompress`]. -/// -/// This is an implementation detail of the [`SignetEvents`] table, and should -/// not be used outside the DB module. -/// -/// The two fields are the index of the enter within the set of all -/// transacts within the block, and the enter itself. 
The index is used as a -/// subkey in the table. I.e. if this is the first enter in the block, the -/// index would be 0. -/// -/// [`Passage::Enter`]: signet_zenith::Passage::Enter -#[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize)] -pub struct DbEnter(pub u64, pub Enter); - -impl From for Enter { - fn from(enter: DbEnter) -> Self { - enter.1 - } -} - -// TODO: ENG-484 - Consider using CompactU256 -impl Compress for DbEnter { - type Compressed = as Compress>::Compressed; - - fn compress_to_buf>(&self, buf: &mut B) { - // BE here is important for the subkey - buf.put_slice(&self.0.to_be_bytes()); - buf.put_slice(&self.1.rollupChainId.to_le_bytes::<32>()); - buf.put_slice(self.1.rollupRecipient.as_ref()); - buf.put_slice(&self.1.amount.to_le_bytes::<32>()); - } -} - -impl Decompress for DbEnter { - fn decompress(value: &[u8]) -> Result { - if value.len() < 8 + 32 + 20 + 32 { - return Err(DatabaseError::Decode); - } - - Ok(Self( - u64::from_be_bytes(value[0..8].try_into().unwrap()), - Enter { - rollupChainId: U256::from_le_slice(&value[8..40]), - rollupRecipient: Address::from_slice(&value[40..60]), - amount: U256::from_le_slice(&value[60..92]), - }, - )) - } -} - -/// Newtype for [`Passage::EnterToken`] that implements [`Compress`] and -/// [`Decompress`]. -/// -/// This is an implementation detail of the [`SignetEvents`] table, and should -/// not be used outside the DB module. 
-/// -/// [`Passage::EnterToken`]: signet_zenith::Passage::EnterToken -#[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize)] -pub struct DbEnterToken(pub u64, pub EnterToken); - -impl From for EnterToken { - fn from(enter_token: DbEnterToken) -> Self { - enter_token.1 - } -} - -impl Compress for DbEnterToken { - type Compressed = as Compress>::Compressed; - - fn compress_to_buf>(&self, buf: &mut B) { - buf.put_slice(&self.0.to_be_bytes()); // 8 bytes // BE here is important for the subkey - buf.put_slice(&self.1.rollupChainId.to_le_bytes::<32>()); // 32 bytes - buf.put_slice(self.1.rollupRecipient.as_slice()); // 20 bytes - buf.put_slice(&self.1.amount.to_le_bytes::<32>()); // 32 bytes - buf.put_slice(self.1.token.as_slice()); // 20 bytes - } -} - -impl Decompress for DbEnterToken { - fn decompress(value: &[u8]) -> Result { - if value.len() < 8 + 32 + 20 + 32 + 20 { - return Err(DatabaseError::Decode); - } - - Ok(Self( - u64::from_be_bytes(value[0..8].try_into().unwrap()), - EnterToken { - rollupChainId: U256::from_le_slice(&value[8..40]), - rollupRecipient: Address::from_slice(&value[40..60]), - amount: U256::from_le_slice(&value[60..92]), - token: Address::from_slice(&value[92..112]), - }, - )) - } -} - -impl Compress for DbSignetEvent { - type Compressed = as Compress>::Compressed; - - fn compress_to_buf>(&self, buf: &mut B) { - match self { - Self::Transact(idx, transact) => { - buf.put_u8(FLAG_TRANSACT); - DbTransact(*idx, transact.clone()).compress_to_buf(buf); - } - Self::Enter(idx, enter) => { - buf.put_u8(FLAG_ENTER); - DbEnter(*idx, *enter).compress_to_buf(buf); - } - Self::EnterToken(idx, enter_token) => { - buf.put_u8(FLAG_ENTER_TOKEN); - DbEnterToken(*idx, *enter_token).compress_to_buf(buf); - } - } - } -} - -impl Decompress for DbSignetEvent { - fn decompress(value: &[u8]) -> Result { - let value = &mut &*value; - - if value.is_empty() { - return Err(DatabaseError::Decode); - } - - match value.get_u8() { - FLAG_TRANSACT => { - let 
transact = DbTransact::decompress(value)?; - Ok(Self::Transact(transact.0, transact.1)) - } - FLAG_ENTER => { - let enter = DbEnter::decompress(value)?; - Ok(Self::Enter(enter.0, enter.1)) - } - FLAG_ENTER_TOKEN => { - let enter_token = DbEnterToken::decompress(value)?; - Ok(Self::EnterToken(enter_token.0, enter_token.1)) - } - _ => Err(DatabaseError::Decode), - } - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn db_event_transact_roundtrip() { - let event = DbSignetEvent::Transact( - 32, - Transact { - rollupChainId: U256::from(1), - sender: Address::repeat_byte(1), - to: Address::repeat_byte(2), - data: Bytes::from(vec![1, 2, 3]), - value: U256::from(100), - gas: U256::from(200), - maxFeePerGas: U256::from(300), - }, - ); - - let buf = event.clone().compress(); - - let decompressed = DbSignetEvent::decompress(buf.as_slice()).unwrap(); - assert_eq!(event, decompressed); - } - - #[test] - fn db_event_enter_roundtrip() { - let event = DbSignetEvent::Enter( - 32, - Enter { - rollupChainId: U256::from(1), - rollupRecipient: Address::repeat_byte(1), - amount: U256::from(100), - }, - ); - - let buf = event.clone().compress(); - - let decompressed = DbSignetEvent::decompress(buf.as_slice()).unwrap(); - assert_eq!(event, decompressed); - } - - #[test] - fn db_event_enter_token_roundtrip() { - let event = DbSignetEvent::EnterToken( - 32, - EnterToken { - rollupChainId: U256::from(1), - rollupRecipient: Address::repeat_byte(1), - amount: U256::from(100), - token: Address::repeat_byte(2), - }, - ); - - let buf = event.clone().compress(); - - let decompressed = DbSignetEvent::decompress(buf.as_slice()).unwrap(); - assert_eq!(event, decompressed); - } -} diff --git a/crates/db/src/traits.rs b/crates/db/src/traits.rs deleted file mode 100644 index ccb85bf..0000000 --- a/crates/db/src/traits.rs +++ /dev/null @@ -1,271 +0,0 @@ -use crate::{DbExtractionResults, DbSignetEvent, RuChain, SignetDbRw}; -use alloy::primitives::BlockNumber; -use itertools::Itertools; 
-#[cfg(doc)] -use reth::providers::DatabaseProviderRW; -use reth::providers::{OriginalValuesKnown, ProviderResult}; -use reth_db::models::StoredBlockBodyIndices; -use signet_evm::BlockResult; -use signet_node_types::NodeTypesDbTrait; -use signet_types::primitives::RecoveredBlock; -use signet_zenith::{Passage, Transactor, Zenith}; -use std::{collections::BTreeMap, ops::RangeInclusive}; -use tracing::trace; - -/// Writer for [`Passage::Enter`] events. -#[auto_impl::auto_impl(&, Arc, Box)] -pub trait RuWriter { - /// Get the last block number - fn last_block_number(&self) -> ProviderResult; - - /// Store a zenith header in the DB - fn insert_signet_header( - &self, - header: Zenith::BlockHeader, - host_height: u64, - ) -> ProviderResult<()>; - - /// Get a Zenith header from the DB. - fn get_signet_header(&self, host_height: u64) -> ProviderResult>; - - /// Store a zenith block in the DB. - fn insert_signet_block( - &self, - header: Option, - block: &RecoveredBlock, - ) -> ProviderResult; - - /// Append a zenith block body to the DB. - fn append_signet_block_body(&self, body: (BlockNumber, &RecoveredBlock)) -> ProviderResult<()>; - - /// Get zenith headers from the DB. - fn get_signet_headers( - &self, - range: RangeInclusive, - ) -> ProviderResult>; - - /// Take zenith headers from the DB. - fn take_signet_headers_above( - &self, - target: BlockNumber, - ) -> ProviderResult>; - - /// Remove [`Zenith::BlockHeader`] objects above the specified height from the DB. - fn remove_signet_headers_above(&self, target: BlockNumber) -> ProviderResult<()>; - - /// Store an enter event in the DB. - fn insert_enter(&self, height: u64, index: u64, exit: Passage::Enter) -> ProviderResult<()>; - - /// Get enters from the DB. - fn get_enters( - &self, - range: RangeInclusive, - ) -> ProviderResult> { - Ok(self - .get_signet_events(range)? 
- .into_iter() - .filter_map(|(height, events)| { - if let DbSignetEvent::Enter(_, enter) = events { - Some((height, enter)) - } else { - None - } - }) - .collect()) - } - - /// Store a transaction event in the DB. - fn insert_transact( - &self, - height: u64, - index: u64, - transact: &Transactor::Transact, - ) -> ProviderResult<()>; - - /// Get [`Transactor::Transact`] from the DB. - fn get_transacts( - &self, - range: RangeInclusive, - ) -> ProviderResult> { - Ok(self - .get_signet_events(range)? - .into_iter() - .filter_map(|(height, events)| { - if let DbSignetEvent::Transact(_, transact) = events { - Some((height, transact)) - } else { - None - } - }) - .collect()) - } - - /// Insert [`Passage::EnterToken`] into the DB. - fn insert_enter_token( - &self, - height: u64, - index: u64, - enter_token: Passage::EnterToken, - ) -> ProviderResult<()>; - - /// Get [`Passage::EnterToken`] from the DB. - fn get_enter_tokens( - &self, - range: RangeInclusive, - ) -> ProviderResult> { - Ok(self - .get_signet_events(range)? - .into_iter() - .filter_map(|(height, events)| { - if let DbSignetEvent::EnterToken(_, enter) = events { - Some((height, enter)) - } else { - None - } - }) - .collect()) - } - - /// Get [`Passage::EnterToken`], [`Passage::Enter`] and - /// [`Transactor::Transact`] events. - fn get_signet_events( - &self, - range: RangeInclusive, - ) -> ProviderResult>; - - /// Take [`Passage::EnterToken`]s from the DB. - fn take_signet_events_above( - &self, - target: BlockNumber, - ) -> ProviderResult>; - - /// Remove [`Passage::EnterToken`], [`Passage::Enter`] and - /// [`Transactor::Transact`] events above the specified height from the DB. - fn remove_signet_events_above(&self, target: BlockNumber) -> ProviderResult<()>; - - /// Get extraction results from the DB. 
- fn get_extraction_results( - &self, - range: RangeInclusive, - ) -> ProviderResult> { - let mut signet_events = self.get_signet_events(range.clone())?.into_iter().peekable(); - let mut headers = self.get_signet_headers(range.clone())?.into_iter().peekable(); - - // For each of these, it is permissible to have no entries. If there is - // no data. The`DbExtractionResults` struct will contain a `None` - // header, or an empty vector for the other fields. - let mut items = BTreeMap::new(); - for working_height in range.clone() { - let mut enters = vec![]; - let mut transacts = vec![]; - let mut enter_tokens = vec![]; - - for (_, event) in - signet_events.peeking_take_while(|(height, _)| *height == working_height) - { - match event { - DbSignetEvent::Enter(_, enter) => enters.push(enter), - DbSignetEvent::Transact(_, transact) => transacts.push(transact), - DbSignetEvent::EnterToken(_, enter_token) => enter_tokens.push(enter_token), - } - } - - let header = headers - .peeking_take_while(|(height, _)| *height == working_height) - .map(|(_, header)| header) - .next(); - - items.insert( - working_height, - DbExtractionResults { header, enters, transacts, enter_tokens }, - ); - } - - Ok(items) - } - - /// Take extraction results from the DB. - fn take_extraction_results_above( - &self, - target: BlockNumber, - ) -> ProviderResult> { - let range = target..=(1 + self.last_block_number()?); - - let items = self.get_extraction_results(range)?; - trace!(count = items.len(), "got extraction results"); - self.remove_extraction_results_above(target)?; - trace!("removed extraction results"); - Ok(items) - } - - /// Remove extraction results from the DB. 
- /// - /// This will remove the following: - /// - [`Zenith::BlockHeader`] objects - /// - [`Passage::Enter`] events - /// - [`Transactor::Transact`] events - /// - [`Passage::EnterToken`] events - fn remove_extraction_results_above(&self, target: BlockNumber) -> ProviderResult<()> { - self.remove_signet_headers_above(target)?; - self.remove_signet_events_above(target)?; - Ok(()) - } - - /// Add the output of a host block to the DB. - fn append_host_block( - &self, - header: Option, - transacts: impl IntoIterator, - enters: impl IntoIterator, - enter_tokens: impl IntoIterator, - block_result: &BlockResult, - ) -> ProviderResult<()>; - - /// Take the block and execution range from the DB, reverting the blocks - /// and returning the removed information - fn ru_take_blocks_and_execution_above(&self, target: BlockNumber) -> ProviderResult; - - /// Remove the block and execution range from the DB. - fn ru_remove_blocks_and_execution_above(&self, target: BlockNumber) -> ProviderResult<()>; - - /// Write the state of the rollup to the database. - /// - /// This should be identical to [`StateWriter::write_state`], but using a - /// [`signet_evm::ExecutionOutcome`]. - /// - /// [`StateWriter::write_state`]: reth::providers::StateWriter::write_state - fn ru_write_state( - &self, - execution_outcome: &signet_evm::ExecutionOutcome, - is_value_known: OriginalValuesKnown, - ) -> ProviderResult<()>; -} - -/// Extend the [`DatabaseProviderRW`] with a guarded commit function. -pub trait DbProviderExt: Into> -where - Db: NodeTypesDbTrait, -{ - /// Update the database. The function `f` is called with a mutable - /// reference to the database. If the function returns an error, the - /// transaction is rolled back. 
- fn update( - self, - f: impl FnOnce(&mut SignetDbRw) -> ProviderResult<()>, - ) -> ProviderResult<()>; -} - -impl DbProviderExt for T -where - Db: NodeTypesDbTrait, - T: Into>, -{ - fn update( - self, - f: impl FnOnce(&mut SignetDbRw) -> ProviderResult<()>, - ) -> ProviderResult<()> { - let mut this = self.into(); - f(&mut this)?; - this.commit().map(drop) - } -} diff --git a/crates/db/tests/common/mod.rs b/crates/db/tests/common/mod.rs deleted file mode 100644 index d230e90..0000000 --- a/crates/db/tests/common/mod.rs +++ /dev/null @@ -1,39 +0,0 @@ -use alloy::genesis::Genesis; -use reth::{ - chainspec::ChainSpec, - providers::{ - ProviderFactory, - providers::{RocksDBProvider, StaticFileProvider}, - }, -}; -use reth_db::test_utils::{ - create_test_rocksdb_dir, create_test_rw_db, create_test_static_files_dir, -}; -use reth_exex_test_utils::TmpDB as TmpDb; -use signet_node_types::SignetNodeTypes; -use std::sync::{Arc, OnceLock}; - -static GENESIS_JSON: &str = include_str!("../../../../tests/artifacts/local.genesis.json"); - -static SPEC: OnceLock> = OnceLock::new(); - -/// Returns a chain spec for tests. 
-pub fn chain_spec() -> Arc { - SPEC.get_or_init(|| { - let genesis: Genesis = serde_json::from_str(GENESIS_JSON).expect("valid genesis json"); - Arc::new(genesis.into()) - }) - .clone() -} - -/// Create a provider factory with a chain spec -pub fn create_test_provider_factory() -> ProviderFactory> { - let db = create_test_rw_db(); - let (static_dir, _) = create_test_static_files_dir(); - let (rocksdb_dir, _) = create_test_rocksdb_dir(); - - let sfp = StaticFileProvider::read_write(static_dir.keep()).expect("static file provider"); - let rocks_db = RocksDBProvider::builder(rocksdb_dir.keep()).build().unwrap(); - - ProviderFactory::new(db, chain_spec(), sfp, rocks_db).expect("provider factory") -} diff --git a/crates/db/tests/db.rs b/crates/db/tests/db.rs deleted file mode 100644 index b377d08..0000000 --- a/crates/db/tests/db.rs +++ /dev/null @@ -1,167 +0,0 @@ -#[path = "./common/mod.rs"] -mod test_common; - -use alloy::{ - consensus::{BlockBody, BlockHeader, Signed, TxEip1559, TxEnvelope}, - primitives::{Address, B256, U256}, - signers::Signature, -}; -use reth::providers::{BlockNumReader, BlockReader}; -use signet_constants::test_utils::{DEPLOY_HEIGHT, RU_CHAIN_ID}; -use signet_db::RuWriter; -use signet_types::primitives::{RecoveredBlock, SealedBlock, SealedHeader, TransactionSigned}; -use signet_zenith::Zenith; - -#[test] -fn test_ru_writer() { - let factory = test_common::create_test_provider_factory(); - - let writer = factory.provider_rw().unwrap(); - - dbg!(writer.last_block_number().unwrap()); -} - -#[test] -fn test_insert_signet_block() { - let factory = test_common::create_test_provider_factory(); - let writer = factory.provider_rw().unwrap(); - - let header = Some(Zenith::BlockHeader { - rollupChainId: U256::from(RU_CHAIN_ID), - hostBlockNumber: U256::from(DEPLOY_HEIGHT), - gasLimit: U256::from(30_000_000), - rewardAddress: Address::repeat_byte(0x11), - blockDataHash: B256::repeat_byte(0x22), - }); - - let block = RecoveredBlock { - block: SealedBlock { 
- header: SealedHeader::new(alloy::consensus::Header::default()), - body: BlockBody { - transactions: std::iter::repeat_n( - TxEnvelope::Eip1559(Signed::new_unhashed( - TxEip1559::default(), - Signature::test_signature(), - )) - .into(), - 10, - ) - .collect(), - ommers: vec![], - withdrawals: None, - }, - }, - senders: std::iter::repeat_n(Address::repeat_byte(0x33), 10).collect(), - }; - - writer.insert_signet_block(header, &block).unwrap(); - writer.commit().unwrap(); - - let reader = factory.provider_rw().unwrap(); - - // Check basic updates - assert_eq!(reader.last_block_number().unwrap(), block.number()); - // This tests resolving `BlockId::Latest` - assert_eq!(reader.best_block_number().unwrap(), block.number()); - - // Check that the block can be loaded back - let loaded_block = reader - .recovered_block_range(block.number()..=block.number()) - .unwrap() - .first() - .cloned() - .unwrap(); - assert_eq!(loaded_block.header(), block.block.header.header()); - assert_eq!(loaded_block.body().transactions.len(), block.block.body.transactions.len()); - - // Check that the ZenithHeader can be loaded back - let loaded_header = reader.get_signet_header(block.number()).unwrap(); - assert_eq!(loaded_header, header); -} - -#[test] -fn test_transaction_hash_indexing() { - use reth::providers::TransactionsProvider; - use reth_db::{cursor::DbCursorRO, tables, transaction::DbTx}; - - let factory = test_common::create_test_provider_factory(); - let writer = factory.provider_rw().unwrap(); - - let header = Some(Zenith::BlockHeader { - rollupChainId: U256::from(RU_CHAIN_ID), - hostBlockNumber: U256::from(DEPLOY_HEIGHT), - gasLimit: U256::from(30_000_000), - rewardAddress: Address::repeat_byte(0x11), - blockDataHash: B256::repeat_byte(0x22), - }); - - // Create transactions with distinct content so they have different hashes - let transactions: Vec = (0..5u64) - .map(|i| { - let tx = TxEip1559 { nonce: i, ..Default::default() }; - TxEnvelope::Eip1559(Signed::new_unhashed(tx, 
Signature::test_signature())).into() - }) - .collect(); - - // Collect the expected hashes BEFORE inserting - let expected_hashes: Vec = - transactions.iter().map(|tx: &TransactionSigned| *tx.hash()).collect(); - - let block = RecoveredBlock { - block: SealedBlock { - header: SealedHeader::new(alloy::consensus::Header::default()), - body: BlockBody { transactions, ommers: vec![], withdrawals: None }, - }, - senders: std::iter::repeat_n(Address::repeat_byte(0x33), 5).collect(), - }; - - writer.insert_signet_block(header, &block).unwrap(); - writer.commit().unwrap(); - - let reader = factory.provider_rw().unwrap(); - - // Verify each transaction hash is in the index - for (idx, expected_hash) in expected_hashes.iter().enumerate() { - // Method 1: Use provider's transaction_by_hash - let tx_result = reader.transaction_by_hash(*expected_hash).unwrap(); - assert!( - tx_result.is_some(), - "transaction_by_hash failed for tx {} with hash {}", - idx, - expected_hash - ); - - // Method 2: Query TransactionHashNumbers directly - let mut cursor = reader.tx_ref().cursor_read::().unwrap(); - let index_result = cursor.seek_exact(*expected_hash).unwrap(); - assert!( - index_result.is_some(), - "TransactionHashNumbers entry missing for tx {} with hash {}", - idx, - expected_hash - ); - - let (hash, tx_num) = index_result.unwrap(); - assert_eq!(hash, *expected_hash, "Hash mismatch in index for tx {}", idx); - assert_eq!(tx_num, idx as u64, "Unexpected tx_num for tx {}", idx); - } - - // Verify hashes match when loading block back from storage - let loaded_block = reader - .recovered_block_range(block.number()..=block.number()) - .unwrap() - .first() - .cloned() - .unwrap(); - - for (idx, (original_hash, loaded_tx)) in - expected_hashes.iter().zip(loaded_block.body().transactions.iter()).enumerate() - { - let loaded_hash = *loaded_tx.hash(); - assert_eq!( - *original_hash, loaded_hash, - "Hash mismatch after load for tx {}: original={}, loaded={}", - idx, original_hash, loaded_hash 
- ); - } -} diff --git a/crates/node-config/Cargo.toml b/crates/node-config/Cargo.toml index 5774f5e..1db70db 100644 --- a/crates/node-config/Cargo.toml +++ b/crates/node-config/Cargo.toml @@ -18,8 +18,6 @@ reth.workspace = true reth-chainspec.workspace = true reth-exex.workspace = true reth-node-api.workspace = true -reth-db = { workspace = true, optional = true} - alloy.workspace = true eyre.workspace = true @@ -30,5 +28,7 @@ trevm.workspace = true signet-genesis.workspace = true signet-block-processor.workspace = true +tempfile = { workspace = true, optional = true } + [features] -test_utils = ["dep:reth-db", "reth-db/test-utils"] +test_utils = ["dep:tempfile"] diff --git a/crates/node-config/src/test_utils.rs b/crates/node-config/src/test_utils.rs index b5d6b67..b5ca8b7 100644 --- a/crates/node-config/src/test_utils.rs +++ b/crates/node-config/src/test_utils.rs @@ -1,14 +1,14 @@ use crate::SignetNodeConfig; use init4_bin_base::utils::calc::SlotCalculator; -use reth_db::test_utils::tempdir_path; use signet_blobber::BlobFetcherConfig; use signet_genesis::GenesisSpec; use signet_types::constants::KnownChains; use std::borrow::Cow; +use tempfile::tempdir; /// Make a test config pub fn test_config() -> SignetNodeConfig { - let mut tempdir = tempdir_path(); + let mut tempdir = tempdir().unwrap().keep(); tempdir.push("signet.ipc"); // Make a new test config with the IPC endpoint set to the tempdir. 
diff --git a/crates/node-tests/Cargo.toml b/crates/node-tests/Cargo.toml index 0577309..783cab4 100644 --- a/crates/node-tests/Cargo.toml +++ b/crates/node-tests/Cargo.toml @@ -11,11 +11,13 @@ repository.workspace = true [dependencies] signet-node.workspace = true signet-node-config = { workspace = true, features = ["test_utils"] } -signet-node-types.workspace = true -signet-db.workspace = true +signet-cold = { workspace = true, features = ["in-memory"] } signet-constants.workspace = true signet-evm.workspace = true +signet-hot = { workspace = true, features = ["in-memory"] } +signet-storage.workspace = true +signet-storage-types.workspace = true signet-test-utils.workspace = true signet-types.workspace = true signet-zenith.workspace = true @@ -23,15 +25,14 @@ signet-zenith.workspace = true alloy.workspace = true reth.workspace = true -reth-db.workspace = true reth-exex.workspace = true reth-exex-test-utils.workspace = true reth-node-api.workspace = true eyre.workspace = true +tokio.workspace = true tracing.workspace = true tracing-subscriber.workspace = true -tokio.workspace = true [dev-dependencies] signet-genesis.workspace = true diff --git a/crates/node-tests/src/context.rs b/crates/node-tests/src/context.rs index b9da811..6c4538d 100644 --- a/crates/node-tests/src/context.rs +++ b/crates/node-tests/src/context.rs @@ -2,31 +2,29 @@ use crate::{ HostBlockSpec, NotificationSpec, NotificationWithSidecars, RuBlockSpec, convert::ToRethPrimitive, types::{CtxProvider, Log, TestCounterInstance, TestErc20Instance, TestLogInstance}, - utils::create_test_provider_factory_with_chain_spec, }; use alloy::{ consensus::{BlockHeader, TxEnvelope, constants::ETH_TO_WEI}, genesis::{Genesis, GenesisAccount}, network::{Ethereum, EthereumWallet, TransactionBuilder as _}, - primitives::{Address, I256, Sign, U256, keccak256, map::HashSet}, + primitives::{Address, B256, I256, Sign, U256, keccak256, map::HashSet}, providers::{ Provider as _, ProviderBuilder, SendableTx, 
fillers::{BlobGasFiller, SimpleNonceManager}, }, rpc::types::eth::{TransactionReceipt, TransactionRequest}, }; -use reth::{ - primitives::Account, - providers::{AccountReader, BlockNumReader, ProviderFactory}, - transaction_pool::{TransactionOrigin, TransactionPool, test_utils::MockTransaction}, -}; -use reth_db::{PlainAccountState, transaction::DbTxMut}; -use reth_exex_test_utils::{Adapter, TestExExHandle, TmpDB as TmpDb}; +use reth::transaction_pool::{TransactionOrigin, TransactionPool, test_utils::MockTransaction}; +use reth_exex_test_utils::{Adapter, TestExExHandle}; use reth_node_api::FullNodeComponents; -use signet_db::DbProviderExt; +use signet_cold::{BlockData, ColdStorageReadHandle, mem::MemColdBackend}; +use signet_hot::{db::UnsafeDbWrite, mem::MemKv}; use signet_node::{NodeStatus, SignetNodeBuilder}; use signet_node_config::test_utils::test_config; -use signet_node_types::SignetNodeTypes; +use signet_storage::{CancellationToken, HistoryRead, HistoryWrite, HotKv, UnifiedStorage}; +use signet_storage_types::{ + Account, BlockNumberList, DbSignetEvent, EthereumHardfork, RecoveredTx, SealedHeader, +}; use signet_test_utils::contracts::counter::COUNTER_DEPLOY_CODE; use signet_types::constants::{HostPermitted, RollupPermitted, SignetSystemConstants}; use signet_zenith::{HostOrders::OrdersInstance, RollupPassage::RollupPassageInstance}; @@ -46,7 +44,7 @@ use tracing::instrument; /// instance. 
/// - The components for the Signet Node instance /// - A receiver for the node status (latest block processed) -/// - A DB provider factory +/// - Unified storage backed by in-memory hot and cold storage /// - An alloy provider connected to the Signet Node RPC, /// - Configured with standard fillers /// - A height, used to fill in block numbers for host block notifications @@ -62,8 +60,8 @@ pub struct SignetTestContext { /// The Signet Node status receiver pub node_status: watch::Receiver, - /// The provider factory for the Signet Node instance - pub factory: ProviderFactory>, + /// Unified hot + cold storage for the rollup. + pub storage: Arc>, /// An alloy provider connected to the Signet Node RPC. pub alloy_provider: CtxProvider, @@ -80,6 +78,9 @@ pub struct SignetTestContext { /// Test addresses, copied from [`signet_test_utils::users::TEST_USERS`] for /// convenience pub addresses: [Address; 10], + + /// Cancellation token for the cold storage task. + cancel_token: CancellationToken, } impl core::fmt::Debug for SignetTestContext { @@ -88,6 +89,12 @@ impl core::fmt::Debug for SignetTestContext { } } +impl Drop for SignetTestContext { + fn drop(&mut self) { + self.cancel_token.cancel(); + } +} + impl SignetTestContext { /// Make a new test env #[instrument] @@ -96,17 +103,81 @@ impl SignetTestContext { let (ctx, handle) = reth_exex_test_utils::test_exex_context().await.unwrap(); let components = ctx.components.clone(); - // set up Signet Node db + // set up Signet Node storage let constants = cfg.constants().unwrap(); - let chain_spec: Arc<_> = cfg.chain_spec().clone(); - let factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); + let cancel_token = CancellationToken::new(); + let hot = MemKv::new(); + + // Load genesis into hot storage + { + let writer = hot.writer().unwrap(); + writer.load_genesis(cfg.genesis(), &EthereumHardfork::Paris).unwrap(); + writer.commit().unwrap(); + } + + // Patch genesis header to include base_fee_per_gas 
from the genesis + // config. `load_genesis()` doesn't copy this field to the stored + // header, which breaks EIP-1559 gas estimation via the RPC. + if let Some(base_fee) = cfg.genesis().base_fee_per_gas { + let sealed = { + let reader = hot.reader().unwrap(); + signet_hot::db::HotDbRead::get_header(&reader, 0).unwrap().unwrap() + }; + let mut header = sealed.into_inner(); + header.base_fee_per_gas = Some(base_fee as u64); + let writer = hot.writer().unwrap(); + writer.put_header(&SealedHeader::new(header)).unwrap(); + writer.commit().unwrap(); + } + + // set up some keys and addresses + let keys = &signet_test_utils::users::TEST_SIGNERS; + let addresses = *signet_test_utils::users::TEST_USERS; + + // Mint balances for test addresses + let mint_amnt = U256::from(1_000) * U256::from(ETH_TO_WEI); + { + // Read existing accounts before acquiring write lock + let existing_accounts: Vec<_> = { + let reader = hot.reader().unwrap(); + addresses + .iter() + .map(|addr| { + signet_hot::db::HotDbRead::get_account(&reader, addr) + .unwrap() + .unwrap_or_default() + }) + .collect() + }; + let writer = hot.writer().unwrap(); + for (address, existing) in addresses.iter().zip(existing_accounts) { + let updated = + Account { balance: existing.balance.saturating_add(mint_amnt), ..existing }; + writer.put_account(address, &updated).unwrap(); + } + writer.commit().unwrap(); + } + + // Create UnifiedStorage + let storage = + Arc::new(UnifiedStorage::spawn(hot, MemColdBackend::new(), cancel_token.clone())); + + // Write genesis block to cold storage so fee_history and other + // cold-backed RPC endpoints can find block 0. 
+ { + let reader = storage.reader().unwrap(); + let genesis_header = + signet_hot::db::HotDbRead::get_header(&reader, 0).unwrap().unwrap(); + let genesis_block = BlockData::new(genesis_header, vec![], vec![], vec![], None); + storage.cold().append_block(genesis_block).await.unwrap(); + } let alias_oracle: Arc>> = Arc::new(Mutex::new(HashSet::default())); let (node, mut node_status) = SignetNodeBuilder::new(cfg.clone()) .with_ctx(ctx) - .with_factory(factory.clone()) + .with_storage(Arc::clone(&storage)) .with_alias_oracle(Arc::clone(&alias_oracle)) .build() .unwrap(); @@ -115,30 +186,12 @@ impl SignetTestContext { let node = tokio::spawn(node.start()); node_status.changed().await.unwrap(); - // set up some keys and addresses - let keys = &signet_test_utils::users::TEST_SIGNERS; - let addresses = *signet_test_utils::users::TEST_USERS; - - // register the signers on the alloy proider + // register the signers on the alloy provider let mut wallet = EthereumWallet::new(keys[0].clone()); for key in keys.iter().skip(1) { wallet.register_signer(key.clone()); } - let mint_amnt = U256::from(1_000) * U256::from(ETH_TO_WEI); - factory - .provider_rw() - .unwrap() - .update(|rw| { - for address in addresses.into_iter() { - let mut account = rw.basic_account(&address)?.unwrap_or_default(); - account.balance = account.balance.saturating_add(mint_amnt); - rw.tx_ref().put::(address, account)?; - } - Ok(()) - }) - .unwrap(); - // after RPC booted, we can create the alloy provider let alloy_provider = ProviderBuilder::new_with_network() .disable_recommended_fillers() @@ -155,13 +208,13 @@ impl SignetTestContext { handle, components, node_status, - factory, + storage, alloy_provider, constants, height: AtomicU64::new(cfg.constants().unwrap().host_deploy_height()), - alias_oracle, addresses, + cancel_token, }; (this, node) @@ -193,6 +246,58 @@ impl SignetTestContext { self.constants.clone() } + /// Get a cold storage read handle. 
+ pub fn cold(&self) -> ColdStorageReadHandle { + self.storage.cold_reader() + } + + /// Get a header by block number from hot storage. + pub fn header_by_number(&self, number: u64) -> Option { + let reader = self.storage.reader().unwrap(); + signet_hot::db::HotDbRead::get_header(&reader, number).unwrap() + } + + /// Get the last block number from hot storage. + pub fn last_block_number(&self) -> u64 { + let reader = self.storage.reader().unwrap(); + HistoryRead::last_block_number(&reader).unwrap().unwrap_or(0) + } + + /// Get all transactions in a block from cold storage. + pub async fn transactions_in_block( + &self, + block: u64, + ) -> Vec { + self.cold().get_transactions_in_block(block).await.unwrap() + } + + /// Get signet events in a single block from cold storage. + pub async fn signet_events_in_block(&self, block: u64) -> Vec { + self.cold().get_signet_events_in_block(block).await.unwrap() + } + + /// Look up a transaction by hash from cold storage. + pub async fn transaction_by_hash(&self, hash: B256) -> Option { + self.cold().get_tx_by_hash(hash).await.unwrap().map(|c| c.into_inner()) + } + + /// Get the account history (block number list) for an address. + pub fn account_history(&self, address: Address) -> Option { + let reader = self.storage.reader().unwrap(); + HistoryRead::last_account_history(&reader, address).unwrap().map(|(_, list)| list) + } + + /// Get an account's state at a specific block height. + /// + /// Returns `None` if the account did not exist at the given height + /// (i.e. balance, nonce, and bytecode are all zero/empty). 
+ pub fn account_at_height(&self, address: Address, height: u64) -> Option { + let reader = self.storage.reader().unwrap(); + HistoryRead::get_account_at_height(&reader, &address, Some(height)) + .unwrap() + .filter(|a| !a.is_empty()) + } + /// Send a notification to the Signet Node instance pub async fn send_notification(&self, notification: NotificationWithSidecars) { let pool = self.components.pool(); @@ -236,6 +341,21 @@ impl SignetTestContext { self.send_notification(notification).await; recv.changed().await?; + // Wait for cold storage to finish processing dispatched blocks. + // `append_blocks()` dispatches to cold asynchronously, so we + // poll until cold storage has the expected block. + let cold = self.storage.cold_reader(); + tokio::time::timeout(std::time::Duration::from_secs(30), async { + loop { + match cold.get_latest_block().await { + Ok(Some(latest)) if latest >= expected_height => break, + _ => tokio::task::yield_now().await, + } + } + }) + .await + .expect("cold storage did not reach expected height within 30s"); + // cheeky little check that the RPC is correct :) assert_eq!(self.alloy_provider.get_block_number().await.unwrap(), expected_height); @@ -286,7 +406,8 @@ impl SignetTestContext { /// Get the account for an address. pub fn account(&self, address: Address) -> Option { - self.factory.provider().unwrap().basic_account(&address).unwrap() + let reader = self.storage.reader().unwrap(); + signet_hot::db::HotDbRead::get_account(&reader, &address).unwrap() } /// Get the nonce off an addresss. @@ -381,7 +502,7 @@ impl SignetTestContext { /// assertions if run on the genesis block. If any other blocks have been /// processed it will do nothing. 
pub fn verify_allocs(&self, genesis: &Genesis) { - if self.factory.provider().unwrap().last_block_number().unwrap() != 0 { + if self.last_block_number() != 0 { return; } @@ -403,10 +524,12 @@ impl SignetTestContext { } if let Some(ref storage) = alloc.storage { + let reader = self.storage.reader().unwrap(); for (key, value) in storage { + let slot = U256::from_be_bytes(key.0); assert_eq!( - self.factory.latest().unwrap().storage(address, *key).unwrap(), - Some((*value).into()) + signet_hot::db::HotDbRead::get_storage(&reader, &address, &slot).unwrap(), + Some(U256::from_be_bytes(value.0)) ); } } diff --git a/crates/node-tests/src/lib.rs b/crates/node-tests/src/lib.rs index 80ecf4d..10a13f8 100644 --- a/crates/node-tests/src/lib.rs +++ b/crates/node-tests/src/lib.rs @@ -32,7 +32,7 @@ pub mod types; pub mod utils; pub use utils::run_test; -pub use reth_exex_test_utils::{Adapter, TestExExContext, TmpDB as TmpDb}; +pub use reth_exex_test_utils::{Adapter, TestExExContext}; pub use signet_test_utils::specs::{ HostBlockSpec, NotificationSpec, NotificationWithSidecars, RuBlockSpec, }; diff --git a/crates/node-tests/src/utils.rs b/crates/node-tests/src/utils.rs index 6d40508..f4e68e5 100644 --- a/crates/node-tests/src/utils.rs +++ b/crates/node-tests/src/utils.rs @@ -5,19 +5,7 @@ use alloy::{ signers::{SignerSync, local::PrivateKeySigner}, uint, }; -use reth::{ - chainspec::ChainSpec, - primitives::{Block, BlockBody, Header, RecoveredBlock, Transaction, TransactionSigned}, - providers::{ - ProviderFactory, - providers::{RocksDBProvider, StaticFileProvider}, - }, -}; -use reth_db::test_utils::{ - create_test_rocksdb_dir, create_test_rw_db, create_test_static_files_dir, -}; -use reth_exex_test_utils::TmpDB; -use signet_node_types::SignetNodeTypes; +use reth::primitives::{Block, BlockBody, Header, RecoveredBlock, Transaction, TransactionSigned}; use signet_zenith::Zenith; use std::{panic, sync::Once}; use tracing_subscriber::EnvFilter; @@ -127,18 +115,3 @@ pub fn 
adjust_usd_decimals_u256(amount: U256, decimals: u8) -> U256 { pub fn adjust_usd_decimals(amount: usize, decimals: u8) -> U256 { adjust_usd_decimals_u256(U256::from(amount), decimals) } - -/// Create a provider factory with a chain spec -pub fn create_test_provider_factory_with_chain_spec( - chain_spec: std::sync::Arc, -) -> ProviderFactory> { - let (static_dir, _) = create_test_static_files_dir(); - let (rocks, _) = create_test_rocksdb_dir(); - - let db = create_test_rw_db(); - let sfp = StaticFileProvider::read_write(static_dir.keep()).expect("static file provider"); - - let rocks = RocksDBProvider::builder(rocks.keep()).build().expect("rocksdb provider"); - - ProviderFactory::new(db, chain_spec, sfp, rocks).unwrap() -} diff --git a/crates/node-tests/tests/db.rs b/crates/node-tests/tests/db.rs index 84a527b..1a54d01 100644 --- a/crates/node-tests/tests/db.rs +++ b/crates/node-tests/tests/db.rs @@ -1,9 +1,10 @@ -use alloy::primitives::hex; -use reth::providers::BlockReader; use serial_test::serial; +use signet_cold::mem::MemColdBackend; +use signet_hot::{db::UnsafeDbWrite, mem::MemKv}; use signet_node::SignetNodeBuilder; use signet_node_config::test_utils::test_config; -use signet_node_tests::utils::create_test_provider_factory_with_chain_spec; +use signet_storage::{CancellationToken, HistoryRead, HistoryWrite, HotKv, UnifiedStorage}; +use signet_storage_types::EthereumHardfork; use std::sync::Arc; #[serial] @@ -16,15 +17,29 @@ async fn test_genesis() { let chain_spec: Arc<_> = cfg.chain_spec().clone(); assert_eq!(chain_spec.genesis().config.chain_id, consts.unwrap().ru_chain_id()); - let factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); + let cancel_token = CancellationToken::new(); + let hot = MemKv::new(); + { + let writer = hot.writer().unwrap(); + writer.load_genesis(cfg.genesis(), &EthereumHardfork::Paris).unwrap(); + writer.commit().unwrap(); + } + + let storage = Arc::new(UnifiedStorage::spawn(hot, MemColdBackend::new(), 
cancel_token.clone())); + let (_, _) = SignetNodeBuilder::new(cfg.clone()) .with_ctx(ctx) - .with_factory(factory.clone()) + .with_storage(Arc::clone(&storage)) .build() .unwrap(); - let genesis_block = factory.provider().unwrap().block_by_number(0).unwrap().unwrap(); + let reader = storage.reader().unwrap(); + assert!(HistoryRead::has_block(&reader, 0).unwrap()); + + let header = + signet_hot::db::HotDbRead::get_header(&reader, 0).unwrap().expect("missing genesis header"); + let zero_hash = alloy::primitives::B256::ZERO; + assert_eq!(header.parent_hash, zero_hash); - let want_hash = hex!("0x0000000000000000000000000000000000000000000000000000000000000000"); - assert_eq!(genesis_block.parent_hash, want_hash); + cancel_token.cancel(); } diff --git a/crates/node-tests/tests/host_events.rs b/crates/node-tests/tests/host_events.rs index c44b4d9..e58e3f2 100644 --- a/crates/node-tests/tests/host_events.rs +++ b/crates/node-tests/tests/host_events.rs @@ -9,9 +9,7 @@ use alloy::{ providers::Provider, sol_types::SolCall, }; -use reth::providers::{BlockNumReader, DBProvider, HeaderProvider}; use serial_test::serial; -use signet_db::{DbSignetEvent, SignetEvents}; use signet_node_tests::{ HostBlockSpec, SignetTestContext, constants::{DEFAULT_REWARD_ADDRESS, TEST_CONSTANTS}, @@ -19,6 +17,7 @@ use signet_node_tests::{ types::{Counter, TestCounterInstance}, utils::{adjust_usd_decimals, adjust_usd_decimals_u256}, }; +use signet_storage_types::DbSignetEvent; use signet_test_utils::{chain::USDC_RECORD, contracts::counter::COUNTER_BYTECODE}; use signet_types::{ constants::{HostPermitted, RollupPermitted}, @@ -337,8 +336,8 @@ async fn test_transact_underfunded_gas() { assert_eq!(contract.count().call().await.unwrap(), U256::ZERO); // check signet events for the recorded transact and that the gas equals tiny_gas - let last = ctx.factory.provider().unwrap().last_block_number().unwrap(); - let events = ctx.factory.provider().unwrap().get::(last..last + 1).unwrap(); + let last = 
ctx.last_block_number(); + let events = ctx.signet_events_in_block(last).await; // Check that the block has no transactions, i.e. that the transact was // discarded @@ -350,7 +349,7 @@ async fn test_transact_underfunded_gas() { .unwrap(); assert!(last_block.transactions.is_empty()); - let found = events.iter().find(|(_, ev)| match ev { + let found = events.iter().find(|ev| match ev { DbSignetEvent::Transact(_, Transactor::Transact { sender, to, gas, .. }) => { *sender == user && *to == contract_addr && *gas == U256::from(tiny_gas) } @@ -402,14 +401,8 @@ async fn test_signet_events() { ctx.process_block(block.clone()).await.unwrap(); // Check the base fee - let base_fee = ctx - .factory - .header_by_number(1) - .unwrap() - .unwrap() - .base_fee_per_gas - .map(U256::from) - .unwrap(); + let header_1 = ctx.header_by_number(1).unwrap(); + let base_fee = U256::from(header_1.base_fee_per_gas.unwrap()); // NB: // user_a should have received 1 USD, @@ -427,51 +420,21 @@ async fn test_signet_events() { ctx.process_block(block).await.unwrap(); // Check the base fee - let base_fee = ctx - .factory - .header_by_number(2) - .unwrap() - .unwrap() - .base_fee_per_gas - .map(U256::from) - .unwrap(); + let header_2 = ctx.header_by_number(2).unwrap(); + let base_fee = U256::from(header_2.base_fee_per_gas.unwrap()); // This time works exactly the same as above. user_a_bal.assert_decrease_exact(base_fee * U256::from(100_000)); user_b_bal.assert_increase_exact(U256::from(expected_usd_minted)); - let events = ctx.factory.provider().unwrap().get::(1..3).unwrap(); - assert_eq!(events.len(), 8); - - // The tuple is (block_number, event). 
- // We expect 4 events per block - assert_eq!(events[0].0, 1); - assert_eq!(events[1].0, 1); - assert_eq!(events[2].0, 1); - assert_eq!(events[3].0, 1); - assert_eq!(events[4].0, 2); - assert_eq!(events[5].0, 2); - assert_eq!(events[6].0, 2); - assert_eq!(events[7].0, 2); + let events_1 = ctx.signet_events_in_block(1).await; + let events_2 = ctx.signet_events_in_block(2).await; + assert_eq!(events_1.len(), 4); + assert_eq!(events_2.len(), 4); + // Events are in log_index order assert_eq!( - events[0].1, - DbSignetEvent::Transact( - 3, - Transactor::Transact { - rollupChainId, - sender: user_a, - to: user_b, - data: vec![0xab, 0xcd].into(), - value: U256::from(expected_usd_minted), - gas: U256::from(100_000), - maxFeePerGas: U256::from(GWEI_TO_WEI), - } - ) - ); - - assert_eq!( - events[1].1, + events_1[0], DbSignetEvent::Enter( 0, Passage::Enter { @@ -483,7 +446,7 @@ async fn test_signet_events() { ); assert_eq!( - events[2].1, + events_1[1], DbSignetEvent::EnterToken( 1, Passage::EnterToken { @@ -496,7 +459,7 @@ async fn test_signet_events() { ); assert_eq!( - events[3].1, + events_1[2], DbSignetEvent::EnterToken( 2, Passage::EnterToken { @@ -509,7 +472,7 @@ async fn test_signet_events() { ); assert_eq!( - events[4].1, + events_1[3], DbSignetEvent::Transact( 3, Transactor::Transact { @@ -524,8 +487,9 @@ async fn test_signet_events() { ) ); + // Events are in log_index order assert_eq!( - events[5].1, + events_2[0], DbSignetEvent::Enter( 0, Passage::Enter { @@ -537,7 +501,7 @@ async fn test_signet_events() { ); assert_eq!( - events[6].1, + events_2[1], DbSignetEvent::EnterToken( 1, Passage::EnterToken { @@ -550,7 +514,7 @@ async fn test_signet_events() { ); assert_eq!( - events[7].1, + events_2[2], DbSignetEvent::EnterToken( 2, Passage::EnterToken { @@ -561,6 +525,22 @@ async fn test_signet_events() { } ) ); + + assert_eq!( + events_2[3], + DbSignetEvent::Transact( + 3, + Transactor::Transact { + rollupChainId, + sender: user_a, + to: user_b, + data: vec![0xab, 
0xcd].into(), + value: U256::from(expected_usd_minted), + gas: U256::from(100_000), + maxFeePerGas: U256::from(GWEI_TO_WEI), + } + ) + ); }) .await; } diff --git a/crates/node-tests/tests/multiple-blocks.rs b/crates/node-tests/tests/multiple-blocks.rs index d012220..f711a83 100644 --- a/crates/node-tests/tests/multiple-blocks.rs +++ b/crates/node-tests/tests/multiple-blocks.rs @@ -2,10 +2,6 @@ use alloy::{ consensus::constants::ETH_TO_WEI, primitives::{Address, U256}, }; -use reth::providers::AccountExtReader; -use reth_db::{ - AccountChangeSets, AccountsHistory, cursor::DbCursorRO, models::ShardedKey, transaction::DbTx, -}; use serial_test::serial; use signet_constants::test_utils::HOST_USDC; use signet_node_tests::{HostBlockSpec, SignetTestContext, run_test, utils::adjust_usd_decimals}; @@ -80,7 +76,7 @@ async fn test_three_enters() { // Processes 5 blocks, setting up accounts with initial balances and histories. // // After this, A will have 500, B will have 1000, C will have 1500. They will -// also have accounthistories entries for blocks 1 to 5. +// also have account histories entries for blocks 1 to 5. 
async fn setup_accounts_history(ctx: SignetTestContext) -> SignetTestContext { let block = HostBlockSpec::new(ctx.constants()) .enter_token(USER_A, ONE_HOST_USDC, HOST_USDC) @@ -104,48 +100,30 @@ async fn test_write_account_histories() { .enter_token(USER_F, 30 * ONE_HOST_USDC, HOST_USDC); ctx.process_blocks(vec![another_block]).await.unwrap(); - let provider = ctx.factory.provider().unwrap(); - - let a_key = ShardedKey::new(USER_A, u64::MAX); - let b_key = ShardedKey::new(USER_B, u64::MAX); - let c_key = ShardedKey::new(USER_C, u64::MAX); - let d_key = ShardedKey::new(USER_D, u64::MAX); - let e_key = ShardedKey::new(USER_E, u64::MAX); - let f_key = ShardedKey::new(USER_F, u64::MAX); - - let v = provider - .tx_ref() - .cursor_read::() - .unwrap() - .walk_range(a_key.clone()..=f_key.clone()) - .unwrap() - .collect::, _>>() - .unwrap(); - - assert_eq!(v.len(), 6); - assert_eq!(v[0].0, a_key); - assert_eq!(v[1].0, b_key); - assert_eq!(v[2].0, c_key); - for i in 1..5 { - assert!(v[0].1.contains(i)); - assert!(v[1].1.contains(i)); - assert!(v[2].1.contains(i)); + let a_hist = ctx.account_history(USER_A).unwrap(); + let b_hist = ctx.account_history(USER_B).unwrap(); + let c_hist = ctx.account_history(USER_C).unwrap(); + let d_hist = ctx.account_history(USER_D).unwrap(); + let e_hist = ctx.account_history(USER_E).unwrap(); + let f_hist = ctx.account_history(USER_F).unwrap(); + + for i in 1..=5 { + assert!(a_hist.contains(i)); + assert!(b_hist.contains(i)); + assert!(c_hist.contains(i)); } - assert!(!v[0].1.contains(6)); - assert!(!v[1].1.contains(6)); - assert!(!v[2].1.contains(6)); - - assert_eq!(v[3].0, d_key); - assert_eq!(v[4].0, e_key); - assert_eq!(v[5].0, f_key); - for i in 1..5 { - assert!(!v[3].1.contains(i)); - assert!(!v[4].1.contains(i)); - assert!(!v[5].1.contains(i)); + assert!(!a_hist.contains(6)); + assert!(!b_hist.contains(6)); + assert!(!c_hist.contains(6)); + + for i in 1..=5 { + assert!(!d_hist.contains(i)); + assert!(!e_hist.contains(i)); + 
assert!(!f_hist.contains(i)); } - assert!(v[3].1.contains(6)); - assert!(v[4].1.contains(6)); - assert!(v[5].1.contains(6)); + assert!(d_hist.contains(6)); + assert!(e_hist.contains(6)); + assert!(f_hist.contains(6)); }) .await; } @@ -162,86 +140,59 @@ async fn test_write_account_histories_with_empty_block() { .enter_token(USER_F, 30 * ONE_HOST_USDC, HOST_USDC); ctx.process_blocks(vec![another_block]).await.unwrap(); - let provider = ctx.factory.provider().unwrap(); - - let a_key = ShardedKey::new(USER_A, u64::MAX); - let b_key = ShardedKey::new(USER_B, u64::MAX); - let c_key = ShardedKey::new(USER_C, u64::MAX); - let d_key = ShardedKey::new(USER_D, u64::MAX); - let e_key = ShardedKey::new(USER_E, u64::MAX); - let f_key = ShardedKey::new(USER_F, u64::MAX); - - let v = provider - .tx_ref() - .cursor_read::() - .unwrap() - .walk_range(a_key.clone()..=f_key.clone()) - .unwrap() - .collect::, _>>() - .unwrap(); - - assert_eq!(v.len(), 6); - assert_eq!(v[0].0, a_key); - assert_eq!(v[1].0, b_key); - assert_eq!(v[2].0, c_key); - for i in 1..5 { - assert!(v[0].1.contains(i)); - assert!(v[1].1.contains(i)); - assert!(v[2].1.contains(i)); + let a_hist = ctx.account_history(USER_A).unwrap(); + let b_hist = ctx.account_history(USER_B).unwrap(); + let c_hist = ctx.account_history(USER_C).unwrap(); + let d_hist = ctx.account_history(USER_D).unwrap(); + let e_hist = ctx.account_history(USER_E).unwrap(); + let f_hist = ctx.account_history(USER_F).unwrap(); + + for i in 1..=5 { + assert!(a_hist.contains(i)); + assert!(b_hist.contains(i)); + assert!(c_hist.contains(i)); } - assert!(!v[0].1.contains(6)); - assert!(!v[1].1.contains(6)); - assert!(!v[2].1.contains(6)); - - assert_eq!(v[3].0, d_key); - assert_eq!(v[4].0, e_key); - assert_eq!(v[5].0, f_key); - for i in 1..5 { - assert!(!v[3].1.contains(i)); - assert!(!v[4].1.contains(i)); - assert!(!v[5].1.contains(i)); + assert!(!a_hist.contains(6)); + assert!(!b_hist.contains(6)); + assert!(!c_hist.contains(6)); + + for i in 1..=5 { + 
assert!(!d_hist.contains(i)); + assert!(!e_hist.contains(i)); + assert!(!f_hist.contains(i)); } - assert!(v[3].1.contains(6)); - assert!(v[4].1.contains(6)); - assert!(v[5].1.contains(6)); + assert!(d_hist.contains(6)); + assert!(e_hist.contains(6)); + assert!(f_hist.contains(6)); let empty_block = HostBlockSpec::new(ctx.constants()); ctx.process_block(empty_block).await.unwrap(); // As we did not process a new RU block, the history should not change. - let v = provider - .tx_ref() - .cursor_read::() - .unwrap() - .walk_range(a_key.clone()..=f_key.clone()) - .unwrap() - .collect::, _>>() - .unwrap(); - - assert_eq!(v.len(), 6); - assert_eq!(v[0].0, a_key); - assert_eq!(v[1].0, b_key); - assert_eq!(v[2].0, c_key); - for i in 1..5 { - assert!(v[0].1.contains(i)); - assert!(v[1].1.contains(i)); - assert!(v[2].1.contains(i)); + let a_hist = ctx.account_history(USER_A).unwrap(); + let b_hist = ctx.account_history(USER_B).unwrap(); + let c_hist = ctx.account_history(USER_C).unwrap(); + let d_hist = ctx.account_history(USER_D).unwrap(); + let e_hist = ctx.account_history(USER_E).unwrap(); + let f_hist = ctx.account_history(USER_F).unwrap(); + + for i in 1..=5 { + assert!(a_hist.contains(i)); + assert!(b_hist.contains(i)); + assert!(c_hist.contains(i)); } - assert!(!v[0].1.contains(6)); - assert!(!v[1].1.contains(6)); - assert!(!v[2].1.contains(6)); - - assert_eq!(v[3].0, d_key); - assert_eq!(v[4].0, e_key); - assert_eq!(v[5].0, f_key); - for i in 1..5 { - assert!(!v[3].1.contains(i)); - assert!(!v[4].1.contains(i)); - assert!(!v[5].1.contains(i)); + assert!(!a_hist.contains(6)); + assert!(!b_hist.contains(6)); + assert!(!c_hist.contains(6)); + + for i in 1..=5 { + assert!(!d_hist.contains(i)); + assert!(!e_hist.contains(i)); + assert!(!f_hist.contains(i)); } - assert!(v[3].1.contains(6)); - assert!(v[4].1.contains(6)); - assert!(v[5].1.contains(6)); + assert!(d_hist.contains(6)); + assert!(e_hist.contains(6)); + assert!(f_hist.contains(6)); }) .await; } @@ -252,118 
+203,81 @@ async fn test_write_account_histories_with_reorg_and_empty_blocks() { run_test(|ctx| async move { let ctx = setup_accounts_history(ctx).await; - let provider = ctx.factory.provider().unwrap(); - - let a_key = ShardedKey::new(USER_A, u64::MAX); - let b_key = ShardedKey::new(USER_B, u64::MAX); - let c_key = ShardedKey::new(USER_C, u64::MAX); - let another_block = HostBlockSpec::new(ctx.constants()) .enter_token(USER_A, 10 * ONE_HOST_USDC, HOST_USDC) .enter_token(USER_B, 2 * ONE_HOST_USDC, HOST_USDC) .enter_token(USER_C, 3 * ONE_HOST_USDC, HOST_USDC); ctx.process_blocks(vec![another_block.clone()]).await.unwrap(); - let v = provider - .tx_ref() - .cursor_read::() - .unwrap() - .walk_range(a_key.clone()..=c_key.clone()) - .unwrap() - .collect::, _>>() - .unwrap(); - - assert_eq!(v.len(), 3); - assert_eq!(v[0].0, a_key); - assert_eq!(v[1].0, b_key); - assert_eq!(v[2].0, c_key); - - for i in 1..6 { - assert!(v[0].1.contains(i)); - assert!(v[1].1.contains(i)); - assert!(v[2].1.contains(i)); + let a_hist = ctx.account_history(USER_A).unwrap(); + let b_hist = ctx.account_history(USER_B).unwrap(); + let c_hist = ctx.account_history(USER_C).unwrap(); + + for i in 1..=6 { + assert!(a_hist.contains(i)); + assert!(b_hist.contains(i)); + assert!(c_hist.contains(i)); } // After reorg, the history should not contain the latest entries ctx.revert_block(another_block).await.unwrap(); - let v = provider - .tx_ref() - .cursor_read::() - .unwrap() - .walk_range(a_key.clone()..=c_key.clone()) - .unwrap() - .collect::, _>>() - .unwrap(); - - assert_eq!(v.len(), 3); - assert_eq!(v[0].0, a_key); - assert_eq!(v[1].0, b_key); - assert_eq!(v[2].0, c_key); - - for i in 1..5 { - assert!(v[0].1.contains(i)); - assert!(v[1].1.contains(i)); - assert!(v[2].1.contains(i)); + let a_hist = ctx.account_history(USER_A).unwrap(); + let b_hist = ctx.account_history(USER_B).unwrap(); + let c_hist = ctx.account_history(USER_C).unwrap(); + + for i in 1..=5 { + assert!(a_hist.contains(i)); + 
assert!(b_hist.contains(i)); + assert!(c_hist.contains(i)); } - assert!(!v[0].1.contains(6)); - assert!(!v[1].1.contains(6)); - assert!(!v[2].1.contains(6)); + assert!(!a_hist.contains(6)); + assert!(!b_hist.contains(6)); + assert!(!c_hist.contains(6)); // Now process an empty block. let empty_block = HostBlockSpec::new(ctx.constants()); ctx.process_block(empty_block).await.unwrap(); // As we did not process a new RU block, the history should not change. - let v = provider - .tx_ref() - .cursor_read::() - .unwrap() - .walk_range(a_key.clone()..=c_key.clone()) - .unwrap() - .collect::, _>>() - .unwrap(); - - assert_eq!(v.len(), 3); - assert_eq!(v[0].0, a_key); - assert_eq!(v[1].0, b_key); - assert_eq!(v[2].0, c_key); - - for i in 1..5 { - assert!(v[0].1.contains(i)); - assert!(v[1].1.contains(i)); - assert!(v[2].1.contains(i)); + let a_hist = ctx.account_history(USER_A).unwrap(); + let b_hist = ctx.account_history(USER_B).unwrap(); + let c_hist = ctx.account_history(USER_C).unwrap(); + + for i in 1..=5 { + assert!(a_hist.contains(i)); + assert!(b_hist.contains(i)); + assert!(c_hist.contains(i)); } - assert!(!v[0].1.contains(6)); - assert!(!v[1].1.contains(6)); - assert!(!v[2].1.contains(6)); + assert!(!a_hist.contains(6)); + assert!(!b_hist.contains(6)); + assert!(!c_hist.contains(6)); - // re-process the reorged block. + // re-process the reorged block. The empty block above consumed + // RU height 6, so this block lands at RU height 7. 
let another_block = HostBlockSpec::new(ctx.constants()) .enter_token(USER_A, 10 * ONE_HOST_USDC, HOST_USDC) .enter_token(USER_B, 2 * ONE_HOST_USDC, HOST_USDC) .enter_token(USER_C, 3 * ONE_HOST_USDC, HOST_USDC); ctx.process_blocks(vec![another_block.clone()]).await.unwrap(); - let v = provider - .tx_ref() - .cursor_read::() - .unwrap() - .walk_range(a_key.clone()..=c_key.clone()) - .unwrap() - .collect::, _>>() - .unwrap(); - - assert_eq!(v.len(), 3); - assert_eq!(v[0].0, a_key); - assert_eq!(v[1].0, b_key); - assert_eq!(v[2].0, c_key); - - for i in 1..6 { - assert!(v[0].1.contains(i)); - assert!(v[1].1.contains(i)); - assert!(v[2].1.contains(i)); + let a_hist = ctx.account_history(USER_A).unwrap(); + let b_hist = ctx.account_history(USER_B).unwrap(); + let c_hist = ctx.account_history(USER_C).unwrap(); + + for i in 1..=5 { + assert!(a_hist.contains(i)); + assert!(b_hist.contains(i)); + assert!(c_hist.contains(i)); } + // Block 6 was the empty block (no state changes, no history entry). + // Block 7 is the re-processed block with enter_tokens. 
+ assert!(!a_hist.contains(6)); + assert!(!b_hist.contains(6)); + assert!(!c_hist.contains(6)); + assert!(a_hist.contains(7)); + assert!(b_hist.contains(7)); + assert!(c_hist.contains(7)); }) .await; } @@ -374,70 +288,44 @@ async fn test_write_account_histories_with_reorg() { run_test(|ctx| async move { let ctx = setup_accounts_history(ctx).await; - let provider = ctx.factory.provider().unwrap(); - - let a_key = ShardedKey::new(USER_A, u64::MAX); - let b_key = ShardedKey::new(USER_B, u64::MAX); - let c_key = ShardedKey::new(USER_C, u64::MAX); - let another_block = HostBlockSpec::new(ctx.constants()) .enter_token(USER_A, 10 * ONE_HOST_USDC, HOST_USDC) .enter_token(USER_B, 2 * ONE_HOST_USDC, HOST_USDC) .enter_token(USER_C, 3 * ONE_HOST_USDC, HOST_USDC); ctx.process_blocks(vec![another_block.clone()]).await.unwrap(); - let v = provider - .tx_ref() - .cursor_read::() - .unwrap() - .walk_range(a_key.clone()..=c_key.clone()) - .unwrap() - .collect::, _>>() - .unwrap(); - - assert_eq!(v.len(), 3); - assert_eq!(v[0].0, a_key); - assert_eq!(v[1].0, b_key); - assert_eq!(v[2].0, c_key); - - for i in 1..6 { - assert!(v[0].1.contains(i)); - assert!(v[1].1.contains(i)); - assert!(v[2].1.contains(i)); + let a_hist = ctx.account_history(USER_A).unwrap(); + let b_hist = ctx.account_history(USER_B).unwrap(); + let c_hist = ctx.account_history(USER_C).unwrap(); + + for i in 1..=6 { + assert!(a_hist.contains(i)); + assert!(b_hist.contains(i)); + assert!(c_hist.contains(i)); } // After reorg, the history should not contain the latest entries ctx.revert_block(another_block).await.unwrap(); - let v = provider - .tx_ref() - .cursor_read::() - .unwrap() - .walk_range(a_key.clone()..=c_key.clone()) - .unwrap() - .collect::, _>>() - .unwrap(); - - assert_eq!(v.len(), 3); - assert_eq!(v[0].0, a_key); - assert_eq!(v[1].0, b_key); - assert_eq!(v[2].0, c_key); - - for i in 1..5 { - assert!(v[0].1.contains(i)); - assert!(v[1].1.contains(i)); - assert!(v[2].1.contains(i)); + let a_hist = 
ctx.account_history(USER_A).unwrap(); + let b_hist = ctx.account_history(USER_B).unwrap(); + let c_hist = ctx.account_history(USER_C).unwrap(); + + for i in 1..=5 { + assert!(a_hist.contains(i)); + assert!(b_hist.contains(i)); + assert!(c_hist.contains(i)); } - assert!(!v[0].1.contains(6)); - assert!(!v[1].1.contains(6)); - assert!(!v[2].1.contains(6)); + assert!(!a_hist.contains(6)); + assert!(!b_hist.contains(6)); + assert!(!c_hist.contains(6)); }) .await; } #[serial] #[tokio::test] -async fn test_historical_state_provider(ctx: SignetTestContext) { +async fn test_historical_state_provider() { run_test(|ctx| async move { let ctx = setup_accounts_history(ctx).await; @@ -450,44 +338,28 @@ async fn test_historical_state_provider(ctx: SignetTestContext) { .enter_token(USER_F, 60 * ONE_HOST_USDC, HOST_USDC); ctx.process_blocks(vec![another_block]).await.unwrap(); - let provider = ctx.factory.provider().unwrap(); - - // NB: It is bizarre that reth has completely different APIs for - // historical state and current state. basic_accounts is only on - // current while account_balance is only on historical. 
- let accounts = - provider.basic_accounts([USER_A, USER_B, USER_C, USER_D, USER_E, USER_F]).unwrap(); - - assert_eq!(accounts[0].1.as_ref().unwrap().balance, U256::from(15 * ONE_RU_USDC)); - assert_eq!(accounts[1].1.as_ref().unwrap().balance, U256::from(30 * ONE_RU_USDC)); - assert_eq!(accounts[2].1.as_ref().unwrap().balance, U256::from(45 * ONE_RU_USDC)); - assert_eq!(accounts[3].1.as_ref().unwrap().balance, U256::from(40 * ONE_RU_USDC)); - assert_eq!(accounts[4].1.as_ref().unwrap().balance, U256::from(50 * ONE_RU_USDC)); - assert_eq!(accounts[5].1.as_ref().unwrap().balance, U256::from(60 * ONE_RU_USDC)); - - let historical = ctx.factory.history_by_block_number(5).unwrap(); - assert_eq!( - historical.account_balance(&USER_A).unwrap().unwrap(), - U256::from(5 * ONE_RU_USDC) - ); - assert_eq!( - historical.account_balance(&USER_B).unwrap().unwrap(), - U256::from(10 * ONE_RU_USDC) - ); - assert_eq!( - historical.account_balance(&USER_C).unwrap().unwrap(), - U256::from(15 * ONE_RU_USDC) - ); - assert!(historical.account_balance(&USER_D).unwrap().is_none()); - assert!(historical.account_balance(&USER_E).unwrap().is_none()); - assert!(historical.account_balance(&USER_F).unwrap().is_none()); + // Current state + assert_eq!(ctx.account(USER_A).unwrap().balance, U256::from(15 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_B).unwrap().balance, U256::from(30 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_C).unwrap().balance, U256::from(45 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_D).unwrap().balance, U256::from(40 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_E).unwrap().balance, U256::from(50 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_F).unwrap().balance, U256::from(60 * ONE_RU_USDC)); + + // Historical state at block 5 + assert_eq!(ctx.account_at_height(USER_A, 5).unwrap().balance, U256::from(5 * ONE_RU_USDC)); + assert_eq!(ctx.account_at_height(USER_B, 5).unwrap().balance, U256::from(10 * ONE_RU_USDC)); + assert_eq!(ctx.account_at_height(USER_C, 
5).unwrap().balance, U256::from(15 * ONE_RU_USDC)); + assert!(ctx.account_at_height(USER_D, 5).is_none()); + assert!(ctx.account_at_height(USER_E, 5).is_none()); + assert!(ctx.account_at_height(USER_F, 5).is_none()); }) .await; } #[serial] #[tokio::test] -async fn test_historical_state_provider_with_empty_blocks(ctx: SignetTestContext) { +async fn test_historical_state_provider_with_empty_blocks() { run_test(|ctx| async move { let ctx = setup_accounts_history(ctx).await; @@ -500,68 +372,40 @@ async fn test_historical_state_provider_with_empty_blocks(ctx: SignetTestContext .enter_token(USER_F, 60 * ONE_HOST_USDC, HOST_USDC); ctx.process_blocks(vec![another_block]).await.unwrap(); - let provider = ctx.factory.provider().unwrap(); - - // NB: It is bizarre that reth has completely different APIs for - // historical state and current state. basic_accounts is only on - // current while account_balance is only on historical. - let accounts = - provider.basic_accounts([USER_A, USER_B, USER_C, USER_D, USER_E, USER_F]).unwrap(); - - assert_eq!(accounts[0].1.as_ref().unwrap().balance, U256::from(15 * ONE_RU_USDC)); - assert_eq!(accounts[1].1.as_ref().unwrap().balance, U256::from(30 * ONE_RU_USDC)); - assert_eq!(accounts[2].1.as_ref().unwrap().balance, U256::from(45 * ONE_RU_USDC)); - assert_eq!(accounts[3].1.as_ref().unwrap().balance, U256::from(40 * ONE_RU_USDC)); - assert_eq!(accounts[4].1.as_ref().unwrap().balance, U256::from(50 * ONE_RU_USDC)); - assert_eq!(accounts[5].1.as_ref().unwrap().balance, U256::from(60 * ONE_RU_USDC)); - - let historical = ctx.factory.history_by_block_number(5).unwrap(); - assert_eq!( - historical.account_balance(&USER_A).unwrap().unwrap(), - U256::from(5 * ONE_RU_USDC) - ); - assert_eq!( - historical.account_balance(&USER_B).unwrap().unwrap(), - U256::from(10 * ONE_RU_USDC) - ); - assert_eq!( - historical.account_balance(&USER_C).unwrap().unwrap(), - U256::from(15 * ONE_RU_USDC) - ); - 
assert!(historical.account_balance(&USER_D).unwrap().is_none()); - assert!(historical.account_balance(&USER_E).unwrap().is_none()); - assert!(historical.account_balance(&USER_F).unwrap().is_none()); + // Current state + assert_eq!(ctx.account(USER_A).unwrap().balance, U256::from(15 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_B).unwrap().balance, U256::from(30 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_C).unwrap().balance, U256::from(45 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_D).unwrap().balance, U256::from(40 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_E).unwrap().balance, U256::from(50 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_F).unwrap().balance, U256::from(60 * ONE_RU_USDC)); + + // Historical state at block 5 + assert_eq!(ctx.account_at_height(USER_A, 5).unwrap().balance, U256::from(5 * ONE_RU_USDC)); + assert_eq!(ctx.account_at_height(USER_B, 5).unwrap().balance, U256::from(10 * ONE_RU_USDC)); + assert_eq!(ctx.account_at_height(USER_C, 5).unwrap().balance, U256::from(15 * ONE_RU_USDC)); + assert!(ctx.account_at_height(USER_D, 5).is_none()); + assert!(ctx.account_at_height(USER_E, 5).is_none()); + assert!(ctx.account_at_height(USER_F, 5).is_none()); let empty_block = HostBlockSpec::new(ctx.constants()); ctx.process_blocks(vec![empty_block; 2]).await.unwrap(); - // the historical state that we previously checked should not change, even after processing empty blocks. 
- let accounts = - provider.basic_accounts([USER_A, USER_B, USER_C, USER_D, USER_E, USER_F]).unwrap(); - - assert_eq!(accounts[0].1.as_ref().unwrap().balance, U256::from(15 * ONE_RU_USDC)); - assert_eq!(accounts[1].1.as_ref().unwrap().balance, U256::from(30 * ONE_RU_USDC)); - assert_eq!(accounts[2].1.as_ref().unwrap().balance, U256::from(45 * ONE_RU_USDC)); - assert_eq!(accounts[3].1.as_ref().unwrap().balance, U256::from(40 * ONE_RU_USDC)); - assert_eq!(accounts[4].1.as_ref().unwrap().balance, U256::from(50 * ONE_RU_USDC)); - assert_eq!(accounts[5].1.as_ref().unwrap().balance, U256::from(60 * ONE_RU_USDC)); - - let historical = ctx.factory.history_by_block_number(5).unwrap(); - assert_eq!( - historical.account_balance(&USER_A).unwrap().unwrap(), - U256::from(5 * ONE_RU_USDC) - ); - assert_eq!( - historical.account_balance(&USER_B).unwrap().unwrap(), - U256::from(10 * ONE_RU_USDC) - ); - assert_eq!( - historical.account_balance(&USER_C).unwrap().unwrap(), - U256::from(15 * ONE_RU_USDC) - ); - assert!(historical.account_balance(&USER_D).unwrap().is_none()); - assert!(historical.account_balance(&USER_E).unwrap().is_none()); - assert!(historical.account_balance(&USER_F).unwrap().is_none()); + // The historical state previously checked should not change, even after + // processing empty blocks. 
+ assert_eq!(ctx.account(USER_A).unwrap().balance, U256::from(15 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_B).unwrap().balance, U256::from(30 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_C).unwrap().balance, U256::from(45 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_D).unwrap().balance, U256::from(40 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_E).unwrap().balance, U256::from(50 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_F).unwrap().balance, U256::from(60 * ONE_RU_USDC)); + + assert_eq!(ctx.account_at_height(USER_A, 5).unwrap().balance, U256::from(5 * ONE_RU_USDC)); + assert_eq!(ctx.account_at_height(USER_B, 5).unwrap().balance, U256::from(10 * ONE_RU_USDC)); + assert_eq!(ctx.account_at_height(USER_C, 5).unwrap().balance, U256::from(15 * ONE_RU_USDC)); + assert!(ctx.account_at_height(USER_D, 5).is_none()); + assert!(ctx.account_at_height(USER_E, 5).is_none()); + assert!(ctx.account_at_height(USER_F, 5).is_none()); }) .await; } @@ -581,55 +425,30 @@ async fn test_historical_state_provider_with_reorg() { .enter_token(USER_F, 60 * ONE_HOST_USDC, HOST_USDC); ctx.process_blocks(vec![another_block.clone()]).await.unwrap(); - let provider = ctx.factory.provider().unwrap(); - - // NB: It is bizarre that reth has completely different APIs for - // historical state and current state. basic_accounts is only on - // current while account_balance is only on historical. 
- let accounts = - provider.basic_accounts([USER_A, USER_B, USER_C, USER_D, USER_E, USER_F]).unwrap(); - - assert_eq!(accounts[0].1.as_ref().unwrap().balance, U256::from(15 * ONE_RU_USDC)); - assert_eq!(accounts[1].1.as_ref().unwrap().balance, U256::from(30 * ONE_RU_USDC)); - assert_eq!(accounts[2].1.as_ref().unwrap().balance, U256::from(45 * ONE_RU_USDC)); - assert_eq!(accounts[3].1.as_ref().unwrap().balance, U256::from(40 * ONE_RU_USDC)); - assert_eq!(accounts[4].1.as_ref().unwrap().balance, U256::from(50 * ONE_RU_USDC)); - assert_eq!(accounts[5].1.as_ref().unwrap().balance, U256::from(60 * ONE_RU_USDC)); - - let historical = ctx.factory.history_by_block_number(5).unwrap(); - assert_eq!( - historical.account_balance(&USER_A).unwrap().unwrap(), - U256::from(5 * ONE_RU_USDC) - ); - assert_eq!( - historical.account_balance(&USER_B).unwrap().unwrap(), - U256::from(10 * ONE_RU_USDC) - ); - assert_eq!( - historical.account_balance(&USER_C).unwrap().unwrap(), - U256::from(15 * ONE_RU_USDC) - ); - assert!(historical.account_balance(&USER_D).unwrap().is_none()); - assert!(historical.account_balance(&USER_E).unwrap().is_none()); - assert!(historical.account_balance(&USER_F).unwrap().is_none()); + // Current state + assert_eq!(ctx.account(USER_A).unwrap().balance, U256::from(15 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_B).unwrap().balance, U256::from(30 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_C).unwrap().balance, U256::from(45 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_D).unwrap().balance, U256::from(40 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_E).unwrap().balance, U256::from(50 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_F).unwrap().balance, U256::from(60 * ONE_RU_USDC)); + + // Historical state at block 5 + assert_eq!(ctx.account_at_height(USER_A, 5).unwrap().balance, U256::from(5 * ONE_RU_USDC)); + assert_eq!(ctx.account_at_height(USER_B, 5).unwrap().balance, U256::from(10 * ONE_RU_USDC)); + assert_eq!(ctx.account_at_height(USER_C, 
5).unwrap().balance, U256::from(15 * ONE_RU_USDC)); + assert!(ctx.account_at_height(USER_D, 5).is_none()); + assert!(ctx.account_at_height(USER_E, 5).is_none()); + assert!(ctx.account_at_height(USER_F, 5).is_none()); ctx.revert_block(another_block).await.unwrap(); // Make the same assertions after reverting, the historical state should not change - assert_eq!( - historical.account_balance(&USER_A).unwrap().unwrap(), - U256::from(5 * ONE_RU_USDC) - ); - assert_eq!( - historical.account_balance(&USER_B).unwrap().unwrap(), - U256::from(10 * ONE_RU_USDC) - ); - assert_eq!( - historical.account_balance(&USER_C).unwrap().unwrap(), - U256::from(15 * ONE_RU_USDC) - ); - assert!(historical.account_balance(&USER_D).unwrap().is_none()); - assert!(historical.account_balance(&USER_E).unwrap().is_none()); - assert!(historical.account_balance(&USER_F).unwrap().is_none()); + assert_eq!(ctx.account_at_height(USER_A, 5).unwrap().balance, U256::from(5 * ONE_RU_USDC)); + assert_eq!(ctx.account_at_height(USER_B, 5).unwrap().balance, U256::from(10 * ONE_RU_USDC)); + assert_eq!(ctx.account_at_height(USER_C, 5).unwrap().balance, U256::from(15 * ONE_RU_USDC)); + assert!(ctx.account_at_height(USER_D, 5).is_none()); + assert!(ctx.account_at_height(USER_E, 5).is_none()); + assert!(ctx.account_at_height(USER_F, 5).is_none()); let new_block_6 = HostBlockSpec::new(ctx.constants()) .enter_token(USER_A, 10 * ONE_HOST_USDC, HOST_USDC) @@ -638,33 +457,21 @@ async fn test_historical_state_provider_with_reorg() { ctx.process_block(new_block_6).await.unwrap(); - // new current state assertions - let provider = ctx.factory.provider().unwrap(); - let accounts = - provider.basic_accounts([USER_A, USER_B, USER_C, USER_D, USER_E, USER_F]).unwrap(); - assert_eq!(accounts[0].1.as_ref().unwrap().balance, U256::from(15 * ONE_RU_USDC)); - assert_eq!(accounts[1].1.as_ref().unwrap().balance, U256::from(30 * ONE_RU_USDC)); - assert_eq!(accounts[2].1.as_ref().unwrap().balance, U256::from(45 * ONE_RU_USDC)); - 
assert!(accounts[3].1.is_none()); - assert!(accounts[4].1.is_none()); - assert!(accounts[5].1.is_none()); + // New current state assertions + assert_eq!(ctx.account(USER_A).unwrap().balance, U256::from(15 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_B).unwrap().balance, U256::from(30 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_C).unwrap().balance, U256::from(45 * ONE_RU_USDC)); + assert!(ctx.account(USER_D).is_none()); + assert!(ctx.account(USER_E).is_none()); + assert!(ctx.account(USER_F).is_none()); // Make the same assertions after the new block 6, the historical state should not change - assert_eq!( - historical.account_balance(&USER_A).unwrap().unwrap(), - U256::from(5 * ONE_RU_USDC) - ); - assert_eq!( - historical.account_balance(&USER_B).unwrap().unwrap(), - U256::from(10 * ONE_RU_USDC) - ); - assert_eq!( - historical.account_balance(&USER_C).unwrap().unwrap(), - U256::from(15 * ONE_RU_USDC) - ); - assert!(historical.account_balance(&USER_D).unwrap().is_none()); - assert!(historical.account_balance(&USER_E).unwrap().is_none()); - assert!(historical.account_balance(&USER_F).unwrap().is_none()); + assert_eq!(ctx.account_at_height(USER_A, 5).unwrap().balance, U256::from(5 * ONE_RU_USDC)); + assert_eq!(ctx.account_at_height(USER_B, 5).unwrap().balance, U256::from(10 * ONE_RU_USDC)); + assert_eq!(ctx.account_at_height(USER_C, 5).unwrap().balance, U256::from(15 * ONE_RU_USDC)); + assert!(ctx.account_at_height(USER_D, 5).is_none()); + assert!(ctx.account_at_height(USER_E, 5).is_none()); + assert!(ctx.account_at_height(USER_F, 5).is_none()); }) .await; } @@ -675,31 +482,24 @@ async fn test_write_changesets() { run_test(|ctx| async move { let ctx = setup_accounts_history(ctx).await; - let provider = ctx.factory.provider().unwrap(); - - let mut cursor = provider.tx_ref().cursor_dup_read::().unwrap(); + // The changeset at block N records the state before block N. 
+ // get_account_at_height(addr, N-1) gives the state at end of block N-1, + // which is the "before" state for block N. + let acct_a_at_3 = ctx.account_at_height(USER_A, 3).unwrap(); + let acct_b_at_3 = ctx.account_at_height(USER_B, 3).unwrap(); + let acct_c_at_3 = ctx.account_at_height(USER_C, 3).unwrap(); - let entries_4 = - cursor.walk_range(4..5).unwrap().map(Result::unwrap).map(|t| t.1).collect::>(); + assert_eq!(acct_a_at_3.balance, U256::from(3 * ONE_RU_USDC)); + assert_eq!(acct_b_at_3.balance, U256::from(6 * ONE_RU_USDC)); + assert_eq!(acct_c_at_3.balance, U256::from(9 * ONE_RU_USDC)); - let entry_a = entries_4.iter().find(|e| e.address == USER_A).unwrap(); - let entry_b = entries_4.iter().find(|e| e.address == USER_B).unwrap(); - let entry_c = entries_4.iter().find(|e| e.address == USER_C).unwrap(); + let acct_a_at_4 = ctx.account_at_height(USER_A, 4).unwrap(); + let acct_b_at_4 = ctx.account_at_height(USER_B, 4).unwrap(); + let acct_c_at_4 = ctx.account_at_height(USER_C, 4).unwrap(); - assert_eq!(entry_a.info.as_ref().unwrap().balance, U256::from(3 * ONE_RU_USDC)); - assert_eq!(entry_b.info.as_ref().unwrap().balance, U256::from(6 * ONE_RU_USDC)); - assert_eq!(entry_c.info.as_ref().unwrap().balance, U256::from(9 * ONE_RU_USDC)); - - let entries_5 = - cursor.walk(Some(5)).unwrap().map(Result::unwrap).map(|t| t.1).collect::>(); - - let entry_a = entries_5.iter().find(|e| e.address == USER_A).unwrap(); - let entry_b = entries_5.iter().find(|e| e.address == USER_B).unwrap(); - let entry_c = entries_5.iter().find(|e| e.address == USER_C).unwrap(); - - assert_eq!(entry_a.info.as_ref().unwrap().balance, U256::from(4 * ONE_RU_USDC)); - assert_eq!(entry_b.info.as_ref().unwrap().balance, U256::from(8 * ONE_RU_USDC)); - assert_eq!(entry_c.info.as_ref().unwrap().balance, U256::from(12 * ONE_RU_USDC)); + assert_eq!(acct_a_at_4.balance, U256::from(4 * ONE_RU_USDC)); + assert_eq!(acct_b_at_4.balance, U256::from(8 * ONE_RU_USDC)); + assert_eq!(acct_c_at_4.balance, 
U256::from(12 * ONE_RU_USDC)); }) .await; } @@ -710,59 +510,41 @@ async fn test_write_changesets_with_empty_blocks() { run_test(|ctx| async move { let ctx = setup_accounts_history(ctx).await; - let provider = ctx.factory.provider().unwrap(); - - let mut cursor = provider.tx_ref().cursor_dup_read::().unwrap(); - - let entries_4 = - cursor.walk_range(4..5).unwrap().map(Result::unwrap).map(|t| t.1).collect::>(); + let acct_a_at_3 = ctx.account_at_height(USER_A, 3).unwrap(); + let acct_b_at_3 = ctx.account_at_height(USER_B, 3).unwrap(); + let acct_c_at_3 = ctx.account_at_height(USER_C, 3).unwrap(); - let entry_a = entries_4.iter().find(|e| e.address == USER_A).unwrap(); - let entry_b = entries_4.iter().find(|e| e.address == USER_B).unwrap(); - let entry_c = entries_4.iter().find(|e| e.address == USER_C).unwrap(); + assert_eq!(acct_a_at_3.balance, U256::from(3 * ONE_RU_USDC)); + assert_eq!(acct_b_at_3.balance, U256::from(6 * ONE_RU_USDC)); + assert_eq!(acct_c_at_3.balance, U256::from(9 * ONE_RU_USDC)); - assert_eq!(entry_a.info.as_ref().unwrap().balance, U256::from(3 * ONE_RU_USDC)); - assert_eq!(entry_b.info.as_ref().unwrap().balance, U256::from(6 * ONE_RU_USDC)); - assert_eq!(entry_c.info.as_ref().unwrap().balance, U256::from(9 * ONE_RU_USDC)); + let acct_a_at_4 = ctx.account_at_height(USER_A, 4).unwrap(); + let acct_b_at_4 = ctx.account_at_height(USER_B, 4).unwrap(); + let acct_c_at_4 = ctx.account_at_height(USER_C, 4).unwrap(); - let entries_5 = - cursor.walk(Some(5)).unwrap().map(Result::unwrap).map(|t| t.1).collect::>(); - - let entry_a = entries_5.iter().find(|e| e.address == USER_A).unwrap(); - let entry_b = entries_5.iter().find(|e| e.address == USER_B).unwrap(); - let entry_c = entries_5.iter().find(|e| e.address == USER_C).unwrap(); - - assert_eq!(entry_a.info.as_ref().unwrap().balance, U256::from(4 * ONE_RU_USDC)); - assert_eq!(entry_b.info.as_ref().unwrap().balance, U256::from(8 * ONE_RU_USDC)); - assert_eq!(entry_c.info.as_ref().unwrap().balance, 
U256::from(12 * ONE_RU_USDC)); + assert_eq!(acct_a_at_4.balance, U256::from(4 * ONE_RU_USDC)); + assert_eq!(acct_b_at_4.balance, U256::from(8 * ONE_RU_USDC)); + assert_eq!(acct_c_at_4.balance, U256::from(12 * ONE_RU_USDC)); let empty_block = HostBlockSpec::new(ctx.constants()); ctx.process_blocks(vec![empty_block; 2]).await.unwrap(); - // Even after processing empty blocks, the changesets should not change. - let mut cursor = provider.tx_ref().cursor_dup_read::().unwrap(); - - let entries_4 = - cursor.walk_range(4..5).unwrap().map(Result::unwrap).map(|t| t.1).collect::>(); - - let entry_a = entries_4.iter().find(|e| e.address == USER_A).unwrap(); - let entry_b = entries_4.iter().find(|e| e.address == USER_B).unwrap(); - let entry_c = entries_4.iter().find(|e| e.address == USER_C).unwrap(); - - assert_eq!(entry_a.info.as_ref().unwrap().balance, U256::from(3 * ONE_RU_USDC)); - assert_eq!(entry_b.info.as_ref().unwrap().balance, U256::from(6 * ONE_RU_USDC)); - assert_eq!(entry_c.info.as_ref().unwrap().balance, U256::from(9 * ONE_RU_USDC)); + // Even after processing empty blocks, the historical state should not change. 
+ let acct_a_at_3 = ctx.account_at_height(USER_A, 3).unwrap(); + let acct_b_at_3 = ctx.account_at_height(USER_B, 3).unwrap(); + let acct_c_at_3 = ctx.account_at_height(USER_C, 3).unwrap(); - let entries_5 = - cursor.walk(Some(5)).unwrap().map(Result::unwrap).map(|t| t.1).collect::>(); + assert_eq!(acct_a_at_3.balance, U256::from(3 * ONE_RU_USDC)); + assert_eq!(acct_b_at_3.balance, U256::from(6 * ONE_RU_USDC)); + assert_eq!(acct_c_at_3.balance, U256::from(9 * ONE_RU_USDC)); - let entry_a = entries_5.iter().find(|e| e.address == USER_A).unwrap(); - let entry_b = entries_5.iter().find(|e| e.address == USER_B).unwrap(); - let entry_c = entries_5.iter().find(|e| e.address == USER_C).unwrap(); + let acct_a_at_4 = ctx.account_at_height(USER_A, 4).unwrap(); + let acct_b_at_4 = ctx.account_at_height(USER_B, 4).unwrap(); + let acct_c_at_4 = ctx.account_at_height(USER_C, 4).unwrap(); - assert_eq!(entry_a.info.as_ref().unwrap().balance, U256::from(4 * ONE_RU_USDC)); - assert_eq!(entry_b.info.as_ref().unwrap().balance, U256::from(8 * ONE_RU_USDC)); - assert_eq!(entry_c.info.as_ref().unwrap().balance, U256::from(12 * ONE_RU_USDC)); + assert_eq!(acct_a_at_4.balance, U256::from(4 * ONE_RU_USDC)); + assert_eq!(acct_b_at_4.balance, U256::from(8 * ONE_RU_USDC)); + assert_eq!(acct_c_at_4.balance, U256::from(12 * ONE_RU_USDC)); }) .await; } diff --git a/crates/node-tests/tests/rpc.rs b/crates/node-tests/tests/rpc.rs index a293665..72581b6 100644 --- a/crates/node-tests/tests/rpc.rs +++ b/crates/node-tests/tests/rpc.rs @@ -13,7 +13,6 @@ use alloy::{ }, sol_types::{SolCall, SolEvent}, }; -use reth::providers::{BlockNumReader, BlockReader, TransactionsProvider}; use serial_test::serial; use signet_node_tests::{ HostBlockSpec, SignetTestContext, @@ -99,28 +98,29 @@ async fn test_eth_estimateGas(ctx: &SignetTestContext, contract: &TestCounterIns } async fn test_eth_getBlockByHash(ctx: &SignetTestContext, _contract: &TestCounterInstance) { - let genesis = 
ctx.factory.block(0.into()).unwrap().unwrap(); + let genesis = ctx.header_by_number(0).unwrap(); - let block = ctx.alloy_provider.get_block_by_hash(genesis.hash_slow()).await.unwrap().unwrap(); + let block = ctx.alloy_provider.get_block_by_hash(genesis.hash()).await.unwrap().unwrap(); assert_eq!(block.header.number, genesis.number); assert_eq!(block.header.timestamp, genesis.timestamp); } async fn test_eth_getBlockByNumber(ctx: &SignetTestContext, _contract: &TestCounterInstance) { - let db_block = ctx.factory.block(1.into()).unwrap().unwrap(); + let db_header = ctx.header_by_number(1).unwrap(); let rpc_block = - ctx.alloy_provider.get_block_by_number(db_block.number.into()).await.unwrap().unwrap(); - assert_eq!(rpc_block.header.number, db_block.number); - assert_eq!(rpc_block.header.timestamp, db_block.timestamp); - assert_eq!(rpc_block.header.hash, db_block.hash_slow()); + ctx.alloy_provider.get_block_by_number(db_header.number.into()).await.unwrap().unwrap(); + assert_eq!(rpc_block.header.number, db_header.number); + assert_eq!(rpc_block.header.timestamp, db_header.timestamp); + assert_eq!(rpc_block.header.hash, db_header.hash()); } async fn test_eth_getTransactionByHash(ctx: &SignetTestContext, _contract: &TestCounterInstance) { let deployer = ctx.addresses[0]; - let deploy_tx = &ctx.factory.transactions_by_block(1.into()).unwrap().unwrap()[0]; - let tx_hash = *deploy_tx.hash(); + let txs = ctx.transactions_in_block(1).await; + let deploy_tx = &txs[0]; + let tx_hash = *deploy_tx.tx_hash(); let rpc_tx = ctx.alloy_provider.get_transaction_by_hash(tx_hash).await.unwrap().unwrap(); assert_eq!(rpc_tx.tx_hash(), tx_hash); @@ -135,8 +135,9 @@ async fn test_eth_getTransactionByHash(ctx: &SignetTestContext, _contract: &Test async fn test_eth_getTransactionReceipt(ctx: &SignetTestContext, contract: &TestCounterInstance) { let deployer = ctx.addresses[0]; - let deploy_tx = &ctx.factory.transactions_by_block(1.into()).unwrap().unwrap()[0]; - let tx_hash = 
*deploy_tx.hash(); + let txs = ctx.transactions_in_block(1).await; + let deploy_tx = &txs[0]; + let tx_hash = *deploy_tx.tx_hash(); let receipt = ctx.alloy_provider.get_transaction_receipt(tx_hash).await.unwrap().unwrap(); @@ -210,8 +211,8 @@ async fn test_stateful_rpc_calls() { } async fn getLogs_post(ctx: &SignetTestContext, contract: &TestCounterInstance) { - let latest_block = ctx.factory.last_block_number().unwrap(); - let latest_hash = ctx.factory.block(latest_block.into()).unwrap().unwrap().hash_slow(); + let latest_block = ctx.last_block_number(); + let latest_hash = ctx.header_by_number(latest_block).unwrap().hash(); let logs = ctx .alloy_provider @@ -303,8 +304,8 @@ async fn newBlockFilter_pre(ctx: &SignetTestContext) -> U256 { async fn newBlockFilter_post(ctx: &SignetTestContext, filter_id: U256) { let blocks: Vec = ctx.alloy_provider.get_filter_changes(filter_id).await.unwrap(); - let latest_block = ctx.factory.last_block_number().unwrap(); - let latest_hash = ctx.factory.block(latest_block.into()).unwrap().unwrap().hash_slow(); + let latest_block = ctx.last_block_number(); + let latest_hash = ctx.header_by_number(latest_block).unwrap().hash(); assert_eq!(blocks.len(), 1); assert_eq!(blocks[0], latest_hash); @@ -534,8 +535,8 @@ async fn subscribe_blocks_pre(ctx: &SignetTestContext) -> Subscription
{ async fn subscribe_blocks_post(ctx: &SignetTestContext, mut sub: Subscription
) { let block = sub.recv().await.unwrap(); - let latest_block = ctx.factory.last_block_number().unwrap(); - let latest_hash = ctx.factory.block(latest_block.into()).unwrap().unwrap().hash_slow(); + let latest_block = ctx.last_block_number(); + let latest_hash = ctx.header_by_number(latest_block).unwrap().hash(); assert_eq!(block.number, latest_block); assert_eq!(block.hash, latest_hash); } @@ -665,19 +666,19 @@ async fn verify_all_txs_in_block(ctx: &SignetTestContext, block_number: u64) { let txs = block.transactions.as_transactions().unwrap(); - // Also get transactions directly from DB - let db_txs = ctx.factory.transactions_by_block(block_number.into()).unwrap().unwrap(); + // Also get transactions directly from storage + let db_txs = ctx.transactions_in_block(block_number).await; assert_eq!(txs.len(), db_txs.len(), "Transaction count mismatch in block {}", block_number); for (idx, (rpc_tx, db_tx)) in txs.iter().zip(db_txs.iter()).enumerate() { let rpc_hash = rpc_tx.tx_hash(); - let db_hash = *db_tx.hash(); + let db_hash = *db_tx.tx_hash(); - // Verify RPC and DB hashes match + // Verify RPC and storage hashes match assert_eq!( rpc_hash, db_hash, - "Hash mismatch between RPC and DB for block {} tx {}", + "Hash mismatch between RPC and storage for block {} tx {}", block_number, idx ); @@ -688,11 +689,11 @@ async fn verify_all_txs_in_block(ctx: &SignetTestContext, block_number: u64) { "RPC hash lookup failed: block={block_number}, idx={idx}, hash={rpc_hash}", ); - // Verify hash lookup works via DB provider - let provider_lookup = ctx.factory.provider().unwrap().transaction_by_hash(db_hash).unwrap(); + // Verify hash lookup works via cold storage + let storage_lookup = ctx.transaction_by_hash(db_hash).await; assert!( - provider_lookup.is_some(), - "DB provider hash lookup failed: block={}, idx={}, hash={}", + storage_lookup.is_some(), + "Cold storage hash lookup failed: block={}, idx={}, hash={}", block_number, idx, db_hash diff --git 
a/crates/node-tests/tests/rpc_debug.rs b/crates/node-tests/tests/rpc_debug.rs index 90d240a..8dd8824 100644 --- a/crates/node-tests/tests/rpc_debug.rs +++ b/crates/node-tests/tests/rpc_debug.rs @@ -1,8 +1,5 @@ use alloy::{primitives::Bytes, providers::ext::DebugApi, sol_types::SolCall}; -use reth::{ - providers::TransactionsProvider, - rpc::types::trace::geth::{CallConfig, GethDebugTracingOptions}, -}; +use reth::rpc::types::trace::geth::{CallConfig, GethDebugTracingOptions}; use serial_test::serial; use signet_node_tests::{rpc::rpc_test, types::Counter::incrementCall}; use signet_test_utils::specs::{HostBlockSpec, RuBlockSpec}; @@ -12,8 +9,9 @@ use signet_test_utils::specs::{HostBlockSpec, RuBlockSpec}; async fn test_debug_trace_transaction() { rpc_test(|ctx, counter| async move { let deployer = ctx.addresses[0]; - let deploy_tx = &ctx.factory.transactions_by_block(1.into()).unwrap().unwrap()[0]; - let tx_hash = *deploy_tx.hash(); + let txs = ctx.transactions_in_block(1).await; + let deploy_tx = &txs[0]; + let tx_hash = *deploy_tx.tx_hash(); let tracing_opts = GethDebugTracingOptions::call_tracer(CallConfig { only_top_call: Some(false), diff --git a/crates/node-types/Cargo.toml b/crates/node-types/Cargo.toml deleted file mode 100644 index 6db0b39..0000000 --- a/crates/node-types/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "signet-node-types" -version.workspace = true -edition.workspace = true -rust-version.workspace = true -authors.workspace = true -license.workspace = true -homepage.workspace = true -repository.workspace = true - -[dependencies] -alloy.workspace = true -reth.workspace = true -reth-chainspec.workspace = true -reth-db.workspace = true -reth-node-api.workspace = true -reth-node-ethereum.workspace = true -signet-zenith.workspace = true - -tokio.workspace = true -tracing.workspace = true diff --git a/crates/node-types/README.md b/crates/node-types/README.md deleted file mode 100644 index 61ddffc..0000000 --- 
a/crates/node-types/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# Signet Node Types - -This crate provides parameterizations and conveneniences for the Signet node's -use of reth's internal generics. E.g. [`NodePrimitives`] and [`NodeTypes`]. - -It also provides a [`NodeTypesDbTrait`] to aggregate several trait constraints -on the database type. This is then used in the node and in `signet-db`. - -This crate is mostly shims. It is not intended to be used outside of the -Signet node and `signet-db` crates. diff --git a/crates/node-types/src/block.rs b/crates/node-types/src/block.rs deleted file mode 100644 index 29d4463..0000000 --- a/crates/node-types/src/block.rs +++ /dev/null @@ -1,26 +0,0 @@ -use alloy::eips::eip2718::{Decodable2718, Encodable2718}; -use reth::primitives::TransactionSigned; -use signet_zenith::Coder; -use tracing::trace; - -/// [signet_zenith::ZenithBlock] parameterized for use with reth. -pub type ZenithBlock = signet_zenith::ZenithBlock; - -/// [Coder] implementation for reth's 2718 impl -#[derive(Debug, Clone, Copy)] -pub struct Reth2718Coder; - -impl Coder for Reth2718Coder { - type Tx = TransactionSigned; - - fn encode(t: &TransactionSigned) -> Vec { - t.encoded_2718() - } - - fn decode(buf: &mut &[u8]) -> Option { - TransactionSigned::decode_2718(buf) - .inspect_err(|e| trace!(%e, "Discarding transaction due to failed decoding")) - .ok() - .filter(|tx| !tx.is_eip4844()) - } -} diff --git a/crates/node-types/src/lib.rs b/crates/node-types/src/lib.rs deleted file mode 100644 index ba49628..0000000 --- a/crates/node-types/src/lib.rs +++ /dev/null @@ -1,174 +0,0 @@ -#![doc = include_str!("../README.md")] -#![warn( - missing_copy_implementations, - missing_debug_implementations, - missing_docs, - unreachable_pub, - clippy::missing_const_for_fn, - rustdoc::all -)] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![deny(unused_must_use, rust_2018_idioms)] -#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] - -mod block; -pub use 
block::{Reth2718Coder, ZenithBlock}; - -mod utils; -pub use utils::{NodeTypesDbTrait, Pnt}; - -use reth::{ - primitives::EthPrimitives, - providers::{ - CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, - CanonStateSubscriptions, EthStorage, NodePrimitivesProvider, - }, -}; -use reth_chainspec::ChainSpec; -use reth_node_api::{NodePrimitives, NodeTypes, NodeTypesWithDB}; -use reth_node_ethereum::EthEngineTypes; -use std::marker::PhantomData; -use tokio::sync::broadcast::error::SendError; - -/// Items that can be sent via the status channel. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum NodeStatus { - /// Node is booting. - Booting, - /// Node's current height. - AtHeight(u64), -} - -/// Signet node types for [`NodeTypes`] and [`NodeTypesWithDB`]. -#[derive(Copy, Debug)] -pub struct SignetNodeTypes { - _db: PhantomData Db>, -} - -impl Clone for SignetNodeTypes { - fn clone(&self) -> Self { - Self { _db: PhantomData } - } -} - -impl PartialEq for SignetNodeTypes { - fn eq(&self, _other: &Self) -> bool { - true - } -} - -impl Eq for SignetNodeTypes {} - -impl Default for SignetNodeTypes { - fn default() -> Self { - Self { _db: PhantomData } - } -} - -impl NodePrimitives for SignetNodeTypes -where - Db: NodeTypesDbTrait, -{ - type Block = ::Block; - type BlockHeader = ::BlockHeader; - /// Block body primitive. - type BlockBody = ::BlockBody; - /// Signed version of the transaction type. - type SignedTx = ::SignedTx; - /// A receipt. 
- type Receipt = ::Receipt; -} - -impl NodeTypes for SignetNodeTypes -where - Db: NodeTypesDbTrait, -{ - type Primitives = EthPrimitives; - - type ChainSpec = ChainSpec; - - type Storage = EthStorage; - - type Payload = EthEngineTypes; -} - -impl NodeTypesWithDB for SignetNodeTypes -where - Db: NodeTypesDbTrait, -{ - type DB = Db; -} - -/// Shim to impl [`CanonStateSubscriptions`] -#[derive(Debug, Clone)] -pub struct SharedCanonState { - sender: CanonStateNotificationSender, - _pd: PhantomData Db>, -} - -impl SharedCanonState -where - Db: NodeTypesDbTrait, -{ - /// Get the number of receivers, via [`CanonStateNotificationSender::receiver_count`]. - pub fn receiver_count(&self) -> usize { - self.sender.receiver_count() - } - - /// Send a notification to all subscribers. - pub fn send( - &self, - notification: CanonStateNotification, - ) -> Result> { - self.sender.send(notification) - } -} - -impl Default for SharedCanonState -where - Db: NodeTypesDbTrait, -{ - fn default() -> Self { - // magic constant matches reth behavior in blockchain_tree. - // Max reorg depth is default 64, blockchain tree doubles it to 128. - Self::new(128) - } -} - -impl NodePrimitivesProvider for SharedCanonState -where - Db: NodeTypesDbTrait, -{ - type Primitives = EthPrimitives; -} - -impl SharedCanonState -where - Db: NodeTypesDbTrait, -{ - /// Create a new shared canon state. 
- pub fn new(capacity: usize) -> Self { - Self { sender: tokio::sync::broadcast::channel(capacity).0, _pd: PhantomData } - } -} - -impl CanonStateSubscriptions for SharedCanonState -where - Db: NodeTypesDbTrait, -{ - fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { - self.sender.subscribe() - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[allow(dead_code)] - fn compile_check() { - fn inner() {} - - inner::>>(); - } -} diff --git a/crates/node-types/src/utils.rs b/crates/node-types/src/utils.rs deleted file mode 100644 index 0d49922..0000000 --- a/crates/node-types/src/utils.rs +++ /dev/null @@ -1,24 +0,0 @@ -use reth::{primitives::EthPrimitives, providers::providers::ProviderNodeTypes}; -use reth_chainspec::ChainSpec; - -/// Convenience trait for specifying the [`ProviderNodeTypes`] implementation -/// required for Signet functionality. This is used to condense many trait -/// bounds. -pub trait Pnt: ProviderNodeTypes {} - -impl Pnt for T where T: ProviderNodeTypes {} - -/// Convenience trait to aggregate the DB requirements -pub trait NodeTypesDbTrait: - reth_db::database::Database + reth_db::database_metrics::DatabaseMetrics + Clone + Unpin + 'static -{ -} - -impl NodeTypesDbTrait for T where - T: reth_db::database::Database - + reth_db::database_metrics::DatabaseMetrics - + Clone - + Unpin - + 'static -{ -} diff --git a/crates/node/Cargo.toml b/crates/node/Cargo.toml index 3a6d7d7..7e3cd0d 100644 --- a/crates/node/Cargo.toml +++ b/crates/node/Cargo.toml @@ -10,10 +10,12 @@ repository.workspace = true [dependencies] signet-block-processor.workspace = true -signet-db.workspace = true +signet-extract.workspace = true signet-node-config.workspace = true -signet-node-types.workspace = true signet-rpc.workspace = true +signet-hot.workspace = true +signet-storage.workspace = true +signet-storage-types.workspace = true signet-blobber.workspace = true signet-tx-cache.workspace = true @@ -25,12 +27,11 @@ axum = "0.8.1" interprocess = { version = 
"2.2.2", features = ["tokio"] } reth.workspace = true -reth-chainspec.workspace = true -reth-db.workspace = true -reth-db-common.workspace = true reth-exex.workspace = true reth-node-api.workspace = true +trevm.workspace = true + eyre.workspace = true futures-util.workspace = true metrics.workspace = true diff --git a/crates/node/src/builder.rs b/crates/node/src/builder.rs index 183b05b..c55cf17 100644 --- a/crates/node/src/builder.rs +++ b/crates/node/src/builder.rs @@ -2,52 +2,45 @@ use crate::{NodeStatus, SignetNode}; use eyre::OptionExt; -use reth::{ - primitives::EthPrimitives, - providers::{BlockHashReader, ProviderFactory, StateProviderFactory}, -}; -use reth_db::transaction::DbTxMut; -use reth_db_common::init; +use reth::{primitives::EthPrimitives, providers::StateProviderFactory}; use reth_exex::ExExContext; use reth_node_api::{FullNodeComponents, NodeTypes}; use signet_block_processor::AliasOracleFactory; -use signet_db::DbProviderExt; +use signet_hot::db::UnsafeDbWrite; use signet_node_config::SignetNodeConfig; -use signet_node_types::{NodeTypesDbTrait, SignetNodeTypes}; +use signet_storage::{HistoryRead, HistoryWrite, HotKv, HotKvRead, UnifiedStorage}; +use signet_storage_types::EthereumHardfork; use std::sync::Arc; +use tracing::info; +use trevm::revm::database::DBErrorMarker; /// A type that does not implement [`AliasOracleFactory`]. #[derive(Debug, Clone, Copy)] pub struct NotAnAof; -/// A type that does not implement [`NodeTypesDbTrait`]. +/// Sentinel indicating no storage has been provided. #[derive(Debug, Clone, Copy)] -pub struct NotADb; +pub struct NotAStorage; /// Builder for [`SignetNode`]. This is the main way to create a signet node. /// /// The builder requires the following components to be set before building: /// - An [`ExExContext`], via [`Self::with_ctx`]. -/// - A [`ProviderFactory`] for the signet node's database. -/// - This can be provided directly via [`Self::with_factory`]. 
-/// - Or created from a database implementing [`NodeTypesDbTrait`] via -/// [`Self::with_db`]. -/// - If not set directly, can be created from the config via -/// [`Self::with_config_db`]. +/// - An [`Arc>`], via [`Self::with_storage`]. /// - An [`AliasOracleFactory`], via [`Self::with_alias_oracle`]. /// - If not set, a default one will be created from the [`ExExContext`]'s /// provider. /// - A `reqwest::Client`, via [`Self::with_client`]. /// - If not set, a default client will be created. -pub struct SignetNodeBuilder { +pub struct SignetNodeBuilder { config: SignetNodeConfig, alias_oracle: Option, ctx: Option, - factory: Option, + storage: Option, client: Option, } -impl core::fmt::Debug for SignetNodeBuilder { +impl core::fmt::Debug for SignetNodeBuilder { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("SignetNodeBuilder").finish_non_exhaustive() } @@ -56,70 +49,21 @@ impl core::fmt::Debug for SignetNodeBuilder { impl SignetNodeBuilder { /// Create a new SignetNodeBuilder instance. pub const fn new(config: SignetNodeConfig) -> Self { - Self { config, alias_oracle: None, ctx: None, factory: None, client: None } + Self { config, alias_oracle: None, ctx: None, storage: None, client: None } } } -impl SignetNodeBuilder { - /// Set the DB for the signet node. - pub fn with_db( +impl SignetNodeBuilder { + /// Set the [`UnifiedStorage`] backend for the signet node. + pub fn with_storage( self, - db: NewDb, - ) -> eyre::Result>, Aof>> { - let factory = ProviderFactory::new( - db, - self.config.chain_spec().clone(), - self.config.static_file_rw()?, - self.config.open_rocks_db()?, - )?; - - Ok(SignetNodeBuilder { - config: self.config, - alias_oracle: self.alias_oracle, - ctx: self.ctx, - factory: Some(factory), - client: self.client, - }) - } - - /// Set the DB for the signet node from config, opening the mdbx database. 
- pub fn with_config_db( - self, - ) -> eyre::Result< - SignetNodeBuilder>>, Aof>, - > { - let factory = ProviderFactory::new_with_database_path( - self.config.database_path(), - self.config.chain_spec().clone(), - reth_db::mdbx::DatabaseArguments::default(), - self.config.static_file_rw()?, - self.config.open_rocks_db()?, - )?; - Ok(SignetNodeBuilder { - config: self.config, - alias_oracle: self.alias_oracle, - ctx: self.ctx, - factory: Some(factory), - client: self.client, - }) - } - - /// Set the provider factory for the signet node. - /// - /// This is an alternative to [`Self::with_db`] and - /// [`Self::with_config_db`]. - pub fn with_factory( - self, - factory: ProviderFactory>, - ) -> SignetNodeBuilder>, Aof> - where - NewDb: NodeTypesDbTrait, - { + storage: Arc>, + ) -> SignetNodeBuilder>, Aof> { SignetNodeBuilder { config: self.config, alias_oracle: self.alias_oracle, ctx: self.ctx, - factory: Some(factory), + storage: Some(storage), client: self.client, } } @@ -128,7 +72,7 @@ impl SignetNodeBuilder { pub fn with_ctx( self, ctx: ExExContext, - ) -> SignetNodeBuilder, Db, Aof> + ) -> SignetNodeBuilder, Storage, Aof> where NewHost: FullNodeComponents, NewHost::Types: NodeTypes, @@ -137,7 +81,7 @@ impl SignetNodeBuilder { config: self.config, alias_oracle: self.alias_oracle, ctx: Some(ctx), - factory: self.factory, + storage: self.storage, client: self.client, } } @@ -146,126 +90,64 @@ impl SignetNodeBuilder { pub fn with_alias_oracle( self, alias_oracle: NewAof, - ) -> SignetNodeBuilder { + ) -> SignetNodeBuilder { SignetNodeBuilder { config: self.config, alias_oracle: Some(alias_oracle), ctx: self.ctx, - factory: self.factory, + storage: self.storage, client: self.client, } } /// Set the reqwest client for the signet node. 
- pub fn with_client(mut self, client: reqwest::Client) -> SignetNodeBuilder { + pub fn with_client(mut self, client: reqwest::Client) -> Self { self.client = Some(client); self } } -impl SignetNodeBuilder, ProviderFactory>, Aof> +impl SignetNodeBuilder, Arc>, Aof> where Host: FullNodeComponents, Host::Types: NodeTypes, - Db: NodeTypesDbTrait, + H: HotKv, { /// Prebuild checks for the signet node builder. Shared by all build /// commands. fn prebuild(&mut self) -> eyre::Result<()> { self.client.get_or_insert_default(); self.ctx.as_ref().ok_or_eyre("Launch context must be set")?; - let factory = self.factory.as_ref().ok_or_eyre("Provider factory must be set")?; - - // This check appears redundant with the same check made in - // `init_genesis`, but is not. We init the genesis DB state but then we - // drop some of it, and reuse those tables for our own nefarious - // purposes. If we attempt to drop those tables AFTER we have reused - // them, we will get a key deser error (as the tables will contain keys - // the old schema does not permit). This check ensures we only attempt - // to drop the tables once. 
- if matches!( - factory.block_hash(0), - Ok(None) - | Err(reth::providers::ProviderError::MissingStaticFileBlock( - reth::primitives::StaticFileSegment::Headers, - 0 - )) - ) { - init::init_genesis(factory)?; - - factory.provider_rw()?.update( - |writer: &mut reth::providers::DatabaseProviderRW>| { - writer.tx_mut().clear::()?; - writer.tx_mut().clear::()?; - writer.tx_mut().clear::()?; - - Ok(()) - }, - )?; + let storage = self.storage.as_ref().ok_or_eyre("Storage must be set")?; + + // Check if genesis is loaded + let reader = storage.reader()?; + let has_genesis = HistoryRead::has_block(&reader, 0)?; + drop(reader); + + if !has_genesis { + let genesis = self.config.genesis(); + let writer = storage.hot().writer()?; + writer.load_genesis(genesis, &EthereumHardfork::Paris)?; + writer.commit()?; + info!("loaded genesis into hot storage"); } Ok(()) } } -impl SignetNodeBuilder, NotADb, NotAnAof> -where - Host: FullNodeComponents, - Host::Types: NodeTypes, -{ - /// Build the node. This performs the following steps: - /// - /// - Runs prebuild checks. - /// - Inits the rollup DB from genesis if needed. - /// - Creates a default `AliasOracleFactory` from the host DB. - /// - /// # Panics - /// - /// If called outside a tokio runtime. - pub fn build( - self, - ) -> eyre::Result<( - SignetNode, Box>, - tokio::sync::watch::Receiver, - )> { - self.with_config_db()?.build() - } -} - -impl SignetNodeBuilder, NotADb, Aof> -where - Host: FullNodeComponents, - Host::Types: NodeTypes, - Aof: AliasOracleFactory, -{ - /// Build the node. This performs the following steps: - /// - /// - Runs prebuild checks. - /// - Inits the rollup DB from genesis if needed. - /// - /// # Panics - /// - /// If called outside a tokio runtime. 
- pub fn build( - self, - ) -> eyre::Result<( - SignetNode, Aof>, - tokio::sync::watch::Receiver, - )> { - self.with_config_db()?.build() - } -} - -impl SignetNodeBuilder, ProviderFactory>, NotAnAof> +impl SignetNodeBuilder, Arc>, NotAnAof> where Host: FullNodeComponents, Host::Types: NodeTypes, - Db: NodeTypesDbTrait, + H: HotKv + Clone + Send + Sync + 'static, + ::Error: DBErrorMarker, { /// Build the node. This performs the following steps: /// /// - Runs prebuild checks. - /// - Inits the rollup DB from genesis if needed. + /// - Inits storage from genesis if needed. /// - Creates a default `AliasOracleFactory` from the host DB. /// /// # Panics @@ -273,9 +155,8 @@ where /// If called outside a tokio runtime. pub fn build( mut self, - ) -> eyre::Result<(SignetNode, tokio::sync::watch::Receiver)> { + ) -> eyre::Result<(SignetNode, tokio::sync::watch::Receiver)> { self.prebuild()?; - // This allows the node to look up contract status. let ctx = self.ctx.unwrap(); let provider = ctx.provider().clone(); let alias_oracle: Box = Box::new(provider); @@ -283,36 +164,37 @@ where SignetNode::new_unsafe( ctx, self.config, - self.factory.unwrap(), + self.storage.unwrap(), alias_oracle, self.client.unwrap(), ) } } -impl SignetNodeBuilder, ProviderFactory>, Aof> +impl SignetNodeBuilder, Arc>, Aof> where Host: FullNodeComponents, Host::Types: NodeTypes, - Db: NodeTypesDbTrait, + H: HotKv + Clone + Send + Sync + 'static, + ::Error: DBErrorMarker, Aof: AliasOracleFactory, { /// Build the node. This performs the following steps: /// /// - Runs prebuild checks. - /// - Inits the rollup DB from genesis if needed. + /// - Inits storage from genesis if needed. /// /// # Panics /// /// If called outside a tokio runtime. 
pub fn build( mut self, - ) -> eyre::Result<(SignetNode, tokio::sync::watch::Receiver)> { + ) -> eyre::Result<(SignetNode, tokio::sync::watch::Receiver)> { self.prebuild()?; SignetNode::new_unsafe( self.ctx.unwrap(), self.config, - self.factory.unwrap(), + self.storage.unwrap(), self.alias_oracle.unwrap(), self.client.unwrap(), ) diff --git a/crates/node/src/metrics.rs b/crates/node/src/metrics.rs index 7623d73..d775464 100644 --- a/crates/node/src/metrics.rs +++ b/crates/node/src/metrics.rs @@ -7,6 +7,7 @@ //! - Number of reorgs processed use metrics::{Counter, counter, describe_counter}; +use reth::primitives::NodePrimitives; use reth_exex::ExExNotification; use std::sync::LazyLock; @@ -29,13 +30,22 @@ static DESCRIBE: LazyLock<()> = LazyLock::new(|| { describe_counter!(REORGS_PROCESSED, REORGS_PROCESSED_HELP); }); -fn reorgs_processed() -> Counter { +fn notifications_received() -> Counter { LazyLock::force(&DESCRIBE); - counter!(REORGS_PROCESSED) + counter!(NOTIFICATION_RECEIVED) } -fn inc_reorgs_processed() { - reorgs_processed().increment(1); +fn inc_notifications_received() { + notifications_received().increment(1); +} + +fn reorgs_received() -> Counter { + LazyLock::force(&DESCRIBE); + counter!(REORGS_RECEIVED) +} + +fn inc_reorgs_received() { + reorgs_received().increment(1); } fn notifications_processed() -> Counter { @@ -47,14 +57,23 @@ fn inc_notifications_processed() { notifications_processed().increment(1); } -pub(crate) fn record_notification_received(notification: &ExExNotification) { - inc_notifications_processed(); +fn reorgs_processed() -> Counter { + LazyLock::force(&DESCRIBE); + counter!(REORGS_PROCESSED) +} + +fn inc_reorgs_processed() { + reorgs_processed().increment(1); +} + +pub(crate) fn record_notification_received(notification: &ExExNotification) { + inc_notifications_received(); if notification.reverted_chain().is_some() { - inc_reorgs_processed(); + inc_reorgs_received(); } } -pub(crate) fn record_notification_processed(notification: 
&ExExNotification) { +pub(crate) fn record_notification_processed(notification: &ExExNotification) { inc_notifications_processed(); if notification.reverted_chain().is_some() { inc_reorgs_processed(); diff --git a/crates/node/src/node.rs b/crates/node/src/node.rs index 7d3d43a..5e5b7b9 100644 --- a/crates/node/src/node.rs +++ b/crates/node/src/node.rs @@ -1,44 +1,37 @@ use crate::{NodeStatus, metrics, serve::RpcServerGuard}; -use alloy::{ - consensus::BlockHeader, - eips::NumHash, - primitives::{B256, BlockNumber}, -}; +use alloy::consensus::BlockHeader; use eyre::Context; use futures_util::StreamExt; use reth::{ + chainspec::EthChainSpec, primitives::EthPrimitives, - providers::{ - BlockIdReader, BlockNumReader, BlockReader, CanonChainTracker, CanonStateNotification, - CanonStateNotifications, CanonStateSubscriptions, HeaderProvider, NodePrimitivesProvider, - ProviderFactory, StateProviderFactory, providers::BlockchainProvider, - }, - rpc::types::engine::ForkchoiceState, + providers::{BlockIdReader, BlockReader, HeaderProvider, StateProviderFactory}, }; -use reth_chainspec::EthChainSpec; use reth_exex::{ExExContext, ExExEvent, ExExHead, ExExNotificationsStream}; use reth_node_api::{FullNodeComponents, FullNodeTypes, NodeTypes}; -use signet_blobber::BlobFetcher; +use signet_blobber::ExtractableChainShim; use signet_block_processor::{AliasOracleFactory, SignetBlockProcessorV1}; -use signet_db::{DbProviderExt, ProviderConsistencyExt, RuChain, RuWriter}; +use signet_extract::Extractor; use signet_node_config::SignetNodeConfig; -use signet_node_types::{NodeTypesDbTrait, SignetNodeTypes}; +use signet_rpc::{BlockTags, NewBlockNotification}; +use signet_storage::{HistoryRead, HotKv, HotKvRead, UnifiedStorage}; use signet_types::{PairedHeights, constants::SignetSystemConstants}; -use std::{fmt, mem::MaybeUninit, sync::Arc}; -use tokio::sync::watch; +use std::{fmt, sync::Arc}; +use tokio::sync::{broadcast, watch}; use tracing::{debug, info, instrument}; +use 
trevm::revm::database::DBErrorMarker; -/// Make it easier to write some args +/// Type alias for the host primitives. type PrimitivesOf = <::Types as NodeTypes>::Primitives; type ExExNotification = reth_exex::ExExNotification>; type Chain = reth::providers::Chain>; /// Signet context and configuration. -pub struct SignetNode> +pub struct SignetNode> where Host: FullNodeComponents, Host::Types: NodeTypes, - Db: NodeTypesDbTrait, + H: HotKv, { /// The host context, which manages provider access and notifications. pub(crate) host: ExExContext, @@ -46,12 +39,16 @@ where /// Signet node configuration. pub(crate) config: Arc, - /// A [`ProviderFactory`] instance to allow RU database access. - pub(crate) ru_provider: ProviderFactory>, + /// Unified hot + cold storage backend. + pub(crate) storage: Arc>, + + /// Atomic block tag tracking (latest/safe/finalized). + /// Shared with the RPC context via `Clone` (backed by `Arc`). + pub(crate) tags: BlockTags, - /// A [`BlockchainProvider`] instance. Used to notify the RPC server of - /// changes to the canonical/safe/finalized head. - pub(crate) bp: BlockchainProvider>, + /// Notification sender for new blocks. + /// Shared with the RPC context's `SubscriptionManager` via `Clone`. + pub(crate) notif_tx: broadcast::Sender, /// The join handle for the RPC server. None if the RPC server is not /// yet running. @@ -60,61 +57,41 @@ where /// Chain configuration constants. pub(crate) constants: SignetSystemConstants, - /// Status channel, currently used only for testing + /// Status channel, currently used only for testing. pub(crate) status: watch::Sender, - /// The block processor - pub(crate) processor: SignetBlockProcessorV1, + /// The block processor. + pub(crate) processor: SignetBlockProcessorV1, /// A reqwest client, used by the blob fetch and the tx cache forwarder. 
pub(crate) client: reqwest::Client, } -impl fmt::Debug for SignetNode +impl fmt::Debug for SignetNode where Host: FullNodeComponents, Host::Types: NodeTypes, - Db: NodeTypesDbTrait, + H: HotKv, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("SignetNode").field("config", &self.config).finish_non_exhaustive() } } -impl NodePrimitivesProvider for SignetNode +impl SignetNode where Host: FullNodeComponents, Host::Types: NodeTypes, - Db: NodeTypesDbTrait, -{ - type Primitives = EthPrimitives; -} - -impl CanonStateSubscriptions for SignetNode -where - Host: FullNodeComponents, - Host::Types: NodeTypes, - Db: NodeTypesDbTrait, - AliasOracle: AliasOracleFactory, -{ - fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { - self.bp.subscribe_to_canonical_state() - } -} - -impl SignetNode -where - Host: FullNodeComponents, - Host::Types: NodeTypes, - Db: NodeTypesDbTrait, + H: HotKv + Clone + Send + Sync + 'static, + ::Error: DBErrorMarker, AliasOracle: AliasOracleFactory, { /// Create a new Signet instance. It is strongly recommend that you use the /// [`SignetNodeBuilder`] instead of this function. /// /// This function does NOT initialize the genesis state. As such it is NOT - /// safe to use directly. The genesis state in the `factory` MUST be - /// initialized BEFORE calling this function. + /// safe to use directly. The genesis state in storage MUST be initialized + /// BEFORE calling this function. 
/// /// # Panics /// @@ -125,18 +102,18 @@ where pub fn new_unsafe( ctx: ExExContext, config: SignetNodeConfig, - factory: ProviderFactory>, + storage: Arc>, alias_oracle: AliasOracle, client: reqwest::Client, - ) -> eyre::Result<(Self, tokio::sync::watch::Receiver)> { + ) -> eyre::Result<(Self, watch::Receiver)> { let constants = config.constants().wrap_err("failed to load signet constants from genesis")?; - let bp: BlockchainProvider> = BlockchainProvider::new(factory.clone())?; + let (status, receiver) = watch::channel(NodeStatus::Booting); + let tags = BlockTags::new(0, 0, 0); + let (notif_tx, _) = broadcast::channel(128); - let (status, receiver) = tokio::sync::watch::channel(NodeStatus::Booting); - - let blob_cacher = BlobFetcher::builder() + let blob_cacher = signet_blobber::BlobFetcher::builder() .with_config(config.block_extractor())? .with_pool(ctx.pool().clone()) .with_client(client.clone()) @@ -147,7 +124,7 @@ where let processor = SignetBlockProcessorV1::new( constants.clone(), config.chain_spec().clone(), - factory.clone(), + storage.hot().clone(), alias_oracle, config.slot_calculator(), blob_cacher, @@ -156,15 +133,13 @@ where let this = Self { config: config.into(), host: ctx, - ru_provider: factory.clone(), - bp, - + storage, + tags, + notif_tx, rpc_handle: None, constants, status, - processor, - client, }; Ok((this, receiver)) @@ -174,8 +149,8 @@ where /// errors. #[instrument(skip(self), fields(host = ?self.host.config.chain.chain()))] pub async fn start(mut self) -> eyre::Result<()> { - if let Some(height) = self.ru_provider.ru_check_consistency()? { - self.unwind_to(height).wrap_err("failed to unwind RU database to consistent state")?; + if let Some(lag_start) = self.storage.cold_lag().await? { + info!(%lag_start, "cold storage behind hot, will catch up asynchronously"); } // This exists only to bypass the `tracing::instrument(err)` macro to @@ -185,7 +160,11 @@ where // includes cause reporting. 
let err = format!("{err:#}"); - let last_block = self.ru_provider.last_block_number().ok(); + let last_block = self + .storage + .reader() + .ok() + .and_then(|r| HistoryRead::last_block_number(&r).ok().flatten()); let exex_head = last_block.and_then(|h| self.set_exex_head(h).ok()); tracing::error!(err, last_block, ?exex_head, "Signet node crashed"); @@ -198,8 +177,10 @@ where self.start_rpc().await?; - // Determine the last block written to the database for backfill - let last_rollup_block: u64 = self.ru_provider.last_block_number()?; + // Determine the last block written to storage for backfill + let reader = self.storage.reader()?; + let last_rollup_block = HistoryRead::last_block_number(&reader)?.unwrap_or(0); + drop(reader); info!(last_rollup_block, "resuming execution from last rollup block found"); @@ -227,9 +208,11 @@ where Ok(()) } - /// Sets the head of the Exex chain from the last rollup block, handling genesis conditions if necessary. + /// Sets the head of the Exex chain from the last rollup block, handling + /// genesis conditions if necessary. fn set_exex_head(&mut self, last_rollup_block: u64) -> eyre::Result { - // If the last rollup block is 0, we can shortcut and just set the head to the host rollup deployment block. + // If the last rollup block is 0, shortcut to the host rollup + // deployment block. if last_rollup_block == 0 { let host_deployment_block = self.host.provider().block_by_number(self.constants.host_deploy_height())?; @@ -245,22 +228,19 @@ where host_ru_deploy_block, "Host deploy height not found. Falling back to genesis block" ); - let genesis_block = self.host.provider().block_by_number(0)?; - match genesis_block { - Some(genesis_block) => { - let exex_head = ExExHead { block: genesis_block.num_hash_slow() }; - self.host.notifications.set_with_head(exex_head); - return Ok(exex_head); - } - None => panic!("failed to find genesis block"), - } + let genesis_block = self + .host + .provider() + .block_by_number(0)? 
+ .expect("failed to find genesis block"); + let exex_head = ExExHead { block: genesis_block.num_hash_slow() }; + self.host.notifications.set_with_head(exex_head); + return Ok(exex_head); } } } - // If the last rollup block is not 0, we need to find the corresponding host block. - // We do this by looking up the host block number for the rollup block number, and then - // looking up the host block for that number. + // Find the corresponding host block for the rollup block number. let host_height = self.constants.pair_ru(last_rollup_block).host; match self.host.provider().block_by_number(host_height)? { @@ -272,15 +252,11 @@ where } None => { debug!(host_height, "no host block found for host height"); - let genesis_block = self.host.provider().block_by_number(0)?; - match genesis_block { - Some(genesis_block) => { - let exex_head = ExExHead { block: genesis_block.num_hash_slow() }; - self.host.notifications.set_with_head(exex_head); - Ok(exex_head) - } - None => panic!("failed to find genesis block"), - } + let genesis_block = + self.host.provider().block_by_number(0)?.expect("failed to find genesis block"); + let exex_head = ExExHead { block: genesis_block.num_hash_slow() }; + self.host.notifications.set_with_head(exex_head); + Ok(exex_head) } } } @@ -293,355 +269,194 @@ where pub async fn on_notification(&self, notification: ExExNotification) -> eyre::Result<()> { metrics::record_notification_received(¬ification); + let mut changed = false; + // NB: REVERTS MUST RUN FIRST - let mut reverted = None; if let Some(chain) = notification.reverted_chain() { - reverted = self.on_host_revert(&chain).wrap_err("error encountered during revert")?; + self.on_host_revert(&chain).wrap_err("error encountered during revert")?; + changed = true; } - let mut committed = None; if let Some(chain) = notification.committed_chain() { - committed = self - .processor - .on_host_commit::(&chain) + self.process_committed_chain(&chain) .await .wrap_err("error encountered during commit")?; + 
changed = true; } - if committed.is_some() || reverted.is_some() { - // Update the status channel and canon heights, etc. - self.update_status(committed, reverted)?; + if changed { + self.update_status()?; } metrics::record_notification_processed(¬ification); Ok(()) } - /// Update the status channel and the latest block info. This is necessary - /// to keep the RPC node in sync with the latest block information. - fn update_status( - &self, - committed: Option, - reverted: Option, - ) -> eyre::Result<()> { - let ru_height = self.ru_provider.last_block_number()?; + /// Process a committed chain by extracting and executing blocks. + async fn process_committed_chain(&self, chain: &Arc>) -> eyre::Result<()> { + let shim = ExtractableChainShim::new(chain); + let extractor = Extractor::new(self.constants.clone()); + let extracts: Vec<_> = extractor.extract_signet(&shim).collect(); - // Update the RPC's block information - self.update_canon_heights(ru_height)?; + let reader = self.storage.reader()?; + let last_height = HistoryRead::last_block_number(&reader)?.unwrap_or(0); + drop(reader); - // We'll also emit the new chains as notifications on our canonstate - // notification channel, provided anyone is listening - self.update_canon_state(committed, reverted); + for block_extracts in extracts.iter().filter(|e| e.ru_height > last_height) { + let executed = self.processor.process_block(block_extracts).await?; + self.notify_new_block(&executed); + self.storage.append_blocks(vec![executed])?; + } + Ok(()) + } - // Update the status channel. This is used by the test-utils to watch - // notification processing, and may be removed in the future. - self.status.send_modify(|s| *s = NodeStatus::AtHeight(ru_height)); + /// Send a new block notification on the broadcast channel. 
+ fn notify_new_block(&self, block: &signet_storage::ExecutedBlock) { + let notif = NewBlockNotification { + header: block.header.inner().clone(), + transactions: block.transactions.iter().map(|tx| tx.inner().clone()).collect(), + receipts: block.receipts.clone(), + }; + // Ignore send errors — no subscribers is fine. + let _ = self.notif_tx.send(notif); + } + + /// Update the status channel and block tags. This keeps the RPC node + /// in sync with the latest block information. + fn update_status(&self) -> eyre::Result<()> { + let reader = self.storage.reader()?; + let ru_height = HistoryRead::last_block_number(&reader)?.unwrap_or(0); + drop(reader); + self.update_block_tags(ru_height)?; + self.status.send_modify(|s| *s = NodeStatus::AtHeight(ru_height)); Ok(()) } - /// Update the canonical heights of the chain. This does two main things - /// - Update the RPC server's view of the forkchoice rule, setting the - /// tip and block labels - /// - Update the reth node that the ExEx has finished processing blocks up - /// to the finalized block. - /// - /// This is used by the RPC to resolve block tags including "latest", - /// "safe", and "finalized", as well as the number returned by - /// `eth_blockNumber`. - fn update_canon_heights(&self, ru_height: u64) -> eyre::Result<()> { - // Set the canonical head ("latest" label) - let latest_ru_block_header = self - .ru_provider - .sealed_header(ru_height)? - .expect("ru db inconsistent. no header for height"); - let latest_ru_block_hash = latest_ru_block_header.hash(); - self.bp.set_canonical_head(latest_ru_block_header); - - // This is our fallback safe and finalized, in case the host chain - // hasn't finalized more recent blocks - let genesis_ru_hash = self - .ru_provider - .sealed_header(0)? - .expect("ru db inconsistent. no header for height") - .hash(); - - // Load the safe block hash for both the host and the rollup. 
- // The safe block height of the rollup CANNOT be higher than the latest ru height, - // as we've already processed all the blocks up to the latest ru height. - let PairedHeights { host: _, rollup: safe_ru_height } = - self.load_safe_block_heights(ru_height)?; - let safe_ru_block_header = self - .ru_provider - .sealed_header(safe_ru_height)? - .expect("ru db inconsistent. no header for height"); - let safe_ru_block_hash = safe_ru_block_header.hash(); - + /// Update block tags (latest/safe/finalized) and notify reth of processed + /// height. + fn update_block_tags(&self, ru_height: u64) -> eyre::Result<()> { + // Safe height + let safe_heights = self.load_safe_block_heights(ru_height)?; + let safe_ru_height = safe_heights.rollup; debug!(safe_ru_height, "calculated safe ru height"); - // Update the safe rollup block hash iff it's not the genesis rollup block. - if safe_ru_block_hash != genesis_ru_hash { - self.bp.set_safe(safe_ru_block_header); - } - - // Load the finalized rollup block hash. - // The finalized rollup block height CANNOT be higher than the latest ru height, - // as we've already processed all the blocks up to the latest ru height. + // Finalized height let finalized_heights = self.load_finalized_block_heights(ru_height)?; - debug!( finalized_host_height = finalized_heights.host, finalized_ru_height = finalized_heights.rollup, "calculated finalized heights" ); - // Load the finalized RU block hash. It's the genesis hash if the host - // and rollup finalized heights are both 0. Otherwise, we load the finalized - // RU header and set the finalized block hash. - let finalized_ru_block_hash = - self.set_finalized_ru_block_hash(finalized_heights, genesis_ru_hash)?; - - // NB: - // We also need to notify the reth node that we are totally - // finished processing the host block before the finalized block now. - // We want to keep the finalized host block in case we reorg to the block - // immediately on top of it, and we need some state from the parent. 
- // - // If this errors, it means that the reth node has shut down and we - // should stop processing blocks. - // - // To do this, we grab the finalized host header to get its height and hash, - // so we can send the corresponding [`ExExEvent`]. - if finalized_ru_block_hash != genesis_ru_hash { + // Atomically update all three tags + self.tags.update_all(ru_height, safe_ru_height, finalized_heights.rollup); + + // Notify reth that we've finished processing up to the finalized + // height. Skip if finalized is still at genesis. + if finalized_heights.host != 0 || finalized_heights.rollup != 0 { self.update_highest_processed_height(finalized_heights.host)?; } - // Update the RPC's forkchoice timestamp. - self.bp.on_forkchoice_update_received(&ForkchoiceState { - head_block_hash: latest_ru_block_hash, - safe_block_hash: safe_ru_block_hash, - finalized_block_hash: finalized_ru_block_hash, - }); debug!( - %latest_ru_block_hash, %safe_ru_block_hash, %finalized_ru_block_hash, - "updated RPC block producer" + latest = ru_height, + safe = safe_ru_height, + finalized = finalized_heights.rollup, + "updated block tags" ); Ok(()) } - /// Update the ExEx head to the finalized host block. - /// - /// If this errors, it means that the reth node has shut down and we - /// should stop processing blocks. - fn update_exex_head( - &self, - finalized_host_height: u64, - finalized_host_hash: B256, - ) -> eyre::Result<()> { - debug!(finalized_host_height, "Sending FinishedHeight notification"); - self.host.events.send(ExExEvent::FinishedHeight(NumHash { - number: finalized_host_height, - hash: finalized_host_hash, - }))?; - Ok(()) - } - - /// Send a canon state notification via the channel. 
- fn update_canon_state(&self, committed: Option, reverted: Option) { - let commit_count = committed.as_ref().map(|c| c.len()).unwrap_or_default(); - let revert_count = reverted.as_ref().map(|r| r.len()).unwrap_or_default(); - - let notif = match (committed, reverted) { - (None, None) => None, - (None, Some(r)) => Some(CanonStateNotification::Reorg { - old: Arc::new(r.inner), - new: Arc::new(Default::default()), - }), - (Some(c), None) => Some(CanonStateNotification::Commit { new: Arc::new(c.inner) }), - (Some(c), Some(r)) => Some(CanonStateNotification::Reorg { - old: Arc::new(r.inner), - new: Arc::new(c.inner), - }), - }; - if let Some(notif) = notif { - tracing::debug!(commit_count, revert_count, "sending canon state notification"); - // we don't care if it fails, we just want to send it - self.bp.canonical_in_memory_state().notify_canon_state(notif); - } - } - /// Load the host chain "safe" block number and determine the rollup "safe" - /// block number. There are three cases: + /// block number. /// + /// There are three cases: /// 1. The host chain "safe" block number is below the rollup genesis. - /// In this case, we'll use the host genesis block number as the "safe" - /// block number. This can happen if the rollup starts syncing while the - /// host still hasn't seen the rollup genesis block. - /// 2. The host safe "block", when converted to the equivalent rollup block, - /// is beyond the current rollup block. In this case, we'll use the current - /// rollup block as safe block. This can happen if the host chain is - /// synced beyond the current rollup block, but the rollup is still syncing - /// and catching up with the host head and therefore hasn't seen the host - /// safe block. - /// 3. The host safe block number is below the current rollup block. In this - /// case, we can use the safe host block number, converted to its rollup - /// equivalent, as the safe rollup block number. 
This is the expected case - /// when the rollup and host are both caught up and in live sync. + /// 2. The safe rollup equivalent is beyond the current rollup height. + /// 3. The safe rollup equivalent is below the current rollup height (normal + /// case). fn load_safe_block_heights(&self, ru_height: u64) -> eyre::Result { - // Load the host safe block number let safe_host_height = self.host.provider().safe_block_number()?; - // Convert the host safe block number to the rollup safe block number. - // If the host safe block number is below the rollup genesis, - // this will return None. let safe_heights = safe_host_height .and_then(|safe_host_height| self.constants.pair_host(safe_host_height)); - // If we successfully converted the host safe block number to the rollup safe block number, - // then we'll compare it to the current rollup block height and use the smaller of the two. - if let Some(safe_heights) = safe_heights { - // We compare the safe ru height to the current ru height. If the safe ru height is - // beyond the current ru height, we're in case 2. + safe_heights.map_or(Ok(PairedHeights { host: 0, rollup: 0 }), |safe_heights| { if safe_heights.rollup > ru_height { - // We are in case 2. Ok(PairedHeights { host: self.constants.rollup_block_to_host_block_num(ru_height), rollup: ru_height, }) } else { - // If the safe ru height is below the current ru height, we're in case 3. Ok(safe_heights) } - } else { - // If the host safe block number is below the rollup genesis, - // we'll use the host genesis block number as the "safe" block number. - Ok(PairedHeights { host: 0, rollup: 0 }) - } - } - - /// Set the finalized RU block hash. - /// - /// Depending on the current rollup sync status, there are two cases: - /// 1. If we're syncing from scratch, we'll set the finalized RU block hash to the genesis hash. - /// 2. If we're syncing, or following the tip, we'll set the finalized RU block hash to the current RU block hash. 
- fn set_finalized_ru_block_hash( - &self, - finalized_heights: PairedHeights, - genesis_hash: B256, - ) -> eyre::Result { - // If both heights are 0, return genesis hash - if finalized_heights.host == 0 && finalized_heights.rollup == 0 { - return Ok(genesis_hash); - } - - // Load and set finalized RU header - let finalized_ru_header = self - .ru_provider - .sealed_header(finalized_heights.rollup)? - .expect("ru db inconsistent. no header for height"); - let finalized_ru_block_hash = finalized_ru_header.hash(); - self.bp.set_finalized(finalized_ru_header); - - Ok(finalized_ru_block_hash) - } - - /// Update the host node with the highest processed host height for the exex. - fn update_highest_processed_height(&self, finalized_host_height: u64) -> eyre::Result<()> { - let finalized_host_header = self - .host - .provider() - .sealed_header(finalized_host_height)? - .expect("db inconsistent. no host header for finalized height"); - - let adjusted_height = finalized_host_header.number.saturating_sub(1); - let hash = finalized_host_header.hash(); - - debug!(finalized_host_height = adjusted_height, "Sending FinishedHeight notification"); - self.update_exex_head(adjusted_height, hash) + }) } /// Load the host chain "finalized" block number and determine the rollup - /// "finalized" block number. If the host chain "finalized" block number is below the - /// rollup genesis, we'll use the genesis hash as the "finalized" block. - /// If the host chain "finalized" block number is beyond the current rollup block, - /// we'll use the current rollup block and its host equivalent as the "finalized" blocks. - /// - /// This returns a tuple of the host and rollup "finalized" block numbers. + /// "finalized" block number. /// /// There are three cases: - /// 1. The host chain "finalized" block number is below the rollup genesis (and therefore the current rollup block). 
- /// In this case, we'll use the host genesis block number as the "finalized" block number, with the rollup syncing from scratch. - /// This can happen if the rollup starts syncing while the host still hasn't seen the rollup genesis block. - /// 2. The host chain "finalized" block number is beyond the current rollup block. - /// In this case, we'll use the current rollup block number as the "finalized" block number. - /// This can happen if the host chain is synced beyond the current rollup block, but the rollup is still syncing - /// and catching up with the host head and therefore hasn't seen the host finalized block. - /// 3. The host chain "finalized" block number is below the current rollup block. - /// In this case, we'll use the host chain "finalized" block number, converted to its rollup equivalent, as the "finalized" block number. - /// This is the expected case when the rollup and host are both caught up and in live sync. + /// 1. The host chain "finalized" block is below the rollup genesis. + /// 2. The finalized rollup equivalent is beyond the current rollup height. + /// 3. The finalized rollup equivalent is below the current rollup height + /// (normal case). fn load_finalized_block_heights(&self, ru_height: u64) -> eyre::Result { - // Load the host chain "finalized" block number let finalized_host_block_number = self.host.provider().finalized_block_number()?; - // Convert the host chain "finalized" block number to the rollup "finalized" block number. - // If the host chain "finalized" block number is below the rollup genesis, - // this will return None. let finalized_ru_block_number = finalized_host_block_number.and_then(|finalized_host_block_number| { self.constants.host_block_to_rollup_block_num(finalized_host_block_number) }); - // If we successfully converted the host chain "finalized" block number to the rollup "finalized" block number, - // then we'll figure out which case we're in and return the appropriate heights. 
- if let Some(finalized_ru_block_number) = finalized_ru_block_number { - // We compare the finalized ru height to the current ru height. If the finalized ru height is - // beyond the current ru height, we're in case 2 (rollup is behind host). - if finalized_ru_block_number > ru_height { - Ok(self.constants.pair_ru(ru_height)) - } else { - // If the finalized ru height is below the current ru height, we're in case 3 (rollup is near or in sync with the host head). - Ok(self.constants.pair_ru(finalized_ru_block_number)) - } - } else { - // If we failed to convert the host chain "finalized" block number to the rollup "finalized" block number, - // then this means the host chain "finalized" block number is below the rollup genesis (and therefore the current rollup block). - // We'll use the genesis block number as the "finalized" block number. - Ok(PairedHeights { host: 0, rollup: 0 }) - } + finalized_ru_block_number.map_or( + Ok(PairedHeights { host: 0, rollup: 0 }), + |finalized_ru_block_number| { + if finalized_ru_block_number > ru_height { + Ok(self.constants.pair_ru(ru_height)) + } else { + Ok(self.constants.pair_ru(finalized_ru_block_number)) + } + }, + ) } - /// Unwind the RU chain DB to the target block number. - fn unwind_to(&self, target: BlockNumber) -> eyre::Result { - let mut reverted = MaybeUninit::uninit(); - self.ru_provider - .provider_rw()? - .update(|writer| { - reverted.write(writer.ru_take_blocks_and_execution_above(target)?); - Ok(()) - }) - // SAFETY: if the closure above returns Ok, reverted is initialized. - .map(|_| unsafe { reverted.assume_init() }) - .map_err(Into::into) + /// Update the host node with the highest processed host height for the + /// ExEx. + fn update_highest_processed_height(&self, finalized_host_height: u64) -> eyre::Result<()> { + let adjusted_height = finalized_host_height.saturating_sub(1); + let adjusted_header = self + .host + .provider() + .sealed_header(adjusted_height)? + .expect("db inconsistent. 
no host header for adjusted height"); + + let hash = adjusted_header.hash(); + + debug!(finalized_host_height = adjusted_height, "Sending FinishedHeight notification"); + self.host.events.send(ExExEvent::FinishedHeight(alloy::eips::NumHash { + number: adjusted_height, + hash, + }))?; + Ok(()) } /// Called when the host chain has reverted a block or set of blocks. #[instrument(skip_all, fields(first = chain.first().number(), tip = chain.tip().number()))] - pub fn on_host_revert(&self, chain: &Arc>) -> eyre::Result> { - // If the end is before the RU genesis, we don't need to do anything at - // all. + pub fn on_host_revert(&self, chain: &Arc>) -> eyre::Result<()> { + // If the end is before the RU genesis, nothing to do. if chain.tip().number() <= self.constants.host_deploy_height() { - return Ok(None); + return Ok(()); } - // The target is - // - the block BEFORE the first block in the chain - // - or block 0, if the first block is before the rollup deploy height + // Target is the block BEFORE the first block in the chain, or 0. let target = self .constants .host_block_to_rollup_block_num(chain.first().number()) - .unwrap_or_default() // 0 if the block is before the deploy height - .saturating_sub(1); // still 0 if 0, otherwise the block BEFORE. 
+ .unwrap_or_default() + .saturating_sub(1); - self.unwind_to(target).map(Some) + self.storage.unwind_above(target)?; + Ok(()) } } diff --git a/crates/node/src/rpc.rs b/crates/node/src/rpc.rs index b963685..73e5f81 100644 --- a/crates/node/src/rpc.rs +++ b/crates/node/src/rpc.rs @@ -2,19 +2,21 @@ use crate::{ SignetNode, serve::{RpcServerGuard, ServeConfig}, }; -use reth::{primitives::EthPrimitives, rpc::builder::config::RethRpcServerConfig}; +use reth::primitives::EthPrimitives; use reth_node_api::{FullNodeComponents, NodeTypes}; use signet_block_processor::AliasOracleFactory; -use signet_node_types::NodeTypesDbTrait; -use signet_rpc::RpcCtx; +use signet_rpc::{StorageRpcConfig, StorageRpcCtx}; +use signet_storage::HotKv; use signet_tx_cache::TxCache; +use std::sync::Arc; use tracing::info; -impl SignetNode +impl SignetNode where Host: FullNodeComponents, Host::Types: NodeTypes, - Db: NodeTypesDbTrait, + H: HotKv + Send + Sync + 'static, + ::Error: trevm::revm::database::DBErrorMarker, AliasOracle: AliasOracleFactory, { /// Start the RPC server. 
@@ -27,17 +29,17 @@ where async fn launch_rpc(&self) -> eyre::Result { let tasks = self.host.task_executor(); - let forwarder = + let tx_cache = self.config.forward_url().map(|url| TxCache::new_with_client(url, self.client.clone())); - let eth_config = self.host.config.rpc.eth_config(); - let router = signet_rpc::router().with_state(RpcCtx::new( - self.host.components.clone(), + let rpc_ctx = StorageRpcCtx::new( + Arc::clone(&self.storage), self.constants.clone(), - self.bp.clone(), - eth_config, - forwarder, - tasks.clone(), - )?); + self.tags.clone(), + tx_cache, + StorageRpcConfig::default(), + self.notif_tx.clone(), + ); + let router = signet_rpc::router::().with_state(rpc_ctx); let serve_config: ServeConfig = self.config.merge_rpc_configs(&self.host)?.into(); serve_config.serve(tasks, router).await } diff --git a/crates/node/src/serve.rs b/crates/node/src/serve.rs index bfd07da..720269a 100644 --- a/crates/node/src/serve.rs +++ b/crates/node/src/serve.rs @@ -12,7 +12,7 @@ use tower_http::cors::{AllowOrigin, Any, CorsLayer}; use tracing::error; /// Guard to shutdown the RPC servers. When dropped, this will shutdown all -/// running servers +/// running servers. #[derive(Default)] pub(crate) struct RpcServerGuard { http: Option>, @@ -77,7 +77,7 @@ impl From for ServeConfig { } impl ServeConfig { - /// Serve the router on the given addresses. + /// Serve the router via HTTP. async fn serve_http( &self, tasks: &TaskExecutor, @@ -89,7 +89,7 @@ impl ServeConfig { serve_axum(tasks, router, &self.http, self.http_cors.as_deref()).await.map(Some) } - /// Serve the router on the given addresses. + /// Serve the router via WebSocket. 
async fn serve_ws( &self, tasks: &TaskExecutor, @@ -156,7 +156,7 @@ async fn serve( tasks: &TaskExecutor, addrs: &[SocketAddr], service: axum::Router, -) -> Result, eyre::Error> { +) -> eyre::Result> { let listener = tokio::net::TcpListener::bind(addrs).await?; let fut = async move { @@ -214,7 +214,7 @@ async fn serve_ipc( endpoint: &str, ) -> eyre::Result { let name = std::ffi::OsStr::new(endpoint); - let name = to_name(name).expect("invalid name"); + let name = to_name(name)?; ls::ListenerOptions::new() .name(name) .serve_with_handle(router.clone(), tasks.handle().clone()) diff --git a/crates/rpc-storage/Cargo.toml b/crates/rpc-storage/Cargo.toml deleted file mode 100644 index f9982a0..0000000 --- a/crates/rpc-storage/Cargo.toml +++ /dev/null @@ -1,47 +0,0 @@ -[package] -name = "signet-rpc-storage" -version.workspace = true -edition.workspace = true -rust-version.workspace = true -authors.workspace = true -license.workspace = true -homepage.workspace = true -repository.workspace = true -description = "Ethereum JSON-RPC server backed by signet-storage" - -[dependencies] -signet-storage.workspace = true -signet-cold.workspace = true -signet-hot.workspace = true -signet-storage-types.workspace = true -signet-evm.workspace = true -trevm = { workspace = true, features = ["call", "estimate_gas"] } -signet-types.workspace = true -signet-tx-cache.workspace = true -signet-bundle.workspace = true -alloy.workspace = true -ajj.workspace = true -tokio.workspace = true -tokio-stream = "0.1" -tokio-util = "0.7" -tracing.workspace = true -thiserror.workspace = true -serde.workspace = true -dashmap = "6.1.0" -revm-inspectors.workspace = true -itertools.workspace = true - -[dev-dependencies] -tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } -tokio-util = "0.7" -signet-cold = { workspace = true, features = ["test-utils"] } -signet-hot = { workspace = true, features = ["test-utils"] } -signet-storage.workspace = true -signet-storage-types.workspace = true 
-signet-constants.workspace = true -alloy.workspace = true -serde_json.workspace = true -axum = "0.8" -tower = { version = "0.5", features = ["util"] } -http = "1" -trevm.workspace = true diff --git a/crates/rpc-storage/README.md b/crates/rpc-storage/README.md deleted file mode 100644 index ec3247e..0000000 --- a/crates/rpc-storage/README.md +++ /dev/null @@ -1,16 +0,0 @@ -# signet-rpc-storage - -Ethereum JSON-RPC server backed by `signet-storage`'s unified storage backend. - -This crate provides a standalone ETH RPC implementation that uses hot storage -for state queries and cold storage for block, transaction, and receipt data. -Unlike `signet-rpc`, it does not depend on reth's `FullNodeComponents`. - -## Supported Methods - -- Block queries: `eth_blockNumber`, `eth_getBlockByHash`, `eth_getBlockByNumber`, etc. -- Transaction queries: `eth_getTransactionByHash`, `eth_getTransactionReceipt`, etc. -- Account state: `eth_getBalance`, `eth_getStorageAt`, `eth_getCode`, `eth_getTransactionCount` -- EVM execution: `eth_call`, `eth_estimateGas` -- Logs: `eth_getLogs` -- Transaction submission: `eth_sendRawTransaction` (optional, via `TxCache`) diff --git a/crates/rpc-storage/src/debug/endpoints.rs b/crates/rpc-storage/src/debug/endpoints.rs deleted file mode 100644 index d72fdf0..0000000 --- a/crates/rpc-storage/src/debug/endpoints.rs +++ /dev/null @@ -1,206 +0,0 @@ -//! Debug namespace RPC endpoint implementations. 
- -use crate::{ - config::StorageRpcCtx, - debug::{ - DebugError, - types::{TraceBlockParams, TraceTransactionParams}, - }, - eth::helpers::{CfgFiller, await_handler, response_tri}, -}; -use ajj::{HandlerCtx, ResponsePayload}; -use alloy::{ - consensus::BlockHeader, - eips::BlockId, - rpc::types::trace::geth::{GethTrace, TraceResult}, -}; -use itertools::Itertools; -use signet_evm::EvmErrored; -use signet_hot::HotKv; -use signet_hot::model::HotKvRead; -use signet_types::MagicSig; -use tracing::Instrument; -use trevm::revm::database::DBErrorMarker; - -/// `debug_traceBlockByNumber` and `debug_traceBlockByHash` handler. -pub(super) async fn trace_block( - hctx: HandlerCtx, - TraceBlockParams(id, opts): TraceBlockParams, - ctx: StorageRpcCtx, -) -> ResponsePayload, DebugError> -where - T: Into, - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let opts = response_tri!(opts.ok_or(DebugError::InvalidTracerConfig)); - - // Acquire a tracing semaphore permit to limit concurrent debug - // requests. The permit is held for the entire handler lifetime and - // is dropped when the async block completes. 
- let _permit = ctx.acquire_tracing_permit().await; - - let id = id.into(); - let span = tracing::debug_span!("traceBlock", ?id, tracer = ?opts.tracer.as_ref()); - - let fut = async move { - let cold = ctx.cold(); - let block_num = response_tri!(ctx.resolve_block_id(id).map_err(|e| { - tracing::warn!(error = %e, ?id, "block resolution failed"); - DebugError::BlockNotFound(id) - })); - - let (header, txs) = response_tri!( - tokio::try_join!( - cold.get_header_by_number(block_num), - cold.get_transactions_in_block(block_num), - ) - .map_err(|e| { - tracing::warn!(error = %e, block_num, "cold storage read failed"); - DebugError::Cold(e.to_string()) - }) - ); - - let Some(header) = header else { - return ResponsePayload::internal_error_message( - format!("block not found: {id}").into(), - ); - }; - - let block_hash = header.hash(); - let header = header.into_inner(); - - tracing::debug!(number = header.number, "Loaded block"); - - let mut frames = Vec::with_capacity(txs.len()); - - // State BEFORE this block. 
- let db = - response_tri!(ctx.revm_state_at_height(header.number.saturating_sub(1)).map_err(|e| { - tracing::warn!(error = %e, block_num, "hot storage read failed"); - DebugError::Hot(e.to_string()) - })); - - let mut trevm = signet_evm::signet_evm(db, ctx.constants().clone()) - .fill_cfg(&CfgFiller(ctx.chain_id())) - .fill_block(&header); - - let mut txns = txs.iter().enumerate().peekable(); - for (idx, tx) in txns - .by_ref() - .peeking_take_while(|(_, t)| MagicSig::try_from_signature(t.signature()).is_none()) - { - let tx_info = alloy::rpc::types::TransactionInfo { - hash: Some(*tx.tx_hash()), - index: Some(idx as u64), - block_hash: Some(block_hash), - block_number: Some(header.number), - base_fee: header.base_fee_per_gas(), - }; - - let t = trevm.fill_tx(tx); - let frame; - (frame, trevm) = response_tri!(crate::debug::tracer::trace(t, &opts, tx_info)); - frames.push(TraceResult::Success { result: frame, tx_hash: Some(*tx.tx_hash()) }); - - tracing::debug!(tx_index = idx, tx_hash = ?tx.tx_hash(), "Traced transaction"); - } - - ResponsePayload(Ok(frames)) - } - .instrument(span); - - await_handler!(@response_option hctx.spawn(fut)) -} - -/// `debug_traceTransaction` handler. -pub(super) async fn trace_transaction( - hctx: HandlerCtx, - TraceTransactionParams(tx_hash, opts): TraceTransactionParams, - ctx: StorageRpcCtx, -) -> ResponsePayload -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let opts = response_tri!(opts.ok_or(DebugError::InvalidTracerConfig)); - - // Held for the handler duration; dropped when the async block completes. - let _permit = ctx.acquire_tracing_permit().await; - - let span = tracing::debug_span!("traceTransaction", %tx_hash, tracer = ?opts.tracer.as_ref()); - - let fut = async move { - let cold = ctx.cold(); - - // Look up the transaction and its containing block. 
- let confirmed = response_tri!(cold.get_tx_by_hash(tx_hash).await.map_err(|e| { - tracing::warn!(error = %e, %tx_hash, "cold storage read failed"); - DebugError::Cold(e.to_string()) - })); - - let confirmed = response_tri!(confirmed.ok_or(DebugError::TransactionNotFound)); - let (_tx, meta) = confirmed.into_parts(); - - let block_num = meta.block_number(); - let block_hash = meta.block_hash(); - - let (header, txs) = response_tri!( - tokio::try_join!( - cold.get_header_by_number(block_num), - cold.get_transactions_in_block(block_num), - ) - .map_err(|e| { - tracing::warn!(error = %e, block_num, "cold storage read failed"); - DebugError::Cold(e.to_string()) - }) - ); - - let block_id = BlockId::Number(block_num.into()); - let header = response_tri!(header.ok_or(DebugError::BlockNotFound(block_id))).into_inner(); - - tracing::debug!(number = block_num, "Loaded containing block"); - - // State BEFORE this block. - let db = - response_tri!(ctx.revm_state_at_height(block_num.saturating_sub(1)).map_err(|e| { - tracing::warn!(error = %e, block_num, "hot storage read failed"); - DebugError::Hot(e.to_string()) - })); - - let mut trevm = signet_evm::signet_evm(db, ctx.constants().clone()) - .fill_cfg(&CfgFiller(ctx.chain_id())) - .fill_block(&header); - - // Replay all transactions up to (but not including) the target - let mut txns = txs.iter().enumerate().peekable(); - for (_idx, tx) in txns.by_ref().peeking_take_while(|(_, t)| t.tx_hash() != &tx_hash) { - if MagicSig::try_from_signature(tx.signature()).is_some() { - return ResponsePayload::internal_error_message( - DebugError::TransactionNotFound.to_string().into(), - ); - } - - trevm = response_tri!(trevm.run_tx(tx).map_err(EvmErrored::into_error)).accept_state(); - } - - let (index, tx) = response_tri!(txns.next().ok_or(DebugError::TransactionNotFound)); - - let trevm = trevm.fill_tx(tx); - - let tx_info = alloy::rpc::types::TransactionInfo { - hash: Some(*tx.tx_hash()), - index: Some(index as u64), - block_hash: 
Some(block_hash), - block_number: Some(header.number), - base_fee: header.base_fee_per_gas(), - }; - - let res = response_tri!(crate::debug::tracer::trace(trevm, &opts, tx_info)).0; - - ResponsePayload(Ok(res)) - } - .instrument(span); - - await_handler!(@response_option hctx.spawn(fut)) -} diff --git a/crates/rpc-storage/src/debug/error.rs b/crates/rpc-storage/src/debug/error.rs deleted file mode 100644 index a078034..0000000 --- a/crates/rpc-storage/src/debug/error.rs +++ /dev/null @@ -1,50 +0,0 @@ -//! Error types for the debug namespace. - -use alloy::eips::BlockId; - -/// Errors that can occur in the `debug` namespace. -/// -/// The [`serde::Serialize`] impl emits sanitized messages suitable for -/// API responses — internal storage details are not exposed to callers. -/// Use [`tracing`] to log the full error chain before constructing the -/// variant. -#[derive(Debug, Clone, thiserror::Error)] -pub enum DebugError { - /// Cold storage error. - #[error("cold storage error")] - Cold(String), - /// Hot storage error. - #[error("hot storage error")] - Hot(String), - /// Invalid tracer configuration. - #[error("invalid tracer config")] - InvalidTracerConfig, - /// Unsupported tracer type. - #[error("unsupported: {0}")] - Unsupported(&'static str), - /// EVM execution error. - #[error("evm execution error")] - Evm(String), - /// Block not found. - #[error("block not found: {0}")] - BlockNotFound(BlockId), - /// Transaction not found. - #[error("transaction not found")] - TransactionNotFound, -} - -impl DebugError { - /// Convert to a string by value. 
- pub fn into_string(self) -> String { - self.to_string() - } -} - -impl serde::Serialize for DebugError { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - serializer.serialize_str(&self.to_string()) - } -} diff --git a/crates/rpc-storage/src/debug/mod.rs b/crates/rpc-storage/src/debug/mod.rs deleted file mode 100644 index 2a4da50..0000000 --- a/crates/rpc-storage/src/debug/mod.rs +++ /dev/null @@ -1,26 +0,0 @@ -//! Debug namespace RPC router backed by storage. - -mod endpoints; -use endpoints::{trace_block, trace_transaction}; -mod error; -pub use error::DebugError; -pub(crate) mod tracer; -mod types; - -use crate::config::StorageRpcCtx; -use alloy::{eips::BlockNumberOrTag, primitives::B256}; -use signet_hot::HotKv; -use signet_hot::model::HotKvRead; -use trevm::revm::database::DBErrorMarker; - -/// Instantiate a `debug` API router backed by storage. -pub(crate) fn debug() -> ajj::Router> -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - ajj::Router::new() - .route("traceBlockByNumber", trace_block::) - .route("traceBlockByHash", trace_block::) - .route("traceTransaction", trace_transaction::) -} diff --git a/crates/rpc-storage/src/debug/tracer.rs b/crates/rpc-storage/src/debug/tracer.rs deleted file mode 100644 index fa24073..0000000 --- a/crates/rpc-storage/src/debug/tracer.rs +++ /dev/null @@ -1,224 +0,0 @@ -//! Core tracing logic for the debug namespace. -//! -//! Largely adapted from reth: `crates/rpc/rpc/src/debug.rs`. 
- -use crate::debug::DebugError; -use alloy::rpc::types::{ - TransactionInfo, - trace::geth::{ - FourByteFrame, GethDebugBuiltInTracerType, GethDebugTracerConfig, GethDebugTracerType, - GethDebugTracingOptions, GethTrace, NoopFrame, - }, -}; -use revm_inspectors::tracing::{ - FourByteInspector, MuxInspector, TracingInspector, TracingInspectorConfig, -}; -use signet_evm::{EvmNeedsTx, EvmReady}; -use tracing::instrument; -use trevm::{ - helpers::Ctx, - revm::{Database, DatabaseCommit, DatabaseRef, Inspector, context::ContextTr}, -}; - -/// Trace a transaction using the provided EVM and tracing options. -#[instrument(skip(trevm, config, tx_info), fields(tx_hash = ?tx_info.hash))] -pub(super) fn trace( - trevm: EvmReady, - config: &GethDebugTracingOptions, - tx_info: TransactionInfo, -) -> Result<(GethTrace, EvmNeedsTx), DebugError> -where - Db: Database + DatabaseCommit + DatabaseRef, - Insp: Inspector>, -{ - let Some(tracer) = &config.tracer else { - return Err(DebugError::InvalidTracerConfig); - }; - - let GethDebugTracerType::BuiltInTracer(built_in) = tracer else { - return Err(DebugError::Unsupported("JS tracer")); - }; - - match built_in { - GethDebugBuiltInTracerType::Erc7562Tracer => { - Err(DebugError::Unsupported("ERC-7562 tracing is not yet implemented")) - } - GethDebugBuiltInTracerType::FourByteTracer => trace_four_byte(trevm), - GethDebugBuiltInTracerType::CallTracer => trace_call(&config.tracer_config, trevm), - GethDebugBuiltInTracerType::FlatCallTracer => { - trace_flat_call(&config.tracer_config, trevm, tx_info) - } - GethDebugBuiltInTracerType::PreStateTracer => trace_pre_state(&config.tracer_config, trevm), - GethDebugBuiltInTracerType::NoopTracer => Ok(( - NoopFrame::default().into(), - trevm - .run() - .map_err(|err| DebugError::Evm(err.into_error().to_string()))? 
- .accept_state(), - )), - GethDebugBuiltInTracerType::MuxTracer => trace_mux(&config.tracer_config, trevm, tx_info), - } -} - -fn trace_four_byte( - trevm: EvmReady, -) -> Result<(GethTrace, EvmNeedsTx), DebugError> -where - Db: Database + DatabaseCommit, - Insp: Inspector>, -{ - let mut four_byte = FourByteInspector::default(); - let trevm = trevm - .try_with_inspector(&mut four_byte, |trevm| trevm.run()) - .map_err(|e| DebugError::Evm(e.into_error().to_string()))?; - Ok((FourByteFrame::from(four_byte).into(), trevm.accept_state())) -} - -fn trace_call( - tracer_config: &GethDebugTracerConfig, - trevm: EvmReady, -) -> Result<(GethTrace, EvmNeedsTx), DebugError> -where - Db: Database + DatabaseCommit, - Insp: Inspector>, -{ - let call_config = - tracer_config.clone().into_call_config().map_err(|_| DebugError::InvalidTracerConfig)?; - - let mut inspector = - TracingInspector::new(TracingInspectorConfig::from_geth_call_config(&call_config)); - - let trevm = trevm - .try_with_inspector(&mut inspector, |trevm| trevm.run()) - .map_err(|e| DebugError::Evm(e.into_error().to_string()))?; - - let frame = inspector - .with_transaction_gas_limit(trevm.gas_limit()) - .into_geth_builder() - .geth_call_traces(call_config, trevm.gas_used()); - - Ok((frame.into(), trevm.accept_state())) -} - -fn trace_pre_state( - tracer_config: &GethDebugTracerConfig, - trevm: EvmReady, -) -> Result<(GethTrace, EvmNeedsTx), DebugError> -where - Db: Database + DatabaseCommit + DatabaseRef, - Insp: Inspector>, -{ - let prestate_config = tracer_config - .clone() - .into_pre_state_config() - .map_err(|_| DebugError::InvalidTracerConfig)?; - - let mut inspector = - TracingInspector::new(TracingInspectorConfig::from_geth_prestate_config(&prestate_config)); - - let trevm = trevm - .try_with_inspector(&mut inspector, |trevm| trevm.run()) - .map_err(|e| DebugError::Evm(e.into_error().to_string()))?; - let gas_limit = trevm.gas_limit(); - - // NB: state must be UNCOMMITTED for prestate diff computation. 
- let (result, mut trevm) = trevm.take_result_and_state(); - - let frame = inspector - .with_transaction_gas_limit(gas_limit) - .into_geth_builder() - .geth_prestate_traces(&result, &prestate_config, trevm.inner_mut_unchecked().db_mut()) - .map_err(|err| DebugError::Evm(err.to_string()))?; - - // Equivalent to `trevm.accept_state()`. - trevm.inner_mut_unchecked().db_mut().commit(result.state); - - Ok((frame.into(), trevm)) -} - -fn trace_flat_call( - tracer_config: &GethDebugTracerConfig, - trevm: EvmReady, - tx_info: TransactionInfo, -) -> Result<(GethTrace, EvmNeedsTx), DebugError> -where - Db: Database + DatabaseCommit, - Insp: Inspector>, -{ - let flat_call_config = tracer_config - .clone() - .into_flat_call_config() - .map_err(|_| DebugError::InvalidTracerConfig)?; - - let mut inspector = - TracingInspector::new(TracingInspectorConfig::from_flat_call_config(&flat_call_config)); - - let trevm = trevm - .try_with_inspector(&mut inspector, |trevm| trevm.run()) - .map_err(|e| DebugError::Evm(e.into_error().to_string()))?; - - let frame = inspector - .with_transaction_gas_limit(trevm.gas_limit()) - .into_parity_builder() - .into_localized_transaction_traces(tx_info); - - Ok((frame.into(), trevm.accept_state())) -} - -fn trace_mux( - tracer_config: &GethDebugTracerConfig, - trevm: EvmReady, - tx_info: TransactionInfo, -) -> Result<(GethTrace, EvmNeedsTx), DebugError> -where - Db: Database + DatabaseCommit + DatabaseRef, - Insp: Inspector>, -{ - let mux_config = - tracer_config.clone().into_mux_config().map_err(|_| DebugError::InvalidTracerConfig)?; - - let mut inspector = MuxInspector::try_from_config(mux_config) - .map_err(|err| DebugError::Evm(err.to_string()))?; - - let trevm = trevm - .try_with_inspector(&mut inspector, |trevm| trevm.run()) - .map_err(|e| DebugError::Evm(e.into_error().to_string()))?; - - // NB: state must be UNCOMMITTED for prestate diff computation. 
- let (result, mut trevm) = trevm.take_result_and_state(); - - let frame = inspector - .try_into_mux_frame(&result, trevm.inner_mut_unchecked().db_mut(), tx_info) - .map_err(|err| DebugError::Evm(err.to_string()))?; - - // Equivalent to `trevm.accept_state()`. - trevm.inner_mut_unchecked().db_mut().commit(result.state); - - Ok((frame.into(), trevm)) -} - -// Some code in this file has been copied and modified from reth -// -// The original license is included below: -// -// The MIT License (MIT) -// -// Copyright (c) 2022-2025 Reth Contributors -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. diff --git a/crates/rpc-storage/src/eth/endpoints.rs b/crates/rpc-storage/src/eth/endpoints.rs deleted file mode 100644 index e00f2c2..0000000 --- a/crates/rpc-storage/src/eth/endpoints.rs +++ /dev/null @@ -1,1160 +0,0 @@ -//! ETH namespace RPC endpoint implementations. 
- -use crate::{ - config::{EvmBlockContext, StorageRpcCtx, gas_oracle}, - eth::{ - error::{CallErrorData, EthError}, - helpers::{ - AddrWithBlock, BlockParams, CfgFiller, FeeHistoryArgs, StorageAtArgs, SubscribeArgs, - TxParams, await_handler, build_receipt, build_rpc_transaction, hot_reader_at_block, - normalize_gas_stateless, response_tri, - }, - types::{ - BlockTransactions, EmptyArray, LazyReceipts, RpcBlock, RpcHeader, RpcReceipt, - RpcTransaction, - }, - }, - interest::{FilterOutput, InterestKind}, -}; -use ajj::{HandlerCtx, ResponsePayload}; -use alloy::{ - consensus::Transaction, - eips::{ - BlockId, BlockNumberOrTag, - eip1559::BaseFeeParams, - eip2718::{Decodable2718, Encodable2718}, - eip2930::AccessListResult, - }, - primitives::{B256, U64, U256}, - rpc::types::{FeeHistory, Filter, Log}, -}; -use revm_inspectors::access_list::AccessListInspector; -use serde::Serialize; -use signet_cold::{HeaderSpecifier, ReceiptSpecifier}; -use signet_hot::{HistoryRead, HotKv, db::HotDbRead, model::HotKvRead}; -use tracing::{Instrument, debug, trace_span}; -use trevm::{ - EstimationResult, revm::context::result::ExecutionResult, revm::database::DBErrorMarker, -}; - -// --------------------------------------------------------------------------- -// Not Supported -// --------------------------------------------------------------------------- - -pub(crate) async fn not_supported() -> ResponsePayload<(), ()> { - ResponsePayload::method_not_found() -} - -/// Response for `eth_syncing`. -/// -/// Returns `false` when the node is fully synced, or a sync-status -/// object when it is still catching up. -#[derive(Debug, Clone, Serialize)] -#[serde(untagged)] -pub(crate) enum SyncingResponse { - /// Node is fully synced. - NotSyncing(bool), - /// Node is still syncing. - Syncing { - /// Block number the node started syncing from. - starting_block: U64, - /// Current block the node has synced to. - current_block: U64, - /// Highest known block number on the network. 
- highest_block: U64, - }, -} - -/// `eth_syncing` — returns sync status or `false` when fully synced. -pub(crate) async fn syncing(ctx: StorageRpcCtx) -> Result { - match ctx.tags().sync_status() { - Some(status) => Ok(SyncingResponse::Syncing { - starting_block: U64::from(status.starting_block), - current_block: U64::from(status.current_block), - highest_block: U64::from(status.highest_block), - }), - None => Ok(SyncingResponse::NotSyncing(false)), - } -} - -/// Uncle count is always zero — Signet has no uncle blocks. -pub(crate) async fn uncle_count() -> Result { - Ok(U64::ZERO) -} - -/// Uncle block is always absent — Signet has no uncle blocks. -pub(crate) async fn uncle_block() -> Result, ()> { - Ok(None) -} - -// --------------------------------------------------------------------------- -// Simple Queries -// --------------------------------------------------------------------------- - -/// `eth_blockNumber` — returns the latest block number from block tags. -pub(crate) async fn block_number(ctx: StorageRpcCtx) -> Result { - Ok(U64::from(ctx.tags().latest())) -} - -/// `eth_chainId` — returns the configured chain ID. -pub(crate) async fn chain_id(ctx: StorageRpcCtx) -> Result { - Ok(U64::from(ctx.chain_id())) -} - -// --------------------------------------------------------------------------- -// Gas & Fee Queries -// --------------------------------------------------------------------------- - -/// `eth_gasPrice` — suggests gas price based on recent block tips + base fee. -pub(crate) async fn gas_price(hctx: HandlerCtx, ctx: StorageRpcCtx) -> Result -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let task = async move { - let latest = ctx.tags().latest(); - let cold = ctx.cold(); - - let tip = gas_oracle::suggest_tip_cap(&cold, latest, ctx.config()) - .await - .map_err(|e| e.to_string())?; - - let base_fee = cold - .get_header_by_number(latest) - .await - .map_err(|e| e.to_string())? 
- .and_then(|h| h.base_fee_per_gas) - .unwrap_or_default(); - - Ok(tip + U256::from(base_fee)) - }; - - await_handler!(@option hctx.spawn(task)) -} - -/// `eth_maxPriorityFeePerGas` — suggests priority fee from recent block tips. -pub(crate) async fn max_priority_fee_per_gas( - hctx: HandlerCtx, - ctx: StorageRpcCtx, -) -> Result -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let task = async move { - let latest = ctx.tags().latest(); - gas_oracle::suggest_tip_cap(&ctx.cold(), latest, ctx.config()) - .await - .map_err(|e| e.to_string()) - }; - - await_handler!(@option hctx.spawn(task)) -} - -/// `eth_feeHistory` — returns base fee and reward percentile data. -pub(crate) async fn fee_history( - hctx: HandlerCtx, - FeeHistoryArgs(block_count, newest, reward_percentiles): FeeHistoryArgs, - ctx: StorageRpcCtx, -) -> Result -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let task = async move { - let mut block_count = block_count.to::(); - - if block_count == 0 { - return Ok(FeeHistory::default()); - } - - let max_fee_history = if reward_percentiles.is_none() { - ctx.config().max_header_history - } else { - ctx.config().max_block_history - }; - - block_count = block_count.min(max_fee_history); - - let newest = if newest.is_pending() { - block_count = block_count.saturating_sub(1); - BlockNumberOrTag::Latest - } else { - newest - }; - - let end_block = ctx.resolve_block_tag(newest); - let end_block_plus = end_block + 1; - - block_count = block_count.min(end_block_plus); - - // Validate percentiles - if let Some(percentiles) = &reward_percentiles - && percentiles.windows(2).any(|w| w[0] > w[1] || w[0] > 100.) 
- { - return Err("invalid reward percentiles".to_string()); - } - - let start_block = end_block_plus - block_count; - let cold = ctx.cold(); - - let specs: Vec<_> = (start_block..=end_block).map(HeaderSpecifier::Number).collect(); - let headers = cold.get_headers(specs).await.map_err(|e| e.to_string())?; - - let mut base_fee_per_gas: Vec = Vec::with_capacity(headers.len() + 1); - let mut gas_used_ratio: Vec = Vec::with_capacity(headers.len()); - let mut rewards: Vec> = Vec::new(); - - for (offset, maybe_header) in headers.iter().enumerate() { - let Some(header) = maybe_header else { - return Err(format!("missing header at block {}", start_block + offset as u64)); - }; - - base_fee_per_gas.push(header.base_fee_per_gas.unwrap_or_default() as u128); - gas_used_ratio.push(if header.gas_limit > 0 { - header.gas_used as f64 / header.gas_limit as f64 - } else { - 0.0 - }); - - if let Some(percentiles) = &reward_percentiles { - let block_num = start_block + offset as u64; - - let (txs, receipts) = tokio::try_join!( - cold.get_transactions_in_block(block_num), - cold.get_receipts_in_block(block_num), - ) - .map_err(|e| e.to_string())?; - - let block_rewards = calculate_reward_percentiles( - percentiles, - header.gas_used, - header.base_fee_per_gas.unwrap_or_default(), - &txs, - &receipts, - ); - rewards.push(block_rewards); - } - } - - // Next block base fee - if let Some(last_header) = headers.last().and_then(|h| h.as_ref()) { - base_fee_per_gas.push( - last_header.next_block_base_fee(BaseFeeParams::ethereum()).unwrap_or_default() - as u128, - ); - } - - let base_fee_per_blob_gas = vec![0; base_fee_per_gas.len()]; - let blob_gas_used_ratio = vec![0.; gas_used_ratio.len()]; - - Ok(FeeHistory { - base_fee_per_gas, - gas_used_ratio, - base_fee_per_blob_gas, - blob_gas_used_ratio, - oldest_block: start_block, - reward: reward_percentiles.map(|_| rewards), - }) - }; - - await_handler!(@option hctx.spawn(task)) -} - -/// Calculate reward percentiles for a single block. 
-/// -/// Sorts transactions by effective tip ascending, then walks -/// cumulative gas used to find the tip value at each percentile. -fn calculate_reward_percentiles( - percentiles: &[f64], - gas_used: u64, - base_fee: u64, - txs: &[signet_storage_types::RecoveredTx], - receipts: &[signet_cold::ColdReceipt], -) -> Vec { - if gas_used == 0 || txs.is_empty() { - return vec![0; percentiles.len()]; - } - - // Pair each tx's effective tip with its gas used. - let mut tx_gas_and_tip: Vec<(u64, u128)> = txs - .iter() - .zip(receipts.iter()) - .map(|(tx, receipt)| { - let tip = tx.effective_tip_per_gas(base_fee).unwrap_or_default(); - (receipt.gas_used, tip) - }) - .collect(); - - // Sort by tip ascending - tx_gas_and_tip.sort_by_key(|&(_, tip)| tip); - - let mut result = Vec::with_capacity(percentiles.len()); - let mut cumulative_gas: u64 = 0; - let mut tx_idx = 0; - - for &percentile in percentiles { - let threshold = (gas_used as f64 * percentile / 100.0) as u64; - - while tx_idx < tx_gas_and_tip.len() - 1 { - cumulative_gas += tx_gas_and_tip[tx_idx].0; - if cumulative_gas >= threshold { - break; - } - tx_idx += 1; - } - - result.push(tx_gas_and_tip[tx_idx].1); - } - - result -} - -// --------------------------------------------------------------------------- -// Block Queries -// --------------------------------------------------------------------------- - -/// `eth_getBlockByHash` / `eth_getBlockByNumber` — resolve block, fetch -/// header + transactions from cold storage, assemble RPC block response. 
-pub(crate) async fn block( - hctx: HandlerCtx, - BlockParams(t, full): BlockParams, - ctx: StorageRpcCtx, -) -> Result, String> -where - T: Into, - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let id = t.into(); - let full = full.unwrap_or(false); - - let task = async move { - let cold = ctx.cold(); - let block_num = ctx.resolve_block_id(id).map_err(|e| e.to_string())?; - - let (header, txs) = tokio::try_join!( - cold.get_header_by_number(block_num), - cold.get_transactions_in_block(block_num), - ) - .map_err(|e| e.to_string())?; - - let Some(header) = header else { - return Ok(None); - }; - - let block_hash = header.hash(); - let base_fee = header.base_fee_per_gas; - - let transactions = if full { - BlockTransactions::Full { txs, block_num, block_hash, base_fee } - } else { - BlockTransactions::Hashes(txs) - }; - - Ok(Some(RpcBlock { - header: alloy::rpc::types::Header { - inner: header.into_inner(), - hash: block_hash, - total_difficulty: None, - size: None, - }, - transactions, - uncles: EmptyArray, - })) - }; - - await_handler!(@option hctx.spawn(task)) -} - -/// `eth_getBlockTransactionCount*` — transaction count in a block. -pub(crate) async fn block_tx_count( - hctx: HandlerCtx, - (t,): (T,), - ctx: StorageRpcCtx, -) -> Result, String> -where - T: Into, - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let id = t.into(); - - let task = async move { - let cold = ctx.cold(); - let block_num = ctx.resolve_block_id(id).map_err(|e| e.to_string())?; - - cold.get_transaction_count(block_num) - .await - .map(|c| Some(U64::from(c))) - .map_err(|e| e.to_string()) - }; - - await_handler!(@option hctx.spawn(task)) -} - -/// `eth_getBlockReceipts` — all receipts in a block. 
-pub(crate) async fn block_receipts( - hctx: HandlerCtx, - (id,): (BlockId,), - ctx: StorageRpcCtx, -) -> Result, String> -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let task = async move { - let cold = ctx.cold(); - let block_num = ctx.resolve_block_id(id).map_err(|e| e.to_string())?; - - let (header, txs, receipts) = tokio::try_join!( - cold.get_header_by_number(block_num), - cold.get_transactions_in_block(block_num), - cold.get_receipts_in_block(block_num), - ) - .map_err(|e| e.to_string())?; - - let Some(header) = header else { - return Ok(None); - }; - - let base_fee = header.base_fee_per_gas; - - Ok(Some(LazyReceipts { txs, receipts, base_fee })) - }; - - await_handler!(@option hctx.spawn(task)) -} - -/// `eth_getBlockHeaderByHash` / `eth_getBlockHeaderByNumber`. -pub(crate) async fn header_by( - hctx: HandlerCtx, - (t,): (T,), - ctx: StorageRpcCtx, -) -> Result, String> -where - T: Into, - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let id = t.into(); - - let task = async move { - ctx.resolve_header(id) - .map(|opt| { - opt.map(|sh| { - let hash = sh.hash(); - alloy::rpc::types::Header { - inner: sh.into_inner(), - hash, - total_difficulty: None, - size: None, - } - }) - }) - .map_err(|e| e.to_string()) - }; - - await_handler!(@option hctx.spawn_blocking(task)) -} - -// --------------------------------------------------------------------------- -// Transaction Queries -// --------------------------------------------------------------------------- - -/// `eth_getTransactionByHash` — look up transaction by hash from cold storage. -pub(crate) async fn transaction_by_hash( - hctx: HandlerCtx, - (hash,): (B256,), - ctx: StorageRpcCtx, -) -> Result, String> -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let task = async move { - let cold = ctx.cold(); - let Some(confirmed) = cold.get_tx_by_hash(hash).await.map_err(|e| e.to_string())? 
else { - return Ok(None); - }; - - let (tx, meta) = confirmed.into_parts(); - - // Fetch header for base_fee - let header = - cold.get_header_by_number(meta.block_number()).await.map_err(|e| e.to_string())?; - let base_fee = header.and_then(|h| h.base_fee_per_gas); - - Ok(Some(build_rpc_transaction(&tx, &meta, base_fee))) - }; - - await_handler!(@option hctx.spawn(task)) -} - -/// `eth_getRawTransactionByHash` — RLP-encoded transaction bytes. -pub(crate) async fn raw_transaction_by_hash( - hctx: HandlerCtx, - (hash,): (B256,), - ctx: StorageRpcCtx, -) -> Result, String> -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let task = async move { - ctx.cold() - .get_tx_by_hash(hash) - .await - .map(|opt| opt.map(|c| c.into_inner().encoded_2718().into())) - .map_err(|e| e.to_string()) - }; - - await_handler!(@option hctx.spawn(task)) -} - -/// `eth_getTransactionByBlock*AndIndex` — transaction by position in block. -pub(crate) async fn transaction_by_block_and_index( - hctx: HandlerCtx, - (t, index): (T, U64), - ctx: StorageRpcCtx, -) -> Result, String> -where - T: Into, - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let id = t.into(); - - let task = async move { - let cold = ctx.cold(); - let block_num = ctx.resolve_block_id(id).map_err(|e| e.to_string())?; - - let Some(confirmed) = cold - .get_tx_by_block_and_index(block_num, index.to::()) - .await - .map_err(|e| e.to_string())? - else { - return Ok(None); - }; - - let (tx, meta) = confirmed.into_parts(); - let header = - cold.get_header_by_number(meta.block_number()).await.map_err(|e| e.to_string())?; - let base_fee = header.and_then(|h| h.base_fee_per_gas); - - Ok(Some(build_rpc_transaction(&tx, &meta, base_fee))) - }; - - await_handler!(@option hctx.spawn(task)) -} - -/// `eth_getRawTransactionByBlock*AndIndex` — raw RLP bytes by position. 
-pub(crate) async fn raw_transaction_by_block_and_index( - hctx: HandlerCtx, - (t, index): (T, U64), - ctx: StorageRpcCtx, -) -> Result, String> -where - T: Into, - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let id = t.into(); - - let task = async move { - let cold = ctx.cold(); - let block_num = ctx.resolve_block_id(id).map_err(|e| e.to_string())?; - - cold.get_tx_by_block_and_index(block_num, index.to::()) - .await - .map(|opt| opt.map(|c| c.into_inner().encoded_2718().into())) - .map_err(|e| e.to_string()) - }; - - await_handler!(@option hctx.spawn(task)) -} - -/// `eth_getTransactionReceipt` — receipt by tx hash. Fetches the receipt, -/// then the associated transaction and header for derived fields. -pub(crate) async fn transaction_receipt( - hctx: HandlerCtx, - (hash,): (B256,), - ctx: StorageRpcCtx, -) -> Result, String> -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let task = async move { - let cold = ctx.cold(); - - let Some(cr) = - cold.get_receipt(ReceiptSpecifier::TxHash(hash)).await.map_err(|e| e.to_string())? - else { - return Ok(None); - }; - - let (tx, header) = tokio::try_join!( - cold.get_tx_by_hash(cr.tx_hash), - cold.get_header_by_number(cr.block_number), - ) - .map_err(|e| e.to_string())?; - - let tx = tx.ok_or(EthError::TransactionMissing).map_err(|e| e.to_string())?.into_inner(); - let base_fee = header.and_then(|h| h.base_fee_per_gas); - - Ok(Some(build_receipt(&cr, &tx, base_fee))) - }; - - await_handler!(@option hctx.spawn(task)) -} - -// --------------------------------------------------------------------------- -// Account State (Hot Storage) -// --------------------------------------------------------------------------- - -/// `eth_getBalance` — account balance at a given block from hot storage. 
-pub(crate) async fn balance( - hctx: HandlerCtx, - AddrWithBlock(address, block): AddrWithBlock, - ctx: StorageRpcCtx, -) -> Result -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let id = block.unwrap_or(BlockId::latest()); - - let task = async move { - let (reader, height) = hot_reader_at_block(&ctx, id)?; - let acct = - reader.get_account_at_height(&address, Some(height)).map_err(|e| e.to_string())?; - - Ok(acct.map(|a| a.balance).unwrap_or_default()) - }; - - await_handler!(@option hctx.spawn_blocking(task)) -} - -/// `eth_getStorageAt` — contract storage slot at a given block. -pub(crate) async fn storage_at( - hctx: HandlerCtx, - StorageAtArgs(address, key, block): StorageAtArgs, - ctx: StorageRpcCtx, -) -> Result -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let id = block.unwrap_or(BlockId::latest()); - - let task = async move { - let (reader, height) = hot_reader_at_block(&ctx, id)?; - let val = reader - .get_storage_at_height(&address, &key, Some(height)) - .map_err(|e| e.to_string())?; - - Ok(val.unwrap_or_default().to_be_bytes().into()) - }; - - await_handler!(@option hctx.spawn_blocking(task)) -} - -/// `eth_getTransactionCount` — account nonce at a given block. -pub(crate) async fn addr_tx_count( - hctx: HandlerCtx, - AddrWithBlock(address, block): AddrWithBlock, - ctx: StorageRpcCtx, -) -> Result -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let id = block.unwrap_or(BlockId::latest()); - - let task = async move { - let (reader, height) = hot_reader_at_block(&ctx, id)?; - let acct = - reader.get_account_at_height(&address, Some(height)).map_err(|e| e.to_string())?; - - Ok(U64::from(acct.map(|a| a.nonce).unwrap_or_default())) - }; - - await_handler!(@option hctx.spawn_blocking(task)) -} - -/// `eth_getCode` — contract bytecode at a given block. 
-pub(crate) async fn code_at( - hctx: HandlerCtx, - AddrWithBlock(address, block): AddrWithBlock, - ctx: StorageRpcCtx, -) -> Result -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let id = block.unwrap_or(BlockId::latest()); - - let task = async move { - let (reader, height) = hot_reader_at_block(&ctx, id)?; - let acct = - reader.get_account_at_height(&address, Some(height)).map_err(|e| e.to_string())?; - - let Some(acct) = acct else { - return Ok(alloy::primitives::Bytes::new()); - }; - - let Some(code_hash) = acct.bytecode_hash else { - return Ok(alloy::primitives::Bytes::new()); - }; - - let code = reader.get_bytecode(&code_hash).map_err(|e| e.to_string())?; - - Ok(code.map(|c| c.original_bytes()).unwrap_or_default()) - }; - - await_handler!(@option hctx.spawn_blocking(task)) -} - -// --------------------------------------------------------------------------- -// EVM Execution -// --------------------------------------------------------------------------- - -/// Shared EVM call execution used by `eth_call` and `eth_estimateGas`. -/// -/// Resolves the block, builds a revm instance with the requested state -/// and block overrides, then executes the transaction request. 
-pub(crate) async fn run_call( - hctx: HandlerCtx, - TxParams(request, block, state_overrides, block_overrides): TxParams, - ctx: StorageRpcCtx, -) -> ResponsePayload -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let id = block.unwrap_or(BlockId::latest()); - let span = trace_span!("run_call", block_id = %id); - - let task = async move { - let EvmBlockContext { header, db } = response_tri!(ctx.resolve_evm_block(id)); - - let trevm = signet_evm::signet_evm(db, ctx.constants().clone()) - .fill_cfg(&CfgFiller(ctx.chain_id())) - .fill_block(&header); - - let trevm = response_tri!(trevm.maybe_apply_state_overrides(state_overrides.as_ref())) - .maybe_apply_block_overrides(block_overrides.as_deref()) - .fill_tx(&request); - - let mut trevm = trevm; - let new_gas = response_tri!(trevm.cap_tx_gas()); - if Some(new_gas) != request.gas { - debug!(req_gas = ?request.gas, new_gas, "capping gas for call"); - } - - let result = response_tri!(trevm.call().map_err(signet_evm::EvmErrored::into_error)); - ResponsePayload(Ok(result.0)) - } - .instrument(span); - - await_handler!(@response_option hctx.spawn_blocking(task)) -} - -/// `eth_call` — execute a call and return the output bytes. -/// -/// Delegates to [`run_call`], then maps the execution result to raw -/// output bytes, revert data, or halt reason. -pub(crate) async fn call( - hctx: HandlerCtx, - mut params: TxParams, - ctx: StorageRpcCtx, -) -> ResponsePayload -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let max_gas = ctx.config().rpc_gas_cap; - normalize_gas_stateless(&mut params.0, max_gas); - - await_handler!(@response_option hctx.spawn_with_ctx(|hctx| async move { - let res = match run_call(hctx, params, ctx).await.0 { - Ok(res) => res, - Err(err) => return ResponsePayload(Err(err)), - }; - - match res { - ExecutionResult::Success { output, .. } => { - ResponsePayload(Ok(output.data().clone())) - } - ExecutionResult::Revert { output, .. 
} => { - ResponsePayload::internal_error_with_message_and_obj( - "execution reverted".into(), - output.clone().into(), - ) - } - ExecutionResult::Halt { reason, .. } => { - ResponsePayload::internal_error_with_message_and_obj( - "execution halted".into(), - format!("{reason:?}").into(), - ) - } - } - })) -} - -/// `eth_estimateGas` — estimate gas required for a transaction. -pub(crate) async fn estimate_gas( - hctx: HandlerCtx, - TxParams(mut request, block, state_overrides, block_overrides): TxParams, - ctx: StorageRpcCtx, -) -> ResponsePayload -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let max_gas = ctx.config().rpc_gas_cap; - normalize_gas_stateless(&mut request, max_gas); - - let id = block.unwrap_or(BlockId::pending()); - let span = trace_span!("eth_estimateGas", block_id = %id); - - let task = async move { - let EvmBlockContext { header, db } = response_tri!(ctx.resolve_evm_block(id)); - - let trevm = signet_evm::signet_evm(db, ctx.constants().clone()) - .fill_cfg(&CfgFiller(ctx.chain_id())) - .fill_block(&header); - - let trevm = response_tri!(trevm.maybe_apply_state_overrides(state_overrides.as_ref())) - .maybe_apply_block_overrides(block_overrides.as_deref()) - .fill_tx(&request); - - let (estimate, _) = - response_tri!(trevm.estimate_gas().map_err(signet_evm::EvmErrored::into_error)); - - match estimate { - EstimationResult::Success { limit, .. } => ResponsePayload(Ok(U64::from(limit))), - EstimationResult::Revert { reason, .. } => { - ResponsePayload::internal_error_with_message_and_obj( - "execution reverted".into(), - reason.clone().into(), - ) - } - EstimationResult::Halt { reason, .. } => { - ResponsePayload::internal_error_with_message_and_obj( - "execution halted".into(), - format!("{reason:?}").into(), - ) - } - } - } - .instrument(span); - - await_handler!(@response_option hctx.spawn_blocking(task)) -} - -/// `eth_createAccessList` — generate an access list for a transaction. 
-pub(crate) async fn create_access_list( - hctx: HandlerCtx, - TxParams(mut request, block, state_overrides, block_overrides): TxParams, - ctx: StorageRpcCtx, -) -> ResponsePayload -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let max_gas = ctx.config().rpc_gas_cap; - normalize_gas_stateless(&mut request, max_gas); - - let id = block.unwrap_or(BlockId::pending()); - let span = trace_span!("eth_createAccessList", block_id = %id); - - let task = async move { - let EvmBlockContext { header, db } = response_tri!(ctx.resolve_evm_block(id)); - - let trevm = signet_evm::signet_evm(db, ctx.constants().clone()) - .fill_cfg(&CfgFiller(ctx.chain_id())) - .fill_block(&header); - - let trevm = response_tri!(trevm.maybe_apply_state_overrides(state_overrides.as_ref())) - .maybe_apply_block_overrides(block_overrides.as_deref()) - .fill_tx(&request); - - let initial = request.access_list.clone().unwrap_or_default(); - let mut inspector = AccessListInspector::new(initial); - - let result = trevm - .try_with_inspector(&mut inspector, |trevm| trevm.run()) - .map_err(signet_evm::EvmErrored::into_error); - - let (gas_used, error) = match result { - Ok(ref trevm) => (U256::from(trevm.gas_used()), None), - Err(ref e) => (U256::ZERO, Some(e.to_string())), - }; - - let access_list = inspector.into_access_list(); - - ResponsePayload(Ok(AccessListResult { access_list, gas_used, error })) - } - .instrument(span); - - await_handler!(@response_option hctx.spawn_blocking(task)) -} - -// --------------------------------------------------------------------------- -// Transaction Submission -// --------------------------------------------------------------------------- - -/// `eth_sendRawTransaction` — decode and forward a signed transaction. -/// -/// The transaction is forwarded to the tx cache in a fire-and-forget -/// task; the hash is returned immediately. 
-pub(crate) async fn send_raw_transaction( - hctx: HandlerCtx, - (tx,): (alloy::primitives::Bytes,), - ctx: StorageRpcCtx, -) -> Result -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let Some(tx_cache) = ctx.tx_cache().cloned() else { - return Err("tx-cache URL not provided".to_string()); - }; - - let task = |hctx: HandlerCtx| async move { - let envelope = alloy::consensus::TxEnvelope::decode_2718(&mut tx.as_ref()) - .map_err(|e| e.to_string())?; - - let hash = *envelope.tx_hash(); - hctx.spawn(async move { - if let Err(e) = tx_cache.forward_raw_transaction(envelope).await { - tracing::warn!(error = %e, %hash, "failed to forward raw transaction"); - } - }); - - Ok(hash) - }; - - await_handler!(@option hctx.spawn_blocking_with_ctx(task)) -} - -// --------------------------------------------------------------------------- -// Logs -// --------------------------------------------------------------------------- - -/// Drain a [`signet_cold::LogStream`] into a `Vec`. -/// -/// Errors from the stream (deadline exceeded, too many logs, reorg) are -/// propagated as the first encountered error. -async fn collect_log_stream(stream: signet_cold::LogStream) -> signet_cold::ColdResult> { - use tokio_stream::StreamExt; - let mut logs = Vec::new(); - let mut stream = std::pin::pin!(stream); - while let Some(log) = stream.next().await { - logs.push(log?); - } - Ok(logs) -} - -/// `eth_getLogs` — query logs from cold storage with filter criteria. -/// -/// Uses `stream_logs` for deadline enforcement and dedicated concurrency -/// control. The stream is collected into a `Vec` for the JSON-RPC response. 
-pub(crate) async fn get_logs( - hctx: HandlerCtx, - (filter,): (Filter,), - ctx: StorageRpcCtx, -) -> Result, String> -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let task = async move { - let cold = ctx.cold(); - - let resolved_filter = match filter.block_option { - alloy::rpc::types::FilterBlockOption::AtBlockHash(_) => filter, - alloy::rpc::types::FilterBlockOption::Range { from_block, to_block } => { - let from = from_block.map(|b| ctx.resolve_block_tag(b)).unwrap_or(0); - let to = to_block - .map(|b| ctx.resolve_block_tag(b)) - .unwrap_or_else(|| ctx.tags().latest()); - - if from > to { - return Err("fromBlock must not exceed toBlock".to_string()); - } - let max_blocks = ctx.config().max_blocks_per_filter; - if to - from > max_blocks { - return Err(format!("query exceeds max block range ({max_blocks})")); - } - - Filter { - block_option: alloy::rpc::types::FilterBlockOption::Range { - from_block: Some(BlockNumberOrTag::Number(from)), - to_block: Some(BlockNumberOrTag::Number(to)), - }, - ..filter - } - } - }; - - let max_logs = ctx.config().max_logs_per_response; - let deadline = ctx.config().max_log_query_deadline; - - let stream = cold - .stream_logs(resolved_filter, max_logs, deadline) - .await - .map_err(|e| e.to_string())?; - - collect_log_stream(stream).await.map_err(|e| e.to_string()) - }; - - await_handler!(@option hctx.spawn(task)) -} - -// --------------------------------------------------------------------------- -// Filters -// --------------------------------------------------------------------------- - -/// `eth_newFilter` — install a log filter for polling. 
-pub(crate) async fn new_filter( - hctx: HandlerCtx, - (filter,): (Filter,), - ctx: StorageRpcCtx, -) -> Result -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let task = async move { - let latest = ctx.tags().latest(); - Ok(ctx.filter_manager().install_log_filter(latest, filter)) - }; - - await_handler!(@option hctx.spawn_blocking(task)) -} - -/// `eth_newBlockFilter` — install a block hash filter for polling. -pub(crate) async fn new_block_filter( - hctx: HandlerCtx, - ctx: StorageRpcCtx, -) -> Result -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let task = async move { - let latest = ctx.tags().latest(); - Ok(ctx.filter_manager().install_block_filter(latest)) - }; - - await_handler!(@option hctx.spawn_blocking(task)) -} - -/// `eth_uninstallFilter` — remove a filter. -pub(crate) async fn uninstall_filter( - hctx: HandlerCtx, - (id,): (U64,), - ctx: StorageRpcCtx, -) -> Result -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let task = async move { Ok(ctx.filter_manager().uninstall(id).is_some()) }; - await_handler!(@option hctx.spawn_blocking(task)) -} - -/// `eth_getFilterChanges` / `eth_getFilterLogs` — poll a filter for new -/// results since the last poll. Fetches matching data from cold storage. 
-pub(crate) async fn get_filter_changes( - hctx: HandlerCtx, - (id,): (U64,), - ctx: StorageRpcCtx, -) -> Result -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let task = async move { - let fm = ctx.filter_manager(); - let mut entry = fm.get_mut(id).ok_or_else(|| format!("filter not found: {id}"))?; - - let latest = ctx.tags().latest(); - let start = entry.next_start_block(); - - if start > latest { - entry.mark_polled(latest); - return Ok(entry.empty_output()); - } - - let cold = ctx.cold(); - - if entry.is_block() { - let specs: Vec<_> = (start..=latest).map(HeaderSpecifier::Number).collect(); - let headers = cold.get_headers(specs).await.map_err(|e| e.to_string())?; - let hashes: Vec = headers.into_iter().flatten().map(|h| h.hash()).collect(); - entry.mark_polled(latest); - Ok(FilterOutput::from(hashes)) - } else { - let stored = entry.as_filter().cloned().unwrap(); - let resolved = Filter { - block_option: alloy::rpc::types::FilterBlockOption::Range { - from_block: Some(BlockNumberOrTag::Number(start)), - to_block: Some(BlockNumberOrTag::Number(latest)), - }, - ..stored - }; - - let max_logs = ctx.config().max_logs_per_response; - let deadline = ctx.config().max_log_query_deadline; - - let stream = - cold.stream_logs(resolved, max_logs, deadline).await.map_err(|e| e.to_string())?; - - let logs = collect_log_stream(stream).await.map_err(|e| e.to_string())?; - - entry.mark_polled(latest); - Ok(FilterOutput::from(logs)) - } - }; - - await_handler!(@option hctx.spawn(task)) -} - -// --------------------------------------------------------------------------- -// Subscriptions -// --------------------------------------------------------------------------- - -/// `eth_subscribe` — register a push-based subscription (WebSocket/SSE). 
-pub(crate) async fn subscribe( - hctx: HandlerCtx, - sub: SubscribeArgs, - ctx: StorageRpcCtx, -) -> Result -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let interest: InterestKind = sub.try_into()?; - - ctx.sub_manager() - .subscribe(&hctx, interest) - .ok_or_else(|| "notifications not enabled on this transport".to_string()) -} - -/// `eth_unsubscribe` — cancel a push-based subscription. -pub(crate) async fn unsubscribe( - hctx: HandlerCtx, - (id,): (U64,), - ctx: StorageRpcCtx, -) -> Result -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let task = async move { Ok(ctx.sub_manager().unsubscribe(id)) }; - await_handler!(@option hctx.spawn_blocking(task)) -} diff --git a/crates/rpc-storage/src/eth/error.rs b/crates/rpc-storage/src/eth/error.rs deleted file mode 100644 index 135b6b8..0000000 --- a/crates/rpc-storage/src/eth/error.rs +++ /dev/null @@ -1,61 +0,0 @@ -//! Error types for the storage-backed ETH RPC. - -use alloy::{eips::BlockId, primitives::Bytes}; -use serde::Serialize; - -/// Errors from the storage-backed ETH RPC. -#[derive(Debug, thiserror::Error)] -pub enum EthError { - /// Cold storage error. - #[error("cold storage: {0}")] - Cold(#[from] signet_cold::ColdStorageError), - /// Hot storage error. - #[error("hot storage: {0}")] - Hot(#[from] signet_storage::StorageError), - /// Block resolution error. - #[error("resolve: {0}")] - Resolve(#[from] crate::config::resolve::ResolveError), - /// Invalid transaction signature. - #[error("invalid transaction signature")] - InvalidSignature, - /// Block not found. - #[error("block not found: {0}")] - BlockNotFound(BlockId), - /// Receipt found but the corresponding transaction is missing. - #[error("receipt found but transaction missing")] - TransactionMissing, - /// EVM execution error. - #[error("evm: {0}")] - Evm(String), -} - -impl EthError { - /// Convert the error to a string for JSON-RPC responses. 
- pub fn into_string(self) -> String { - self.to_string() - } -} - -/// Error data for `eth_call` and `eth_estimateGas` responses. -/// -/// Serialized as JSON in the error response `data` field. -#[derive(Debug, Clone, Serialize)] -#[serde(untagged)] -pub(crate) enum CallErrorData { - /// Revert data bytes. - Bytes(Bytes), - /// Error message string. - String(String), -} - -impl From for CallErrorData { - fn from(b: Bytes) -> Self { - Self::Bytes(b) - } -} - -impl From for CallErrorData { - fn from(s: String) -> Self { - Self::String(s) - } -} diff --git a/crates/rpc-storage/src/eth/helpers.rs b/crates/rpc-storage/src/eth/helpers.rs deleted file mode 100644 index 399a177..0000000 --- a/crates/rpc-storage/src/eth/helpers.rs +++ /dev/null @@ -1,239 +0,0 @@ -//! Parameter types, macros, and utility helpers for ETH RPC endpoints. - -use super::types::{RpcReceipt, RpcTransaction}; -use crate::interest::InterestKind; -use alloy::{ - consensus::{ - ReceiptEnvelope, ReceiptWithBloom, Transaction, TxReceipt, transaction::Recovered, - }, - eips::BlockId, - primitives::{Address, TxKind, U256}, - rpc::types::{ - BlockOverrides, Log, TransactionReceipt, TransactionRequest, pubsub::SubscriptionKind, - state::StateOverride, - }, -}; -use serde::Deserialize; -use signet_cold::ColdReceipt; -use signet_storage_types::ConfirmationMeta; -use trevm::MIN_TRANSACTION_GAS; - -/// Args for `eth_call` and `eth_estimateGas`. -#[derive(Debug, Deserialize)] -pub(crate) struct TxParams( - pub TransactionRequest, - #[serde(default)] pub Option, - #[serde(default)] pub Option, - #[serde(default)] pub Option>, -); - -/// Args for `eth_getBlockByHash` and `eth_getBlockByNumber`. -#[derive(Debug, Deserialize)] -pub(crate) struct BlockParams(pub T, #[serde(default)] pub Option); - -/// Args for `eth_getStorageAt`. 
-#[derive(Debug, Deserialize)] -pub(crate) struct StorageAtArgs(pub Address, pub U256, #[serde(default)] pub Option); - -/// Args for `eth_getBalance`, `eth_getTransactionCount`, and `eth_getCode`. -#[derive(Debug, Deserialize)] -pub(crate) struct AddrWithBlock(pub Address, #[serde(default)] pub Option); - -/// Args for `eth_feeHistory`. -#[derive(Debug, Deserialize)] -pub(crate) struct FeeHistoryArgs( - pub alloy::primitives::U64, - pub alloy::eips::BlockNumberOrTag, - #[serde(default)] pub Option>, -); - -/// Args for `eth_subscribe`. -#[derive(Debug, Deserialize)] -pub(crate) struct SubscribeArgs( - pub alloy::rpc::types::pubsub::SubscriptionKind, - #[serde(default)] pub Option>, -); - -impl TryFrom for InterestKind { - type Error = String; - - fn try_from(args: SubscribeArgs) -> Result { - match args.0 { - SubscriptionKind::Logs => args - .1 - .map(InterestKind::Log) - .ok_or_else(|| "missing filter for Logs subscription".to_string()), - SubscriptionKind::NewHeads => { - if args.1.is_some() { - Err("filter not supported for NewHeads subscription".to_string()) - } else { - Ok(InterestKind::Block) - } - } - other => Err(format!("unsupported subscription kind: {other:?}")), - } - } -} - -/// Normalize transaction request gas without making DB reads. -/// -/// - If the gas is below `MIN_TRANSACTION_GAS`, set it to `None` -/// - If the gas is above the `rpc_gas_cap`, set it to the `rpc_gas_cap` -pub(crate) const fn normalize_gas_stateless(request: &mut TransactionRequest, max_gas: u64) { - match request.gas { - Some(..MIN_TRANSACTION_GAS) => request.gas = None, - Some(val) if val > max_gas => request.gas = Some(max_gas), - _ => {} - } -} - -/// Await a handler task, returning an error string on panic/cancel. -macro_rules! 
await_handler { - ($h:expr) => { - match $h.await { - Ok(res) => res, - Err(_) => return Err("task panicked or cancelled".to_string()), - } - }; - - (@option $h:expr) => { - match $h.await { - Ok(Some(res)) => res, - _ => return Err("task panicked or cancelled".to_string()), - } - }; - - (@response_option $h:expr) => { - match $h.await { - Ok(Some(res)) => res, - _ => { - return ajj::ResponsePayload::internal_error_message(std::borrow::Cow::Borrowed( - "task panicked or cancelled", - )) - } - } - }; -} -pub(crate) use await_handler; - -/// Try-operator for `ResponsePayload`. -macro_rules! response_tri { - ($h:expr) => { - match $h { - Ok(res) => res, - Err(err) => return ajj::ResponsePayload::internal_error_message(err.to_string().into()), - } - }; -} -pub(crate) use response_tri; - -/// Resolve a block ID and open a hot storage reader at that height. -/// -/// Shared by account-state endpoints (`balance`, `storage_at`, -/// `addr_tx_count`, `code_at`) which all follow the same -/// resolve → open reader → query pattern. -pub(crate) fn hot_reader_at_block( - ctx: &crate::config::StorageRpcCtx, - id: BlockId, -) -> Result<(H::RoTx, u64), String> -where - H: signet_hot::HotKv, - ::Error: std::error::Error + Send + Sync + 'static, -{ - let height = ctx.resolve_block_id(id).map_err(|e| e.to_string())?; - let reader = ctx.hot_reader().map_err(|e| e.to_string())?; - Ok((reader, height)) -} - -/// Small wrapper implementing [`trevm::Cfg`] to set the chain ID. -pub(crate) struct CfgFiller(pub u64); - -impl trevm::Cfg for CfgFiller { - fn fill_cfg_env(&self, cfg: &mut trevm::revm::context::CfgEnv) { - cfg.chain_id = self.0; - } -} - -/// Build an [`alloy::rpc::types::Transaction`] from cold storage types. 
-pub(crate) fn build_rpc_transaction( - tx: &signet_storage_types::RecoveredTx, - meta: &ConfirmationMeta, - base_fee: Option, -) -> RpcTransaction { - let signer = tx.signer(); - let tx_envelope: alloy::consensus::TxEnvelope = tx.clone().into_inner().into(); - let inner = Recovered::new_unchecked(tx_envelope, signer); - - let egp = base_fee - .map(|bf| inner.effective_tip_per_gas(bf).unwrap_or_default() as u64 + bf) - .unwrap_or_else(|| inner.max_fee_per_gas() as u64); - - alloy::rpc::types::Transaction { - inner, - block_hash: Some(meta.block_hash()), - block_number: Some(meta.block_number()), - transaction_index: Some(meta.transaction_index()), - effective_gas_price: Some(egp as u128), - } -} - -/// Build a [`TransactionReceipt`] from a [`ColdReceipt`] and its transaction. -/// -/// The transaction is needed for `to`, `contract_address`, and -/// `effective_gas_price` which are not stored on the receipt. -pub(crate) fn build_receipt( - cr: &ColdReceipt, - tx: &signet_storage_types::RecoveredTx, - base_fee: Option, -) -> RpcReceipt { - let logs_bloom = cr.receipt.bloom(); - let status = cr.receipt.status; - let cumulative_gas_used = cr.receipt.cumulative_gas_used; - - let rpc_receipt = alloy::rpc::types::eth::Receipt { - status, - cumulative_gas_used, - logs: cr.receipt.logs.clone(), - }; - - let (contract_address, to) = match tx.kind() { - TxKind::Create => (Some(cr.from.create(tx.nonce())), None), - TxKind::Call(addr) => (None, Some(Address(*addr))), - }; - - let egp = base_fee - .map(|bf| tx.effective_tip_per_gas(bf).unwrap_or_default() as u64 + bf) - .unwrap_or_else(|| tx.max_fee_per_gas() as u64); - - TransactionReceipt { - inner: build_receipt_envelope( - ReceiptWithBloom { receipt: rpc_receipt, logs_bloom }, - cr.tx_type, - ), - transaction_hash: cr.tx_hash, - transaction_index: Some(cr.transaction_index), - block_hash: Some(cr.block_hash), - block_number: Some(cr.block_number), - from: cr.from, - to, - gas_used: cr.gas_used, - contract_address, - 
effective_gas_price: egp as u128, - blob_gas_price: None, - blob_gas_used: None, - } -} - -/// Wrap a receipt in the appropriate [`ReceiptEnvelope`] variant. -const fn build_receipt_envelope( - receipt: ReceiptWithBloom>, - tx_type: alloy::consensus::TxType, -) -> ReceiptEnvelope { - match tx_type { - alloy::consensus::TxType::Legacy => ReceiptEnvelope::Legacy(receipt), - alloy::consensus::TxType::Eip2930 => ReceiptEnvelope::Eip2930(receipt), - alloy::consensus::TxType::Eip1559 => ReceiptEnvelope::Eip1559(receipt), - alloy::consensus::TxType::Eip4844 => ReceiptEnvelope::Eip4844(receipt), - alloy::consensus::TxType::Eip7702 => ReceiptEnvelope::Eip7702(receipt), - } -} diff --git a/crates/rpc-storage/src/eth/mod.rs b/crates/rpc-storage/src/eth/mod.rs deleted file mode 100644 index 43aa594..0000000 --- a/crates/rpc-storage/src/eth/mod.rs +++ /dev/null @@ -1,99 +0,0 @@ -//! ETH namespace RPC router backed by storage. - -mod endpoints; -use endpoints::{ - addr_tx_count, balance, block, block_number, block_receipts, block_tx_count, call, chain_id, - code_at, create_access_list, estimate_gas, fee_history, gas_price, get_filter_changes, - get_logs, header_by, max_priority_fee_per_gas, new_block_filter, new_filter, not_supported, - raw_transaction_by_block_and_index, raw_transaction_by_hash, send_raw_transaction, storage_at, - subscribe, syncing, transaction_by_block_and_index, transaction_by_hash, transaction_receipt, - uncle_block, uncle_count, uninstall_filter, unsubscribe, -}; - -mod error; -pub use error::EthError; - -pub(crate) mod helpers; -pub(crate) mod types; - -use crate::config::StorageRpcCtx; -use alloy::{eips::BlockNumberOrTag, primitives::B256}; -use signet_hot::HotKv; -use signet_hot::model::HotKvRead; -use trevm::revm::database::DBErrorMarker; - -/// Instantiate the `eth` API router backed by storage. 
-pub(crate) fn eth() -> ajj::Router> -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - ajj::Router::new() - .route("blockNumber", block_number::) - .route("chainId", chain_id::) - .route("getBlockByHash", block::) - .route("getBlockByNumber", block::) - .route("getBlockTransactionCountByHash", block_tx_count::) - .route("getBlockTransactionCountByNumber", block_tx_count::) - .route("getBlockReceipts", block_receipts::) - .route("getRawTransactionByHash", raw_transaction_by_hash::) - .route("getTransactionByHash", transaction_by_hash::) - .route( - "getRawTransactionByBlockHashAndIndex", - raw_transaction_by_block_and_index::, - ) - .route( - "getRawTransactionByBlockNumberAndIndex", - raw_transaction_by_block_and_index::, - ) - .route("getTransactionByBlockHashAndIndex", transaction_by_block_and_index::) - .route( - "getTransactionByBlockNumberAndIndex", - transaction_by_block_and_index::, - ) - .route("getTransactionReceipt", transaction_receipt::) - .route("getBlockHeaderByHash", header_by::) - .route("getBlockHeaderByNumber", header_by::) - .route("getBalance", balance::) - .route("getStorageAt", storage_at::) - .route("getTransactionCount", addr_tx_count::) - .route("getCode", code_at::) - .route("call", call::) - .route("estimateGas", estimate_gas::) - .route("sendRawTransaction", send_raw_transaction::) - .route("getLogs", get_logs::) - // --- - // Unsupported methods - // --- - .route("protocolVersion", not_supported) - .route("syncing", syncing::) - .route("gasPrice", gas_price::) - .route("maxPriorityFeePerGas", max_priority_fee_per_gas::) - .route("feeHistory", fee_history::) - .route("coinbase", not_supported) - .route("accounts", not_supported) - .route("blobBaseFee", not_supported) - .route("getUncleCountByBlockHash", uncle_count) - .route("getUncleCountByBlockNumber", uncle_count) - .route("getUncleByBlockHashAndIndex", uncle_block) - .route("getUncleByBlockNumberAndIndex", uncle_block) - .route("getWork", not_supported) - 
.route("hashrate", not_supported) - .route("mining", not_supported) - .route("submitHashrate", not_supported) - .route("submitWork", not_supported) - .route("sendTransaction", not_supported) - .route("sign", not_supported) - .route("signTransaction", not_supported) - .route("signTypedData", not_supported) - .route("getProof", not_supported) - .route("createAccessList", create_access_list::) - .route("newFilter", new_filter::) - .route("newBlockFilter", new_block_filter::) - .route("newPendingTransactionFilter", not_supported) - .route("uninstallFilter", uninstall_filter::) - .route("getFilterChanges", get_filter_changes::) - .route("getFilterLogs", get_filter_changes::) - .route("subscribe", subscribe::) - .route("unsubscribe", unsubscribe::) -} diff --git a/crates/rpc-storage/src/interest/filters.rs b/crates/rpc-storage/src/interest/filters.rs deleted file mode 100644 index ae09367..0000000 --- a/crates/rpc-storage/src/interest/filters.rs +++ /dev/null @@ -1,222 +0,0 @@ -//! Filter management for `eth_newFilter` / `eth_getFilterChanges`. - -use crate::interest::{InterestKind, buffer::EventBuffer}; -use alloy::{ - primitives::{B256, U64}, - rpc::types::Filter, -}; -use dashmap::{DashMap, mapref::one::RefMut}; -use std::{ - sync::{ - Arc, Weak, - atomic::{AtomicU64, Ordering}, - }, - time::{Duration, Instant}, -}; -use tracing::trace; - -type FilterId = U64; - -/// Output of a polled filter: log entries or block hashes. -pub(crate) type FilterOutput = EventBuffer; - -/// An active filter. -/// -/// Records the filter details, the [`Instant`] at which the filter was last -/// polled, and the first block whose contents should be considered. 
-#[derive(Debug, Clone, PartialEq, Eq)] -pub(crate) struct ActiveFilter { - next_start_block: u64, - last_poll_time: Instant, - kind: InterestKind, -} - -impl core::fmt::Display for ActiveFilter { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!( - f, - "ActiveFilter {{ next_start_block: {}, ms_since_last_poll: {}, kind: {:?} }}", - self.next_start_block, - self.last_poll_time.elapsed().as_millis(), - self.kind - ) - } -} - -impl ActiveFilter { - /// True if this is a block filter. - pub(crate) const fn is_block(&self) -> bool { - self.kind.is_block() - } - - /// Fallible cast to a filter. - pub(crate) const fn as_filter(&self) -> Option<&Filter> { - self.kind.as_filter() - } - - /// Mark the filter as having been polled at the given block. - pub(crate) fn mark_polled(&mut self, current_block: u64) { - self.next_start_block = current_block + 1; - self.last_poll_time = Instant::now(); - } - - /// Get the next start block for the filter. - pub(crate) const fn next_start_block(&self) -> u64 { - self.next_start_block - } - - /// Get the duration since the filter was last polled. - pub(crate) fn time_since_last_poll(&self) -> Duration { - self.last_poll_time.elapsed() - } - - /// Return an empty output of the same kind as this filter. - pub(crate) const fn empty_output(&self) -> FilterOutput { - self.kind.empty_output() - } -} - -/// Inner logic for [`FilterManager`]. -#[derive(Debug)] -pub(crate) struct FilterManagerInner { - current_id: AtomicU64, - filters: DashMap, -} - -impl FilterManagerInner { - /// Create a new filter manager. - fn new() -> Self { - // Start from 1, as 0 is weird in quantity encoding. - Self { current_id: AtomicU64::new(1), filters: DashMap::new() } - } - - /// Get the next filter ID. - fn next_id(&self) -> FilterId { - FilterId::from(self.current_id.fetch_add(1, Ordering::Relaxed)) - } - - /// Get a filter by ID. 
- pub(crate) fn get_mut(&self, id: FilterId) -> Option> { - self.filters.get_mut(&id) - } - - fn install(&self, current_block: u64, kind: InterestKind) -> FilterId { - let id = self.next_id(); - let next_start_block = current_block + 1; - let _ = self - .filters - .insert(id, ActiveFilter { next_start_block, last_poll_time: Instant::now(), kind }); - id - } - - /// Install a new log filter. - pub(crate) fn install_log_filter(&self, current_block: u64, filter: Filter) -> FilterId { - self.install(current_block, InterestKind::Log(Box::new(filter))) - } - - /// Install a new block filter. - pub(crate) fn install_block_filter(&self, current_block: u64) -> FilterId { - self.install(current_block, InterestKind::Block) - } - - /// Uninstall a filter, returning the kind of filter that was uninstalled. - pub(crate) fn uninstall(&self, id: FilterId) -> Option<(U64, ActiveFilter)> { - self.filters.remove(&id) - } - - /// Clean stale filters that have not been polled in a while. - fn clean_stale(&self, older_than: Duration) { - self.filters.retain(|_, filter| filter.time_since_last_poll() < older_than); - } -} - -/// Manager for filters. -/// -/// The manager tracks active filters, and periodically cleans stale filters. -/// Filters are stored in a [`DashMap`] that maps filter IDs to active filters. -/// Filter IDs are assigned sequentially, starting from 1. -/// -/// Calling [`Self::new`] spawns a task that periodically cleans stale filters. -/// This task runs on a separate thread to avoid [`DashMap::retain`] deadlock. -/// See [`DashMap`] documentation for more information. -#[derive(Debug, Clone)] -pub(crate) struct FilterManager { - inner: Arc, -} - -impl FilterManager { - /// Create a new filter manager. Spawn a task to clean stale filters. 
- pub(crate) fn new(clean_interval: Duration, age_limit: Duration) -> Self { - let inner = Arc::new(FilterManagerInner::new()); - let manager = Self { inner }; - FilterCleanTask::new(Arc::downgrade(&manager.inner), clean_interval, age_limit).spawn(); - manager - } -} - -impl std::ops::Deref for FilterManager { - type Target = FilterManagerInner; - - fn deref(&self) -> &Self::Target { - self.inner.deref() - } -} - -/// Task to clean up unpolled filters. -/// -/// This task runs on a separate thread to avoid [`DashMap::retain`] deadlocks. -#[derive(Debug)] -struct FilterCleanTask { - manager: Weak, - sleep: Duration, - age_limit: Duration, -} - -impl FilterCleanTask { - /// Create a new filter cleaner task. - const fn new(manager: Weak, sleep: Duration, age_limit: Duration) -> Self { - Self { manager, sleep, age_limit } - } - - /// Run the task. This task runs on a separate thread, which ensures that - /// [`DashMap::retain`]'s deadlock condition is not met. See [`DashMap`] - /// documentation for more information. 
- fn spawn(self) { - std::thread::spawn(move || { - loop { - std::thread::sleep(self.sleep); - trace!("cleaning stale filters"); - match self.manager.upgrade() { - Some(manager) => manager.clean_stale(self.age_limit), - None => break, - } - } - }); - } -} - -// Some code in this file has been copied and modified from reth -// -// The original license is included below: -// -// The MIT License (MIT) -// -// Copyright (c) 2022-2025 Reth Contributors -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. diff --git a/crates/rpc-storage/src/interest/kind.rs b/crates/rpc-storage/src/interest/kind.rs deleted file mode 100644 index ed4f49c..0000000 --- a/crates/rpc-storage/src/interest/kind.rs +++ /dev/null @@ -1,100 +0,0 @@ -//! Filter kinds for subscriptions and polling filters. 
- -use crate::interest::{NewBlockNotification, filters::FilterOutput, subs::SubscriptionBuffer}; -use alloy::rpc::types::{Filter, Header, Log}; -use std::collections::VecDeque; - -/// The different kinds of filters that can be created. -/// -/// Pending tx filters are not supported by Signet. -#[derive(Debug, Clone, PartialEq, Eq)] -pub(crate) enum InterestKind { - /// Log filter with a user-supplied [`Filter`]. - Log(Box), - /// New-block filter. - Block, -} - -impl InterestKind { - /// True if this is a block filter. - pub(crate) const fn is_block(&self) -> bool { - matches!(self, Self::Block) - } - - /// Fallible cast to a filter. - pub(crate) const fn as_filter(&self) -> Option<&Filter> { - match self { - Self::Log(f) => Some(f), - _ => None, - } - } - - fn apply_block(notif: &NewBlockNotification) -> SubscriptionBuffer { - let header = Header { - hash: notif.header.hash_slow(), - inner: notif.header.clone(), - total_difficulty: None, - size: None, - }; - SubscriptionBuffer::Block(VecDeque::from([header])) - } - - fn apply_filter(&self, notif: &NewBlockNotification) -> SubscriptionBuffer { - let filter = self.as_filter().unwrap(); - let block_hash = notif.header.hash_slow(); - let block_number = notif.header.number; - let block_timestamp = notif.header.timestamp; - - let logs: VecDeque = notif - .receipts - .iter() - .enumerate() - .flat_map(|(tx_idx, receipt)| { - let tx_hash = *notif.transactions[tx_idx].tx_hash(); - receipt.inner.logs.iter().enumerate().filter_map(move |(log_idx, log)| { - if filter.matches(log) { - Some(Log { - inner: log.clone(), - block_hash: Some(block_hash), - block_number: Some(block_number), - block_timestamp: Some(block_timestamp), - transaction_hash: Some(tx_hash), - transaction_index: Some(tx_idx as u64), - log_index: Some(log_idx as u64), - removed: false, - }) - } else { - None - } - }) - }) - .collect(); - - SubscriptionBuffer::Log(logs) - } - - /// Apply the filter to a [`NewBlockNotification`], producing a - /// subscription 
buffer. - pub(crate) fn filter_notification_for_sub( - &self, - notif: &NewBlockNotification, - ) -> SubscriptionBuffer { - if self.is_block() { Self::apply_block(notif) } else { self.apply_filter(notif) } - } - - /// Return an empty output of the same kind as this filter. - pub(crate) const fn empty_output(&self) -> FilterOutput { - match self { - Self::Log(_) => FilterOutput::Log(VecDeque::new()), - Self::Block => FilterOutput::Block(VecDeque::new()), - } - } - - /// Return an empty subscription buffer of the same kind as this filter. - pub(crate) const fn empty_sub_buffer(&self) -> SubscriptionBuffer { - match self { - Self::Log(_) => SubscriptionBuffer::Log(VecDeque::new()), - Self::Block => SubscriptionBuffer::Block(VecDeque::new()), - } - } -} diff --git a/crates/rpc-storage/src/interest/mod.rs b/crates/rpc-storage/src/interest/mod.rs deleted file mode 100644 index 644d64b..0000000 --- a/crates/rpc-storage/src/interest/mod.rs +++ /dev/null @@ -1,58 +0,0 @@ -//! Filter and subscription management for storage-backed RPC. -//! -//! This module implements two managers that track client-registered -//! interests in chain events: -//! -//! - **[`FilterManager`]** — manages poll-based filters created via -//! `eth_newFilter` and `eth_newBlockFilter`. Clients poll with -//! `eth_getFilterChanges` to retrieve accumulated results. -//! -//! - **[`SubscriptionManager`]** — manages push-based subscriptions -//! created via `eth_subscribe`. Matching events are forwarded to -//! the client over the notification channel (WebSocket / SSE). -//! -//! # Architecture -//! -//! Both managers wrap a shared `Arc` containing a [`DashMap`] -//! that maps client-assigned IDs to their active state. This makes -//! both types cheaply clonable — cloning just increments an `Arc` -//! reference count. -//! -//! # Resource lifecycle -//! -//! Each manager spawns a **background OS thread** that periodically -//! cleans up stale entries. The cleanup threads hold a [`Weak`] -//! 
reference to the `Arc`, so they self-terminate once all -//! strong references are dropped. -//! -//! OS threads are used (rather than tokio tasks) because -//! [`DashMap::retain`] can deadlock if called from an async context -//! that also holds a `DashMap` read guard on the same shard. Running -//! cleanup on a dedicated OS thread ensures the retain lock is never -//! contended with an in-flight async handler. -//! -//! [`Weak`]: std::sync::Weak -//! [`DashMap`]: dashmap::DashMap -//! [`DashMap::retain`]: dashmap::DashMap::retain - -mod buffer; -mod filters; -pub(crate) use filters::{FilterManager, FilterOutput}; -mod kind; -pub(crate) use kind::InterestKind; -mod subs; -pub(crate) use subs::SubscriptionManager; - -/// Notification sent when a new block is available in storage. -/// -/// The caller constructs and sends these via a -/// [`tokio::sync::broadcast::Sender`]. -#[derive(Debug, Clone)] -pub struct NewBlockNotification { - /// The block header. - pub header: alloy::consensus::Header, - /// Transactions in the block. - pub transactions: Vec, - /// Receipts for the block. - pub receipts: Vec, -} diff --git a/crates/rpc-storage/src/interest/subs.rs b/crates/rpc-storage/src/interest/subs.rs deleted file mode 100644 index 3df1a00..0000000 --- a/crates/rpc-storage/src/interest/subs.rs +++ /dev/null @@ -1,266 +0,0 @@ -//! Subscription management for `eth_subscribe` / `eth_unsubscribe`. - -use crate::interest::{ - InterestKind, NewBlockNotification, - buffer::{EventBuffer, EventItem}, -}; -use ajj::HandlerCtx; -use alloy::primitives::U64; -use dashmap::DashMap; -use std::{ - future::pending, - sync::{ - Arc, Weak, - atomic::{AtomicU64, Ordering}, - }, - time::Duration, -}; -use tokio::sync::broadcast::{self, error::RecvError}; -use tokio_util::sync::{CancellationToken, WaitForCancellationFutureOwned}; -use tracing::{Instrument, debug, debug_span, enabled, trace}; - -/// Buffer for subscription outputs: log entries or block headers. 
-pub(crate) type SubscriptionBuffer = EventBuffer; - -/// JSON-RPC subscription notification envelope. -#[derive(serde::Serialize)] -struct SubscriptionNotification<'a> { - jsonrpc: &'static str, - method: &'static str, - params: SubscriptionParams<'a>, -} - -/// Params field of a subscription notification. -#[derive(serde::Serialize)] -struct SubscriptionParams<'a> { - result: &'a EventItem, - subscription: U64, -} - -/// Tracks ongoing subscription tasks. -/// -/// Performs the following functions: -/// - assigns unique subscription IDs -/// - spawns tasks to manage each subscription -/// - allows cancelling subscriptions by ID -/// -/// Calling [`Self::new`] spawns a task that periodically cleans stale filters. -/// This task runs on a separate thread to avoid [`DashMap::retain`] deadlock. -/// See [`DashMap`] documentation for more information. -#[derive(Clone)] -pub(crate) struct SubscriptionManager { - inner: Arc, -} - -impl SubscriptionManager { - /// Instantiate a new subscription manager, start a task to clean up - /// subscriptions cancelled by user disconnection. - pub(crate) fn new( - notif_sender: broadcast::Sender, - clean_interval: Duration, - ) -> Self { - let inner = Arc::new(SubscriptionManagerInner::new(notif_sender)); - let task = SubCleanerTask::new(Arc::downgrade(&inner), clean_interval); - task.spawn(); - Self { inner } - } -} - -impl core::ops::Deref for SubscriptionManager { - type Target = SubscriptionManagerInner; - - fn deref(&self) -> &Self::Target { - &self.inner - } -} - -impl core::fmt::Debug for SubscriptionManager { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("SubscriptionManager").finish_non_exhaustive() - } -} - -/// Inner logic for [`SubscriptionManager`]. -#[derive(Debug)] -pub(crate) struct SubscriptionManagerInner { - next_id: AtomicU64, - tasks: DashMap, - notif_sender: broadcast::Sender, -} - -impl SubscriptionManagerInner { - /// Create a new subscription manager. 
- fn new(notif_sender: broadcast::Sender) -> Self { - Self { next_id: AtomicU64::new(1), tasks: DashMap::new(), notif_sender } - } - - /// Assign a new subscription ID. - fn next_id(&self) -> U64 { - U64::from(self.next_id.fetch_add(1, Ordering::Relaxed)) - } - - /// Cancel a subscription task. - pub(crate) fn unsubscribe(&self, id: U64) -> bool { - if let Some(task) = self.tasks.remove(&id) { - task.1.cancel(); - true - } else { - false - } - } - - /// Subscribe to notifications. Returns `None` if notifications are - /// disabled. - pub(crate) fn subscribe(&self, ajj_ctx: &HandlerCtx, filter: InterestKind) -> Option { - if !ajj_ctx.notifications_enabled() { - return None; - } - - let id = self.next_id(); - let token = CancellationToken::new(); - self.tasks.insert(id, token.clone()); - let task = SubscriptionTask { - id, - filter, - token: token.clone(), - notifs: self.notif_sender.subscribe(), - }; - task.spawn(ajj_ctx); - - debug!(%id, "registered new subscription"); - - Some(id) - } -} - -/// Task to manage a single subscription. -#[derive(Debug)] -struct SubscriptionTask { - id: U64, - filter: InterestKind, - token: CancellationToken, - notifs: broadcast::Receiver, -} - -impl SubscriptionTask { - /// Create the task future. - async fn task_future(self, ajj_ctx: HandlerCtx, ajj_cancel: WaitForCancellationFutureOwned) { - let SubscriptionTask { id, filter, token, mut notifs } = self; - - if !ajj_ctx.notifications_enabled() { - return; - } - - let mut notif_buffer = filter.empty_sub_buffer(); - tokio::pin!(ajj_cancel); - - loop { - let span = debug_span!(parent: None, "SubscriptionTask::task_future", %id, filter = tracing::field::Empty); - if enabled!(tracing::Level::TRACE) { - span.record("filter", format!("{filter:?}")); - } - - // NB: reserve half the capacity to avoid blocking other - // usage. This is a heuristic and can be adjusted as needed. 
- let guard = span.enter(); - let permit_fut = async { - if !notif_buffer.is_empty() { - ajj_ctx - .permit_many((ajj_ctx.notification_capacity() / 2).min(notif_buffer.len())) - .await - } else { - pending().await - } - } - .in_current_span(); - drop(guard); - - // NB: biased select ensures we check cancellation before - // processing new notifications. - let _guard = span.enter(); - tokio::select! { - biased; - _ = &mut ajj_cancel => { - trace!("subscription cancelled by client disconnect"); - token.cancel(); - break; - } - _ = token.cancelled() => { - trace!("subscription cancelled by user"); - break; - } - permits = permit_fut => { - let Some(permits) = permits else { - trace!("channel to client closed"); - break - }; - - for permit in permits { - let Some(item) = notif_buffer.pop_front() else { break }; - let notification = SubscriptionNotification { - jsonrpc: "2.0", - method: "eth_subscription", - params: SubscriptionParams { result: &item, subscription: id }, - }; - let _ = permit.send(¬ification); - } - } - notif_res = notifs.recv() => { - let notif = match notif_res { - Ok(notif) => notif, - Err(RecvError::Lagged(skipped)) => { - trace!(skipped, "missed notifications"); - continue; - } - Err(e) => { - trace!(?e, "notification stream closed"); - break; - } - }; - - let output = filter.filter_notification_for_sub(¬if); - trace!(count = output.len(), "Filter applied to notification"); - if !output.is_empty() { - notif_buffer.extend(output); - } - } - } - } - } - - /// Spawn on the ajj [`HandlerCtx`]. - fn spawn(self, ctx: &HandlerCtx) { - ctx.spawn_graceful_with_ctx(|ctx, ajj_cancel| self.task_future(ctx, ajj_cancel)); - } -} - -/// Task to clean up cancelled subscriptions. -/// -/// This task runs on a separate thread to avoid [`DashMap::retain`] deadlocks. -#[derive(Debug)] -struct SubCleanerTask { - inner: Weak, - interval: Duration, -} - -impl SubCleanerTask { - /// Create a new subscription cleaner task. 
- const fn new(inner: Weak, interval: Duration) -> Self { - Self { inner, interval } - } - - /// Run the task. This task runs on a separate thread, which ensures that - /// [`DashMap::retain`]'s deadlock condition is not met. See [`DashMap`] - /// documentation for more information. - fn spawn(self) { - std::thread::spawn(move || { - loop { - std::thread::sleep(self.interval); - match self.inner.upgrade() { - Some(inner) => inner.tasks.retain(|_, task| !task.is_cancelled()), - None => break, - } - } - }); - } -} diff --git a/crates/rpc-storage/src/lib.rs b/crates/rpc-storage/src/lib.rs deleted file mode 100644 index b1e68d6..0000000 --- a/crates/rpc-storage/src/lib.rs +++ /dev/null @@ -1,40 +0,0 @@ -#![doc = include_str!("../README.md")] -#![warn( - missing_copy_implementations, - missing_debug_implementations, - missing_docs, - unreachable_pub, - clippy::missing_const_for_fn, - rustdoc::all -)] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![deny(unused_must_use, rust_2018_idioms)] -#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] - -pub(crate) mod config; -pub use config::{BlockTags, StorageRpcConfig, StorageRpcCtx, SyncStatus}; - -mod eth; -pub use eth::EthError; - -mod interest; -pub use interest::NewBlockNotification; - -mod debug; -pub use debug::DebugError; - -mod signet; -pub use signet::error::SignetError; - -/// Instantiate a combined router with `eth`, `debug`, and `signet` -/// namespaces. -pub fn router() -> ajj::Router> -where - H: signet_hot::HotKv + Send + Sync + 'static, - ::Error: trevm::revm::database::DBErrorMarker, -{ - ajj::Router::new() - .nest("eth", eth::eth()) - .nest("debug", debug::debug()) - .nest("signet", signet::signet()) -} diff --git a/crates/rpc-storage/src/signet/endpoints.rs b/crates/rpc-storage/src/signet/endpoints.rs deleted file mode 100644 index 7294073..0000000 --- a/crates/rpc-storage/src/signet/endpoints.rs +++ /dev/null @@ -1,95 +0,0 @@ -//! Signet namespace RPC endpoint implementations. 
- -use crate::{ - config::{EvmBlockContext, StorageRpcCtx}, - eth::helpers::{CfgFiller, await_handler, response_tri}, - signet::error::SignetError, -}; -use ajj::{HandlerCtx, ResponsePayload}; -use alloy::eips::BlockId; -use signet_bundle::{SignetBundleDriver, SignetCallBundle, SignetCallBundleResponse}; -use signet_hot::HotKv; -use signet_hot::model::HotKvRead; -use signet_types::SignedOrder; -use std::time::Duration; -use tokio::select; -use trevm::revm::database::DBErrorMarker; - -/// `signet_sendOrder` handler. -pub(super) async fn send_order( - hctx: HandlerCtx, - order: SignedOrder, - ctx: StorageRpcCtx, -) -> Result<(), String> -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let Some(tx_cache) = ctx.tx_cache().cloned() else { - return Err(SignetError::TxCacheNotProvided.to_string()); - }; - - let task = |hctx: HandlerCtx| async move { - hctx.spawn(async move { - if let Err(e) = tx_cache.forward_order(order).await { - tracing::warn!(error = %e, "failed to forward order"); - } - }); - Ok(()) - }; - - await_handler!(@option hctx.spawn_blocking_with_ctx(task)) -} - -/// `signet_callBundle` handler. 
-pub(super) async fn call_bundle( - hctx: HandlerCtx, - bundle: SignetCallBundle, - ctx: StorageRpcCtx, -) -> ResponsePayload -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - let timeout = bundle.bundle.timeout.unwrap_or(ctx.config().default_bundle_timeout_ms); - - let task = async move { - let id = bundle.state_block_number(); - let block_id: BlockId = id.into(); - - let EvmBlockContext { header, db } = - response_tri!(ctx.resolve_evm_block(block_id).map_err(|e| { - tracing::warn!(error = %e, ?block_id, "block resolution failed for bundle"); - SignetError::Resolve(e.to_string()) - })); - - let mut driver = SignetBundleDriver::from(&bundle); - - let trevm = signet_evm::signet_evm(db, ctx.constants().clone()) - .fill_cfg(&CfgFiller(ctx.chain_id())) - .fill_block(&header); - - response_tri!(trevm.drive_bundle(&mut driver).map_err(|e| { - let e = e.into_error(); - tracing::warn!(error = %e, "evm error during bundle simulation"); - SignetError::Evm(e.to_string()) - })); - - ResponsePayload(Ok(driver.into_response())) - }; - - let task = async move { - select! { - _ = tokio::time::sleep(Duration::from_millis(timeout)) => { - ResponsePayload::internal_error_message( - SignetError::Timeout.to_string().into(), - ) - } - result = task => { - result - } - } - }; - - await_handler!(@response_option hctx.spawn(task)) -} diff --git a/crates/rpc-storage/src/signet/error.rs b/crates/rpc-storage/src/signet/error.rs deleted file mode 100644 index 83570ab..0000000 --- a/crates/rpc-storage/src/signet/error.rs +++ /dev/null @@ -1,27 +0,0 @@ -//! Error types for the signet namespace. - -/// Errors that can occur in the `signet` namespace. -#[derive(Debug, Clone, thiserror::Error)] -pub enum SignetError { - /// The transaction cache was not provided. - #[error("transaction cache not provided")] - TxCacheNotProvided, - /// Block resolution failed. - #[error("block resolution error")] - Resolve(String), - /// EVM execution error. 
- #[error("evm execution error")] - Evm(String), - /// Bundle simulation timed out. - #[error("timeout during bundle simulation")] - Timeout, -} - -impl serde::Serialize for SignetError { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - serializer.serialize_str(&self.to_string()) - } -} diff --git a/crates/rpc-storage/src/signet/mod.rs b/crates/rpc-storage/src/signet/mod.rs deleted file mode 100644 index 276e5fb..0000000 --- a/crates/rpc-storage/src/signet/mod.rs +++ /dev/null @@ -1,19 +0,0 @@ -//! Signet RPC methods and related code. - -mod endpoints; -use endpoints::{call_bundle, send_order}; -pub(crate) mod error; - -use crate::config::StorageRpcCtx; -use signet_hot::HotKv; -use signet_hot::model::HotKvRead; -use trevm::revm::database::DBErrorMarker; - -/// Instantiate a `signet` API router backed by storage. -pub(crate) fn signet() -> ajj::Router> -where - H: HotKv + Send + Sync + 'static, - ::Error: DBErrorMarker, -{ - ajj::Router::new().route("sendOrder", send_order::).route("callBundle", call_bundle::) -} diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index f0b0b95..fdf1910 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -7,42 +7,41 @@ authors.workspace = true license.workspace = true homepage.workspace = true repository.workspace = true +description = "Ethereum JSON-RPC server backed by signet-storage" [dependencies] -signet-node-types.workspace = true -signet-db.workspace = true - -signet-bundle.workspace = true +signet-storage.workspace = true +signet-cold.workspace = true +signet-hot.workspace = true +signet-storage-types.workspace = true signet-evm.workspace = true -signet-tx-cache.workspace = true +trevm = { workspace = true, features = ["call", "estimate_gas"] } signet-types.workspace = true - -ajj.workspace = true -trevm.workspace = true - +signet-tx-cache.workspace = true +signet-bundle.workspace = true alloy.workspace = true -revm-inspectors.workspace = true - -reth.workspace = true 
-reth-chainspec.workspace = true -reth-db.workspace = true -reth-db-common.workspace = true -reth-node-api.workspace = true -reth-rpc-eth-api.workspace = true - -dashmap = "6.1.0" -eyre.workspace = true -serde.workspace = true -thiserror.workspace = true -tokio = { workspace = true, features = ["macros"] } -tokio-util = "0.7.13" +ajj.workspace = true +tokio.workspace = true +tokio-stream = "0.1" +tokio-util = "0.7" tracing.workspace = true -serde_json.workspace = true -futures-util = "0.3.31" +thiserror.workspace = true +serde.workspace = true +dashmap = "6.1.0" +revm-inspectors.workspace = true itertools.workspace = true -signet-block-processor.workspace = true [dev-dependencies] -signet-zenith.workspace = true +tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } +tokio-util = "0.7" +signet-cold = { workspace = true, features = ["test-utils"] } +signet-hot = { workspace = true, features = ["test-utils"] } +signet-storage.workspace = true +signet-storage-types.workspace = true signet-constants.workspace = true -chrono.workspace = true +alloy.workspace = true +serde_json.workspace = true +axum = "0.8" +tower = { version = "0.5", features = ["util"] } +http = "1" +trevm.workspace = true diff --git a/crates/rpc/README.md b/crates/rpc/README.md index 324f060..1046e59 100644 --- a/crates/rpc/README.md +++ b/crates/rpc/README.md @@ -1,51 +1,15 @@ -## signet-rpc +# signet-rpc -This crate contains the RPC server for Signet. The RPC server is a JSON-RPC -server that listens for incoming requests and processes them. The server is -built on top of the `ajj` crate, and uses the `tokio` runtime. +Ethereum JSON-RPC server backed by `signet-storage`'s unified storage backend. -This crate is intended to be used as part of a complete [reth] node. It is -incredibly difficult to use this crate without a full reth node, as it requires -a database handle and access to host configuration. If you are interested in -doing that, let us know we think it'd be cool. 
+This crate provides a standalone ETH RPC implementation that uses hot storage +for state queries and cold storage for block, transaction, and receipt data. -### What's new in Signet? +## Supported Methods -Signet's RPC server draws heavily on [reth]'s data types, and borrows code from -reth's RPC handler logic. However, we make a few design decisions that are -unique to Signet: - -- The following endpoints are disabled - - wallet-related endpoints like `eth_sign`. Good Riddance. - - p2p-related endpoints like `eth_listening`. Signet has no peer. - - mining-related endpoints like `eth_mining`. Signet needs no miners. - - txpool-related endpoints like `txpool_content`. Signet wants no txpool. - - uncle-related endpoints like `eth_getUncleByBlockHashAndIndex`. Signet - knows no family. - - trie-related endpoints like `eth_getProof`. Signet grows no tries. -- Filters and subscriptions have been rewritten from the ground up. -- Bundle-related endpoints (WIP) use signet bundles from the `signet-bundle` - crate. - -See the [Signet Docs] for more information. - -### What's in this crate? - -- `RpcCtx` a struct managing the DB handle, subscriptions, filters, etc. -- The `router()` function will create a complete [`ajj::Router`]. -- `serve_*` family of methods allow quick setup of the RPC server. - -This is a work in progress. The RPC server is fully functional, but a few -things are missing. - -- The following namespaces are not well-supported yet: - - `admin_` - - `debug_` - - `trace_` - - `signet_` - -[reth]: https://github.com/paradigmxyz/reth -[`ajj`]: https://docs.rs/ajj/latest/ajj/ -[`ajj::Router`]: https://docs.rs/ajj/latest/ajj/struct.Router.html -[`tokio`]: https://docs.rs/tokio/latest/tokio/ -[Signet Docs]: https://docs.signet.sh +- Block queries: `eth_blockNumber`, `eth_getBlockByHash`, `eth_getBlockByNumber`, etc. +- Transaction queries: `eth_getTransactionByHash`, `eth_getTransactionReceipt`, etc. 
+- Account state: `eth_getBalance`, `eth_getStorageAt`, `eth_getCode`, `eth_getTransactionCount` +- EVM execution: `eth_call`, `eth_estimateGas` +- Logs: `eth_getLogs` +- Transaction submission: `eth_sendRawTransaction` (optional, via `TxCache`) diff --git a/crates/rpc-storage/src/config/ctx.rs b/crates/rpc/src/config/ctx.rs similarity index 97% rename from crates/rpc-storage/src/config/ctx.rs rename to crates/rpc/src/config/ctx.rs index e46324a..ac6c064 100644 --- a/crates/rpc-storage/src/config/ctx.rs +++ b/crates/rpc/src/config/ctx.rs @@ -10,16 +10,17 @@ use crate::{ }; use alloy::eips::{BlockId, BlockNumberOrTag}; use signet_cold::ColdStorageReadHandle; -use signet_hot::HotKv; -use signet_hot::db::HotDbRead; -use signet_hot::model::{HotKvRead, RevmRead}; +use signet_hot::{ + HotKv, + db::HotDbRead, + model::{HotKvRead, RevmRead}, +}; use signet_storage::UnifiedStorage; use signet_tx_cache::TxCache; use signet_types::constants::SignetSystemConstants; use std::sync::Arc; use tokio::sync::{Semaphore, broadcast}; -use trevm::revm::database::DBErrorMarker; -use trevm::revm::database::StateBuilder; +use trevm::revm::database::{DBErrorMarker, StateBuilder}; /// Resolved block context for EVM execution. /// @@ -56,7 +57,7 @@ impl Clone for StorageRpcCtx { #[derive(Debug)] struct StorageRpcCtxInner { - storage: UnifiedStorage, + storage: Arc>, constants: SignetSystemConstants, tags: BlockTags, tx_cache: Option, @@ -73,7 +74,7 @@ impl StorageRpcCtx { /// new block notifications. Callers send [`NewBlockNotification`]s on /// this channel as blocks are appended to storage. 
pub fn new( - storage: UnifiedStorage, + storage: Arc>, constants: SignetSystemConstants, tags: BlockTags, tx_cache: Option, diff --git a/crates/rpc-storage/src/config/gas_oracle.rs b/crates/rpc/src/config/gas_oracle.rs similarity index 68% rename from crates/rpc-storage/src/config/gas_oracle.rs rename to crates/rpc/src/config/gas_oracle.rs index 866cad3..787c64c 100644 --- a/crates/rpc-storage/src/config/gas_oracle.rs +++ b/crates/rpc/src/config/gas_oracle.rs @@ -2,6 +2,9 @@ //! //! Reads recent block headers and transactions from cold storage to //! compute a suggested tip cap based on recent transaction activity. +//! Behavior mirrors reth's `GasPriceOracle`: a configurable default +//! price when no transactions exist, an `ignore_price` floor, and a +//! `max_price` cap. use alloy::{consensus::Transaction, primitives::U256}; use signet_cold::{ColdStorageError, ColdStorageReadHandle, HeaderSpecifier}; @@ -11,10 +14,12 @@ use crate::config::StorageRpcConfig; /// Suggest a tip cap based on recent transaction tips. /// /// Reads the last `gas_oracle_block_count` blocks from cold storage, -/// computes the effective tip per gas for each transaction, sorts all -/// tips, and returns the value at `gas_oracle_percentile`. +/// computes the effective tip per gas for each transaction, filters +/// tips below `ignore_price`, sorts the remainder, and returns the +/// value at `gas_oracle_percentile`, clamped to `max_price`. /// -/// Returns `U256::ZERO` if no transactions are found in the range. +/// Returns `default_gas_price` (default 1 Gwei) when no qualifying +/// transactions are found. 
pub(crate) async fn suggest_tip_cap( cold: &ColdStorageReadHandle, latest: u64, @@ -36,14 +41,16 @@ pub(crate) async fn suggest_tip_cap( let txs = cold.get_transactions_in_block(block_num).await?; for tx in &txs { - if let Some(tip) = tx.effective_tip_per_gas(base_fee) { + if let Some(tip) = tx.effective_tip_per_gas(base_fee) + && config.ignore_price.is_none_or(|floor| tip >= floor) + { all_tips.push(tip); } } } if all_tips.is_empty() { - return Ok(U256::ZERO); + return Ok(config.default_gas_price.map_or(U256::ZERO, U256::from)); } all_tips.sort_unstable(); @@ -51,5 +58,11 @@ pub(crate) async fn suggest_tip_cap( let index = ((config.gas_oracle_percentile / 100.0) * (all_tips.len() - 1) as f64) as usize; let index = index.min(all_tips.len() - 1); - Ok(U256::from(all_tips[index])) + let mut price = U256::from(all_tips[index]); + + if let Some(max) = config.max_price { + price = price.min(U256::from(max)); + } + + Ok(price) } diff --git a/crates/rpc-storage/src/config/mod.rs b/crates/rpc/src/config/mod.rs similarity index 100% rename from crates/rpc-storage/src/config/mod.rs rename to crates/rpc/src/config/mod.rs diff --git a/crates/rpc-storage/src/config/resolve.rs b/crates/rpc/src/config/resolve.rs similarity index 99% rename from crates/rpc-storage/src/config/resolve.rs rename to crates/rpc/src/config/resolve.rs index 8a23ef4..47d4a1f 100644 --- a/crates/rpc-storage/src/config/resolve.rs +++ b/crates/rpc/src/config/resolve.rs @@ -35,7 +35,7 @@ pub struct SyncStatus { /// # Example /// /// ``` -/// use signet_rpc_storage::BlockTags; +/// use signet_rpc::BlockTags; /// /// let tags = BlockTags::new(100, 95, 90); /// assert_eq!(tags.latest(), 100); diff --git a/crates/rpc-storage/src/config/rpc_config.rs b/crates/rpc/src/config/rpc_config.rs similarity index 88% rename from crates/rpc-storage/src/config/rpc_config.rs rename to crates/rpc/src/config/rpc_config.rs index 64f562a..b0841f8 100644 --- a/crates/rpc-storage/src/config/rpc_config.rs +++ 
b/crates/rpc/src/config/rpc_config.rs @@ -10,7 +10,7 @@ use std::time::Duration; /// # Example /// /// ``` -/// use signet_rpc_storage::StorageRpcConfig; +/// use signet_rpc::StorageRpcConfig; /// /// // Use defaults (matches reth defaults). /// let config = StorageRpcConfig::default(); @@ -75,6 +75,26 @@ pub struct StorageRpcConfig { /// Default: `60.0`. pub gas_oracle_percentile: f64, + /// Default gas price returned when no recent transactions exist. + /// + /// Reth defaults to 1 Gwei. Set to `None` to return zero. + /// + /// Default: `Some(1_000_000_000)` (1 Gwei). + pub default_gas_price: Option, + + /// Minimum effective tip to include in the oracle sample. + /// + /// Tips below this threshold are discarded, matching reth's + /// `ignore_price` behavior. + /// + /// Default: `Some(2)` (2 wei). + pub ignore_price: Option, + + /// Maximum gas price the oracle will ever suggest. + /// + /// Default: `Some(500_000_000_000)` (500 Gwei). + pub max_price: Option, + /// Maximum header history for `eth_feeHistory` without percentiles. /// /// Default: `1024`. @@ -111,6 +131,9 @@ impl Default for StorageRpcConfig { stale_filter_ttl: Duration::from_secs(5 * 60), gas_oracle_block_count: 20, gas_oracle_percentile: 60.0, + default_gas_price: Some(1_000_000_000), + ignore_price: Some(2), + max_price: Some(500_000_000_000), max_header_history: 1024, max_block_history: 1024, default_bundle_timeout_ms: 1000, diff --git a/crates/rpc/src/ctx/fee_hist.rs b/crates/rpc/src/ctx/fee_hist.rs deleted file mode 100644 index 4ceaf14..0000000 --- a/crates/rpc/src/ctx/fee_hist.rs +++ /dev/null @@ -1,150 +0,0 @@ -use reth::{ - core::primitives::SealedBlock, - primitives::{Block, RecoveredBlock}, - providers::{CanonStateNotification, Chain}, -}; -use signet_types::MagicSig; -use std::sync::Arc; - -/// Removes Signet system transactions from the block. 
-fn strip_block(block: RecoveredBlock) -> RecoveredBlock { - let (sealed, mut senders) = block.split_sealed(); - let (header, mut body) = sealed.split_sealed_header_body(); - - // This is the index of the first transaction that has a system magic - // signature. - let sys_index = body - .transactions - .partition_point(|tx| MagicSig::try_from_signature(tx.signature()).is_none()); - - body.transactions.truncate(sys_index); - senders.truncate(sys_index); - - let sealed = SealedBlock::from_sealed_parts(header, body); - - RecoveredBlock::new_sealed(sealed, senders) -} - -/// Removes Signet system transactions from the chain. This function uses -/// `Arc::make_mut` to clone the contents of the Arc and modify the new -/// instance. -fn strip_chain(chain: &Chain) -> Arc { - // Takes the contents out, replacing with default - let (blocks, outcome, trie) = chain.clone().into_inner(); - - // Strip each block - let blocks: Vec> = blocks.into_blocks().map(strip_block).collect(); - - // Replace the original chain with the stripped version - Arc::new(Chain::new(blocks, outcome, trie)) -} - -/// Strips Signet system transactions from the `CanonStateNotification`. 
-pub(crate) fn strip_signet_system_txns(notif: CanonStateNotification) -> CanonStateNotification { - match notif { - CanonStateNotification::Commit { new } => { - CanonStateNotification::Commit { new: strip_chain(&new) } - } - CanonStateNotification::Reorg { mut old, mut new } => { - old = strip_chain(&old); - new = strip_chain(&new); - - CanonStateNotification::Reorg { old, new } - } - } -} - -#[cfg(test)] -mod test { - use alloy::{ - consensus::{TxEip1559, TxEnvelope}, - primitives::{Address, B256}, - signers::Signature, - }; - use reth::primitives::{BlockBody, Header, SealedHeader}; - - use super::*; - - fn test_magic_sig_tx() -> TxEnvelope { - let sig = MagicSig::enter(B256::repeat_byte(0x22), 3); - - let sig = sig.into(); - - dbg!(MagicSig::try_from_signature(&sig).is_some()); - - TxEnvelope::new_unchecked(TxEip1559::default().into(), sig, B256::repeat_byte(0x33)) - } - - fn test_non_magic_sig_tx() -> TxEnvelope { - let sig = Signature::test_signature(); - TxEnvelope::new_unchecked(TxEip1559::default().into(), sig, B256::repeat_byte(0x44)) - } - - fn test_block_body() -> BlockBody { - BlockBody { - transactions: vec![ - test_non_magic_sig_tx().into(), - test_non_magic_sig_tx().into(), - test_magic_sig_tx().into(), - test_magic_sig_tx().into(), - ], - ..Default::default() - } - } - - fn test_sealed_header(number: u64) -> SealedHeader { - let header = Header { number, ..Default::default() }; - SealedHeader::new_unhashed(header) - } - - fn test_sealed_block(block_num: u64) -> SealedBlock { - SealedBlock::from_sealed_parts(test_sealed_header(block_num), test_block_body()) - } - - fn test_block(block_num: u64) -> RecoveredBlock { - RecoveredBlock::new_sealed( - test_sealed_block(block_num), - vec![Address::repeat_byte(0x11); 4], - ) - } - - fn test_chain(count: u64) -> Arc { - let blocks = (0..count).map(test_block); - Arc::new(Chain::new(blocks, Default::default(), Default::default())) - } - - #[test] - fn test_strip_block() { - let block = test_block(0); - 
assert_eq!(block.body().transactions.len(), 4); - assert_eq!(block.senders().len(), 4); - - let stripped = strip_block(block); - assert_eq!(stripped.body().transactions.len(), 2); - assert_eq!(stripped.senders().len(), 2); - - for tx in stripped.body().transactions.iter() { - assert!(MagicSig::try_from_signature(tx.signature()).is_none()); - } - } - - #[test] - fn test_strip_chain() { - let original = test_chain(2); - assert_eq!(original.blocks().len(), 2); - - let chain = strip_chain(&original); - - assert_ne!(&*chain, &*original); - - assert_eq!(chain.blocks().len(), 2); - - for (_num, block) in chain.blocks().iter() { - assert_eq!(block.body().transactions.len(), 2); - assert_eq!(block.senders().len(), 2); - for tx in block.body().transactions.iter() { - assert!(MagicSig::try_from_signature(tx.signature()).is_none()); - } - } - } -} diff --git a/crates/rpc/src/ctx/full.rs b/crates/rpc/src/ctx/full.rs deleted file mode 100644 index 1300b8e..0000000 --- a/crates/rpc/src/ctx/full.rs +++ /dev/null @@ -1,282 +0,0 @@ -use crate::SignetCtx; -use alloy::{consensus::Header, eips::BlockId}; -use reth::{ - providers::{ProviderResult, providers::BlockchainProvider}, - rpc::server_types::eth::{EthApiError, EthConfig}, - rpc::types::BlockNumberOrTag, - tasks::{TaskExecutor, TaskSpawner}, -}; -use reth_node_api::FullNodeComponents; -use signet_db::RuRevmState; -use signet_evm::EvmNeedsTx; -use signet_node_types::Pnt; -use signet_tx_cache::TxCache; -use signet_types::constants::SignetSystemConstants; -use std::sync::Arc; -use tokio::sync::{AcquireError, OwnedSemaphorePermit, Semaphore}; -use trevm::{ - helpers::Ctx, - revm::{Inspector, inspector::NoOpInspector}, -}; - -/// State location when instantiating an EVM instance. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -#[repr(i8)] -pub enum LoadState { - /// Load the state before the block's transactions (i.e. at the start of - /// the block). - Before = -1, - /// Load the state after the block's transactions (i.e. 
at the end of the - /// block). - After = 0, -} - -impl LoadState { - /// Adjust the height based on the state location. - pub const fn adjust_height(&self, height: u64) -> u64 { - match self { - LoadState::Before => height.saturating_sub(1), - LoadState::After => height, - } - } - - /// Returns `true` if the state location is before the block. - pub const fn is_before_block(&self) -> bool { - matches!(self, Self::Before) - } - - /// Returns `true` if the state location is after the block. - pub const fn is_after_block(&self) -> bool { - matches!(self, Self::After) - } -} - -impl From for LoadState { - fn from(value: BlockId) -> Self { - match value { - BlockId::Number(no) => no.into(), - _ => LoadState::After, - } - } -} - -impl From for LoadState { - fn from(value: BlockNumberOrTag) -> Self { - match value { - BlockNumberOrTag::Pending => LoadState::Before, - _ => LoadState::After, - } - } -} - -impl From for bool { - fn from(value: LoadState) -> Self { - matches!(value, LoadState::Before) - } -} - -/// RPC context. Contains all necessary host and signet components for serving -/// RPC requests. -#[derive(Debug)] -pub struct RpcCtx -where - Host: FullNodeComponents, - Signet: Pnt, -{ - inner: Arc>, -} - -impl RpcCtx -where - Host: FullNodeComponents, - Signet: Pnt, -{ - /// Create a new `RpcCtx`. - /// - /// ## WARNING - /// - /// The [`BlockchainProvider`] passed in MUST be receiving updates from the - /// node wrt canonical chain changes. Some task MUST be calling relevant - /// [`CanonChainTracker`] methods on a clone of this [`BlockchainProvider`], - /// - /// If this is not correctly set up, [`BlockId`] resolution for `latest`, - /// `safe,` finalized, etc will not work correctly. 
- /// - /// [`CanonChainTracker`]: reth::providers::CanonChainTracker - pub fn new( - host: Host, - constants: SignetSystemConstants, - provider: BlockchainProvider, - eth_config: EthConfig, - tx_cache: Option, - spawner: Tasks, - ) -> ProviderResult - where - Tasks: TaskSpawner + Clone + 'static, - { - RpcCtxInner::new(host, constants, provider, eth_config, tx_cache, spawner) - .map(|inner| Self { inner: Arc::new(inner) }) - } -} - -impl Clone for RpcCtx -where - Host: FullNodeComponents, - Signet: Pnt, -{ - fn clone(&self) -> Self { - Self { inner: self.inner.clone() } - } -} - -impl core::ops::Deref for RpcCtx -where - Host: FullNodeComponents, - Signet: Pnt, -{ - type Target = RpcCtxInner; - - fn deref(&self) -> &Self::Target { - &self.inner - } -} - -/// Shared context between all RPC handlers. -#[derive(Debug)] -struct SharedContext { - tracing_semaphores: Arc, -} - -/// Inner context for [`RpcCtx`]. -#[derive(Debug)] -pub struct RpcCtxInner -where - Host: FullNodeComponents, - Signet: Pnt, -{ - host: Host, - signet: SignetCtx, - - shared: SharedContext, -} - -impl RpcCtxInner -where - Host: FullNodeComponents, - Signet: Pnt, -{ - /// Create a new `RpcCtxInner`. - /// - /// ## WARNING - /// - /// The [`BlockchainProvider`] passed in MUST be receiving updates from the - /// node wrt canonical chain changes. Some task MUST be calling relevant - /// [`CanonChainTracker`] methods on a clone of this [`BlockchainProvider`], - /// - /// If this is not correctly set up, [`BlockId`] resolution for `latest`, - /// `safe,` finalized, etc will not work correctly. 
- /// - /// [`CanonChainTracker`]: reth::providers::CanonChainTracker - pub fn new( - host: Host, - constants: SignetSystemConstants, - provider: BlockchainProvider, - eth_config: EthConfig, - tx_cache: Option, - spawner: Tasks, - ) -> ProviderResult - where - Tasks: TaskSpawner + Clone + 'static, - { - let tracing_semaphores = Semaphore::new(eth_config.max_tracing_requests).into(); - - SignetCtx::new(constants, provider, eth_config, tx_cache, spawner).map(|signet| Self { - host, - signet, - shared: SharedContext { tracing_semaphores }, - }) - } - - /// Acquire a permit for tracing. - pub async fn acquire_tracing_permit(&self) -> Result { - self.shared.tracing_semaphores.clone().acquire_owned().await - } - - pub const fn host(&self) -> &Host { - &self.host - } - - pub const fn signet(&self) -> &SignetCtx { - &self.signet - } - - pub fn task_executor(&self) -> &TaskExecutor { - self.host.task_executor() - } - - /// Instantiate a trevm instance with a custom inspector. - /// - /// The `header` argument is used to fill the block context of the EVM. If - /// the `block_id` is `Pending` the EVM state will be the block BEFORE the - /// `header`. I.e. if the block number of the `header` is `n`, the state - /// will be after block `n-1`, (effectively the state at the start of block - /// `n`). - /// - /// if the `block_id` is `Pending` the state will be based on the - /// and `block` arguments - pub fn trevm_with_inspector>>( - &self, - state: LoadState, - header: &Header, - inspector: I, - ) -> Result, EthApiError> { - let load_height = state.adjust_height(header.number); - let spec_id = self.signet.evm_spec_id(header); - - let db = self.signet.state_provider_database(load_height)?; - - let mut trevm = - signet_evm::signet_evm_with_inspector(db, inspector, self.signet.constants().clone()) - .fill_cfg(&self.signet) - .fill_block(header); - - trevm.set_spec_id(spec_id); - - Ok(trevm) - } - - /// Create a trevm instance. 
- pub fn trevm( - &self, - state: LoadState, - header: &Header, - ) -> Result, EthApiError> { - self.trevm_with_inspector(state, header, NoOpInspector) - } -} - -// Some code in this file has been copied and modified from reth -// -// The original license is included below: -// -// The MIT License (MIT) -// -// Copyright (c) 2022-2025 Reth Contributors -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -//. -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
diff --git a/crates/rpc/src/ctx/mod.rs b/crates/rpc/src/ctx/mod.rs deleted file mode 100644 index ea63b73..0000000 --- a/crates/rpc/src/ctx/mod.rs +++ /dev/null @@ -1,8 +0,0 @@ -mod signet; -pub use signet::SignetCtx; - -mod full; -pub use full::{LoadState, RpcCtx}; - -mod fee_hist; -pub(crate) use fee_hist::strip_signet_system_txns; diff --git a/crates/rpc/src/ctx/signet.rs b/crates/rpc/src/ctx/signet.rs deleted file mode 100644 index 5ca08dc..0000000 --- a/crates/rpc/src/ctx/signet.rs +++ /dev/null @@ -1,928 +0,0 @@ -use crate::{ - ctx::strip_signet_system_txns, - eth::EthError, - interest::{ActiveFilter, FilterManager, FilterOutput, SubscriptionManager}, - receipts::build_signet_receipt, - utils::BlockRangeInclusiveIter, -}; -use alloy::{ - consensus::{BlockHeader, Header, Signed, Transaction, TxEnvelope}, - eips::{BlockId, BlockNumberOrTag, NumHash}, - network::Ethereum, - primitives::{B256, U64}, - rpc::types::{FeeHistory, Filter, Log}, -}; -use futures_util::StreamExt; -use reth::{ - core::primitives::SignerRecoverable, - primitives::{Block, Receipt, Recovered, RecoveredBlock, TransactionSigned}, - providers::{ - BlockHashReader, BlockIdReader, BlockNumReader, CanonStateSubscriptions, HeaderProvider, - ProviderError, ProviderResult, ReceiptProvider, StateProviderFactory, TransactionsProvider, - providers::BlockchainProvider, - }, - revm::database::StateProviderDatabase, - rpc::{ - eth::{filter::EthFilterError, helpers::types::EthRpcConverter}, - server_types::eth::{ - EthApiError, EthConfig, EthStateCache, FeeHistoryCache, FeeHistoryEntry, - GasPriceOracle, - fee_history::{ - calculate_reward_percentiles_for_block, fee_history_cache_new_blocks_task, - }, - logs_utils::{self, ProviderOrBlock, append_matching_block_logs}, - receipt::EthReceiptConverter, - }, - types::{FilterBlockOption, FilteredParams}, - }, - tasks::TaskSpawner, -}; -use reth_chainspec::{BaseFeeParams, ChainSpec, ChainSpecProvider}; -use reth_node_api::BlockBody; -use 
reth_rpc_eth_api::{RpcBlock, RpcConvert, RpcReceipt, RpcTransaction}; -use signet_db::RuRevmState; -use signet_node_types::Pnt; -use signet_tx_cache::TxCache; -use signet_types::{MagicSig, constants::SignetSystemConstants}; -use std::{marker::PhantomData, sync::Arc}; -use tracing::{Level, instrument, trace}; -use trevm::{ - Cfg, - revm::{context::CfgEnv, database::StateBuilder, primitives::hardfork::SpecId}, -}; - -/// The maximum number of headers we read at once when handling a range filter. -const MAX_HEADERS_RANGE: u64 = 1_000; // with ~530bytes per header this is ~500kb - -/// Signet context. This struct contains all the necessary components for -/// accessing Signet node state, and serving RPC requests. -#[derive(Debug)] -pub struct SignetCtx -where - Inner: Pnt, -{ - // Basics - constants: SignetSystemConstants, - eth_config: EthConfig, - - // State stuff - provider: BlockchainProvider, - cache: EthStateCache, - - // Gas stuff - gas_oracle: GasPriceOracle>, - fee_history: FeeHistoryCache, - - // Tx stuff - tx_cache: Option, - - // Filter and subscription stuff - filters: FilterManager, - subs: SubscriptionManager, - - // Spooky ghost stuff - _pd: std::marker::PhantomData Inner>, -} - -impl SignetCtx -where - Inner: Pnt, -{ - /// Instantiate a new `SignetCtx`, spawning necessary tasks to keep the - /// relevant caches up to date. - /// - /// ## WARNING - /// - /// The [`BlockchainProvider`] passed in MUST be receiving updates from the - /// node wrt canonical chain changes. Some task MUST be calling relevant - /// [`CanonChainTracker`] methods on a clone of this [`BlockchainProvider`], - /// - /// If this is not correctly set up, [`BlockId`] resolution for `latest`, - /// `safe,` finalized, etc will not work correctly. 
- /// - /// [`CanonChainTracker`]: reth::providers::CanonChainTracker - pub fn new( - constants: SignetSystemConstants, - provider: BlockchainProvider, - eth_config: EthConfig, - tx_cache: Option, - spawner: Tasks, - ) -> ProviderResult - where - Tasks: TaskSpawner + Clone + 'static, - { - let cache = EthStateCache::spawn_with(provider.clone(), eth_config.cache, spawner.clone()); - let gas_oracle = - GasPriceOracle::new(provider.clone(), eth_config.gas_oracle, cache.clone()); - - let fee_history = FeeHistoryCache::new(eth_config.fee_history_cache); - - // The fee task pre-calculates and caches common percentiles for the - // `eth_feeHistory` RPC method. - let fee_task = fee_history_cache_new_blocks_task( - fee_history.clone(), - provider.canonical_state_stream().map(strip_signet_system_txns), - provider.clone(), - cache.clone(), - ); - - spawner.spawn_critical("fee_history_cache_new_blocks", Box::pin(fee_task)); - - let filters = FilterManager::new(eth_config.stale_filter_ttl, eth_config.stale_filter_ttl); - - let subs = SubscriptionManager::new(provider.clone(), eth_config.stale_filter_ttl); - - Ok(Self { - constants, - provider, - eth_config, - cache, - gas_oracle, - fee_history, - tx_cache, - filters, - subs, - _pd: PhantomData, - }) - } - - /// Access the signet constants - pub const fn constants(&self) -> &SignetSystemConstants { - &self.constants - } - - /// Access the signet DB - pub const fn provider(&self) -> &BlockchainProvider { - &self.provider - } - - /// Access the signet [`EthConfig`] - pub const fn config(&self) -> &EthConfig { - &self.eth_config - } - - /// Access the tx_cache - pub fn tx_cache(&self) -> Option { - self.tx_cache.clone() - } - - /// Access the [`ChainSpec`]. - pub fn chain_spec(&self) -> Arc { - self.provider.chain_spec() - } - - /// Get the EVM spec ID for a given block. 
- pub fn evm_spec_id(&self, header: &Header) -> SpecId { - signet_block_processor::revm_spec(&self.chain_spec(), header.timestamp()) - } - - /// Access the subscription manager. - pub const fn subscriptions(&self) -> &SubscriptionManager { - &self.subs - } - - /// Make a [`StateProviderDatabase`] from the read-write provider, suitable - /// for use with Trevm. - pub fn state_provider_database(&self, height: u64) -> Result { - // Get the state provider for the block number - let sp = self.provider.history_by_block_number(height)?; - - // Wrap in Revm compatibility layer - let spd = StateProviderDatabase::new(sp); - - let builder = StateBuilder::new_with_database(spd); - - Ok(builder.build()) - } - - /// Get the [`Header`] for a given block. - pub async fn raw_header( - &self, - t: impl Into, - ) -> Result, EthApiError> { - let Some(hash) = self.provider.block_hash_for_id(t.into())? else { - return Ok(None); - }; - - let header = self.cache.get_header(hash).await.map_err(EthApiError::from)?; - - Ok(Some((hash, header))) - } - - /// Get the block for a given block, returning the block hash and - /// the block itself. - pub async fn raw_block( - &self, - t: impl Into, - ) -> Result>)>, EthApiError> { - let Some(hash) = self.provider.block_hash_for_id(t.into())? else { - return Ok(None); - }; - - self.cache.get_recovered_block(hash).await.map_err(Into::into).map(|b| b.map(|b| (hash, b))) - } - - /// Create a transaction response builder for the RPC API. - pub fn rpc_converter(&self) -> EthRpcConverter { - EthRpcConverter::new(EthReceiptConverter::new(self.chain_spec())) - } - - /// Get the block for a given block, formatting the block for - /// the RPC API. - pub async fn block( - &self, - t: impl Into, - full: Option, - ) -> Result>, EthApiError> { - let Some(hash) = self.provider.block_hash_for_id(t.into())? else { - return Ok(None); - }; - - let Some(block) = self.cache.get_recovered_block(hash).await? 
else { - return Ok(None); - }; - - (*block) - .clone() - .into_rpc_block( - full.unwrap_or_default().into(), - |tx, tx_info| self.rpc_converter().fill(tx, tx_info), - |header, rlp_len| self.rpc_converter().convert_header(header, rlp_len), - ) - .map(Some) - } - - /// Get the tx count for a given block. - pub async fn tx_count(&self, t: impl Into) -> Result, EthApiError> { - let Some(hash) = self.provider.block_hash_for_id(t.into())? else { - return Ok(None); - }; - - if let Some(block) = self.cache.get_recovered_block(hash).await? { - // ambiguous function names - let txns = BlockBody::transactions(block.body()); - Ok(Some(U64::from(txns.len()))) - } else { - Ok(None) - } - } - - /// Get the receipts for a given block. - pub async fn raw_receipts( - &self, - t: impl Into, - ) -> Result>>, EthApiError> { - let Some(hash) = self.provider.block_hash_for_id(t.into())? else { - return Ok(None); - }; - - self.cache.get_receipts(hash).await.map_err(Into::into) - } - - /// Get the transaction for a given hash, returning the transaction and its - /// block-related metadata. - pub fn raw_transaction_by_hash( - &self, - h: B256, - ) -> Result, EthApiError> { - self.provider.transaction_by_hash_with_meta(h).map_err(Into::into) - } - - /// Format a transaction for the RPC API. - fn format_rpc_tx( - tx: TransactionSigned, - block_hash: B256, - block_number: u64, - index: usize, - base_fee: Option, - ) -> Result { - let sig = tx.signature(); - - let sender = - if let Some(sender) = MagicSig::try_from_signature(sig).map(|s| s.rollup_sender()) { - sender - } else { - tx.recover_signer().map_err(|_| EthApiError::InvalidTransactionSignature)? 
- }; - - let tx = Recovered::new_unchecked(tx, sender); - - let from = tx.signer(); - let hash = *tx.hash(); - let signature = *tx.signature(); - - let inner: TxEnvelope = match tx.into_inner().into_typed_transaction() { - reth::primitives::Transaction::Legacy(tx) => { - Signed::new_unchecked(tx, signature, hash).into() - } - reth::primitives::Transaction::Eip2930(tx) => { - Signed::new_unchecked(tx, signature, hash).into() - } - reth::primitives::Transaction::Eip1559(tx) => { - Signed::new_unchecked(tx, signature, hash).into() - } - reth::primitives::Transaction::Eip4844(tx) => { - Signed::new_unchecked(tx, signature, hash).into() - } - reth::primitives::Transaction::Eip7702(tx) => { - Signed::new_unchecked(tx, signature, hash).into() - } - }; - let inner = Recovered::new_unchecked(inner, from); - - let egp = base_fee - .map(|base_fee| { - inner.effective_tip_per_gas(base_fee).unwrap_or_default() as u64 + base_fee - }) - .unwrap_or_else(|| inner.max_fee_per_gas() as u64); - - Ok(alloy::rpc::types::Transaction { - inner, - block_hash: Some(block_hash), - block_number: Some(block_number), - transaction_index: Some(index as u64), - effective_gas_price: Some(egp as u128), - }) - } - - /// Get a transaction by its hash, and format it for the RPC API. - pub fn rpc_transaction_by_hash( - &self, - hash: B256, - ) -> Result>, EthApiError> { - let Some((tx, meta)) = self.raw_transaction_by_hash(hash)? else { - return Ok(None); - }; - - Self::format_rpc_tx( - tx, - meta.block_hash, - meta.block_number, - meta.index as usize, - meta.base_fee, - ) - .map(Some) - } - - /// Get a transaction by its block and index, and format it for the RPC API. - pub async fn rpc_transaction_by_block_idx( - &self, - id: impl Into, - index: usize, - ) -> Result>, EthApiError> { - let Some((hash, block)) = self.raw_block(id).await? 
else { - return Ok(None); - }; - - block - .body() - .transactions - .get(index) - .map(|tx| { - Self::format_rpc_tx( - tx.clone(), - hash, - block.number(), - index, - block.base_fee_per_gas(), - ) - }) - .transpose() - } - - /// Get a receipt by its hash, and format it for the RPC API. - pub async fn rpc_receipt_by_hash( - &self, - hash: B256, - ) -> Result>, EthApiError> { - let Some((tx, meta)) = self.raw_transaction_by_hash(hash)? else { - trace!(%hash, "Transaction not found for receipt hash"); - return Ok(None); - }; - - let Some(receipt) = self.provider.receipt_by_hash(hash)? else { - trace!(%hash, "Receipt not found for transaction hash"); - return Ok(None); - }; - - let Some(all_receipts) = self.cache.get_receipts(meta.block_hash).await? else { - trace!(%hash, "Block not found for transaction hash"); - return Ok(None); - }; - - build_signet_receipt(tx, meta, receipt, all_receipts.to_vec()).map(Some) - } - - /// Create the [`Header`] object for a specific [`BlockId`]. - pub async fn block_cfg(&self, mut block_id: BlockId) -> Result { - // If the block is pending, we'll load the latest and - let pending = block_id.is_pending(); - if pending { - block_id = BlockId::latest(); - } - - let Some((_, mut header)) = self.raw_header(block_id).await? else { - return Err(EthApiError::HeaderNotFound(block_id)); - }; - - // Modify the header for pending blocks, to simulate the next block. - if pending { - header.parent_hash = header.hash_slow(); - header.number += 1; - header.timestamp += 12; - header.base_fee_per_gas = header.next_block_base_fee(BaseFeeParams::ethereum()); - header.gas_limit = self.eth_config.rpc_gas_cap; - } - - Ok(header) - } - - /// Create a gas price oracle. - pub const fn gas_oracle(&self) -> &GasPriceOracle> { - &self.gas_oracle - } - - /// Approximates reward at a given percentile for a specific block - /// Based on the configured resolution - /// - /// Implementation reproduced from reth. 
- fn approximate_percentile(&self, entry: &FeeHistoryEntry, requested_percentile: f64) -> u128 { - let resolution = self.fee_history.resolution(); - let rounded_percentile = - (requested_percentile * resolution as f64).round() / resolution as f64; - let clamped_percentile = rounded_percentile.clamp(0.0, 100.0); - - // Calculate the index in the precomputed rewards array - let index = (clamped_percentile / (1.0 / resolution as f64)).round() as usize; - // Fetch the reward from the FeeHistoryEntry - entry.rewards.get(index).copied().unwrap_or_default() - } - - /// Implements the `eth_feeHistory` RPC method. - /// - /// Implementation reproduced from reth, trimmed of 4844 support. - pub async fn fee_history( - &self, - mut block_count: u64, - mut newest: BlockNumberOrTag, - reward_percentiles: Option>, - ) -> Result { - if block_count == 0 { - return Ok(FeeHistory::default()); - } - - // See https://github.com/ethereum/go-ethereum/blob/2754b197c935ee63101cbbca2752338246384fec/eth/gasprice/feehistory.go#L218C8-L225 - let max_fee_history = if reward_percentiles.is_none() { - self.gas_oracle().config().max_header_history - } else { - self.gas_oracle().config().max_block_history - }; - - if block_count > max_fee_history { - block_count = max_fee_history - } - - if newest.is_pending() { - // cap the target block since we don't have fee history for the pending block - newest = BlockNumberOrTag::Latest; - // account for missing pending block - block_count = block_count.saturating_sub(1); - } - - let end_block = self - .provider() - .block_number_for_id(newest.into())? 
- .ok_or(EthApiError::HeaderNotFound(newest.into()))?; - - // need to add 1 to the end block to get the correct (inclusive) range - let end_block_plus = end_block + 1; - // Ensure that we would not be querying outside of genesis - if end_block_plus < block_count { - block_count = end_block_plus; - } - - // If reward percentiles were specified, we - // need to validate that they are monotonically - // increasing and 0 <= p <= 100 - // Note: The types used ensure that the percentiles are never < 0 - if let Some(percentiles) = &reward_percentiles - && percentiles.windows(2).any(|w| w[0] > w[1] || w[0] > 100.) - { - return Err(EthApiError::InvalidRewardPercentiles); - } - - // Fetch the headers and ensure we got all of them - // - // Treat a request for 1 block as a request for `newest_block..=newest_block`, - // otherwise `newest_block - 2 - // NOTE: We ensured that block count is capped - let start_block = end_block_plus - block_count; - - // Collect base fees, gas usage ratios and (optionally) reward percentile data - let mut base_fee_per_gas: Vec = Vec::new(); - let mut gas_used_ratio: Vec = Vec::new(); - - let mut rewards: Vec> = Vec::new(); - - // Check if the requested range is within the cache bounds - let fee_entries = self.fee_history.get_history(start_block, end_block).await; - - if let Some(fee_entries) = fee_entries { - if fee_entries.len() != block_count as usize { - return Err(EthApiError::InvalidBlockRange); - } - - for entry in &fee_entries { - base_fee_per_gas.push( - entry.header.base_fee_per_gas().expect("signet only has post-eip1559 headers") - as u128, - ); - gas_used_ratio.push(entry.gas_used_ratio); - - if let Some(percentiles) = &reward_percentiles { - let mut block_rewards = Vec::with_capacity(percentiles.len()); - for &percentile in percentiles { - block_rewards.push(self.approximate_percentile(entry, percentile)); - } - rewards.push(block_rewards); - } - } - let last_entry = fee_entries.last().expect("is not empty"); - - // Also need to 
include the `base_fee_per_gas` and `base_fee_per_blob_gas` for the - // next block - base_fee_per_gas.push( - last_entry - .header - .next_block_base_fee(BaseFeeParams::ethereum()) - .expect("signet only has post-eip1559 headers") as u128, - ); - } else { - // read the requested header range - let headers = self.provider().sealed_headers_range(start_block..=end_block)?; - if headers.len() != block_count as usize { - return Err(EthApiError::InvalidBlockRange); - } - - for header in &headers { - base_fee_per_gas.push(header.base_fee_per_gas().unwrap_or_default() as u128); - gas_used_ratio.push(header.gas_used() as f64 / header.gas_limit() as f64); - - // Percentiles were specified, so we need to collect reward percentile ino - if let Some(percentiles) = &reward_percentiles { - let (block, receipts) = self - .cache - .get_block_and_receipts(header.hash()) - .await? - .ok_or(EthApiError::InvalidBlockRange)?; - rewards.push( - calculate_reward_percentiles_for_block( - percentiles, - header.gas_used(), - header.base_fee_per_gas().unwrap_or_default(), - &block.body().transactions, - &receipts, - ) - .unwrap_or_default(), - ); - } - } - - // The spec states that `base_fee_per_gas` "[..] includes the next block after the - // newest of the returned range, because this value can be derived from the - // newest block" - // - // The unwrap is safe since we checked earlier that we got at least 1 header. 
- let last_header = headers.last().expect("is present"); - base_fee_per_gas.push( - last_header - .next_block_base_fee( - self.provider() - .chain_spec() - .base_fee_params_at_timestamp(last_header.timestamp()), - ) - .unwrap_or_default() as u128, - ); - }; - - let base_fee_per_blob_gas = vec![0; base_fee_per_gas.len()]; - let blob_gas_used_ratio = vec![0.; gas_used_ratio.len()]; - - Ok(FeeHistory { - base_fee_per_gas, - gas_used_ratio, - base_fee_per_blob_gas, - blob_gas_used_ratio, - oldest_block: start_block, - reward: reward_percentiles.map(|_| rewards), - }) - } - - /// Get logs for a given block hash based on a filter - /// - /// ## Panics - /// - /// Panics if the filter is a range filter - async fn logs_at_hash(&self, filter: &Filter) -> Result, EthApiError> { - let hash = *filter.block_option.as_block_hash().expect("COU"); - - let (block, receipts) = tokio::try_join!(self.raw_block(hash), self.raw_receipts(hash),)?; - - // Return an error if the block isn't found - let (_, block) = block.ok_or(EthApiError::HeaderNotFound(hash.into()))?; - // Return an error if the receipts aren't found - let receipts = receipts.ok_or(EthApiError::HeaderNotFound(hash.into()))?; - - let block_num_hash = NumHash::new(block.number(), hash); - let timestamp = block.timestamp(); - - let mut all_logs = Vec::new(); - append_matching_block_logs( - &mut all_logs, - ProviderOrBlock::>::Block(block), - filter, - block_num_hash, - &receipts, - false, - timestamp, - )?; - - Ok(all_logs) - } - - /// Returns all logs in the given _inclusive_ range that match the filter - /// - /// Returns an error if: - /// - underlying database error - /// - amount of matches exceeds configured limit - /// - // https://github.com/paradigmxyz/reth/blob/d01658e516abbf2a1a76855a26d7123286865f22/crates/rpc/rpc/src/eth/filter.rs#L506 - async fn get_logs_in_block_range( - &self, - filter: &Filter, - from_block: u64, - to_block: u64, - ) -> Result, EthFilterError> { - trace!(target: "rpc::eth::filter", 
from=from_block, to=to_block, ?filter, "finding logs in range"); - - if to_block < from_block { - return Err(EthFilterError::InvalidBlockRangeParams); - } - let max_blocks = self.config().max_blocks_per_filter; - - if to_block - from_block > max_blocks { - return Err(EthFilterError::QueryExceedsMaxBlocks(max_blocks)); - } - - let mut all_logs = Vec::new(); - - // derive bloom filters from filter input, so we can check headers for matching logs - let address_filter = FilteredParams::address_filter(&filter.address); - let topics_filter = FilteredParams::topics_filter(&filter.topics); - - // loop over the range of new blocks and check logs if the filter matches the log's bloom - // filter - for (from, to) in BlockRangeInclusiveIter::new(from_block..=to_block, MAX_HEADERS_RANGE) { - let headers = self.provider().headers_range(from..=to)?; - - for (idx, header) in headers.iter().enumerate() { - // only if filter matches - if FilteredParams::matches_address(header.logs_bloom(), &address_filter) - && FilteredParams::matches_topics(header.logs_bloom(), &topics_filter) - { - // these are consecutive headers, so we can use the parent hash of the next - // block to get the current header's hash - let hash = match headers.get(idx + 1) { - Some(parent) => parent.parent_hash(), - None => self - .provider() - .block_hash(header.number())? 
- .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?, - }; - - let (block, receipts) = - tokio::try_join!(self.raw_block(hash), self.raw_receipts(hash),)?; - - // Return an error if the block isn't found - let (_, block) = block.ok_or(EthApiError::HeaderNotFound(hash.into()))?; - // Return an error if the receipts aren't found - let receipts = receipts.ok_or(EthApiError::HeaderNotFound(hash.into()))?; - - let block_num_hash = NumHash::new(block.number(), hash); - let timestamp = block.timestamp(); - - append_matching_block_logs( - &mut all_logs, - ProviderOrBlock::>::Block(block), - filter, - block_num_hash, - &receipts, - false, - timestamp, - )?; - - // size check but only if range is multiple blocks, so we always return all - // logs of a single block - let max_logs = self.config().max_logs_per_response; - let is_multi_block_range = from_block != to_block; - if is_multi_block_range && all_logs.len() > max_logs { - return Err(EthFilterError::QueryExceedsMaxResults { - max_logs, - from_block, - to_block: block_num_hash.number.saturating_sub(1), - }); - } - } - } - } - - Ok(all_logs) - } - - /// Get logs for a given block range based on a filter - /// - /// ## Panics - /// - /// Panics if the filter is not a range filter - async fn logs_in_range(&self, filter: &Filter) -> Result, EthFilterError> { - // compute the range - let (from_block, to_block) = filter.block_option.as_range(); - - let info = self.provider().chain_info()?; - - // we start at the most recent block if unset in filter - let start_block = info.best_number; - let from = - from_block.map(|num| self.provider().convert_block_number(*num)).transpose()?.flatten(); - let to = - to_block.map(|num| self.provider().convert_block_number(*num)).transpose()?.flatten(); - let (from_block_number, to_block_number) = - logs_utils::get_filter_block_range(from, to, start_block, info)?; - self.get_logs_in_block_range(filter, from_block_number, to_block_number).await - } - - /// Logic for 
`eth_getLogs` RPC method. - pub async fn logs(&self, filter: &Filter) -> Result, EthError> { - if filter.block_option.is_range() { - self.logs_in_range(filter).await.map_err(Into::into) - } else { - self.logs_at_hash(filter).await.map_err(Into::into) - } - } - - /// Install a log filter. - pub fn install_log_filter(&self, filter: Filter) -> Result { - let chain_info = self.provider().chain_info()?; - - Ok(self.filters.install_log_filter(chain_info.best_number, filter)) - } - - /// Install a block filter. - pub fn install_block_filter(&self) -> Result { - let chain_info = self.provider().chain_info()?; - - Ok(self.filters.install_block_filter(chain_info.best_number)) - } - - /// Poll an active log filter for changes. - /// - /// # Panics - /// - /// Panics if the filter is not a Log filter - #[instrument(level = Level::DEBUG, skip_all, fields(since_last_poll = filter.time_since_last_poll().as_millis(), next_start_block = filter.next_start_block()))] - async fn get_log_filter_changes( - &self, - filter: &ActiveFilter, - ) -> Result<(u64, FilterOutput), EthError> { - debug_assert!(filter.is_filter()); - - // Load the current tip - let info = self.provider().chain_info()?; - let current_height = info.best_number; - - trace!(%filter, current_height, "Polling filter"); - - // If the filter was polled AFTER the current tip, we return an empty - // result - let start_block = filter.next_start_block(); - if start_block > current_height { - return Ok((current_height, FilterOutput::empty())); - } - - // Cast to a filter (this is checked by dbg_assert and by the caller) - let filter = filter.as_filter().unwrap(); - - let (from_block_number, to_block_number) = match filter.block_option { - FilterBlockOption::Range { from_block, to_block } => { - let from = from_block - .map(|num| self.provider().convert_block_number(num)) - .transpose()? - .flatten(); - let to = to_block - .map(|num| self.provider().convert_block_number(num)) - .transpose()? 
- .flatten(); - logs_utils::get_filter_block_range(from, to, start_block, info) - .map_err(EthFilterError::from)? - } - FilterBlockOption::AtBlockHash(_) => { - // blockHash is equivalent to fromBlock = toBlock = the block number with - // hash blockHash - // get_logs_in_block_range is inclusive - (start_block, current_height) - } - }; - let logs = self.get_logs_in_block_range(filter, from_block_number, to_block_number).await?; - - Ok((to_block_number, logs.into())) - } - - #[instrument(level = Level::DEBUG, skip_all, fields(since_last_poll = filter.time_since_last_poll().as_millis(), next_start_block = filter.next_start_block()))] - async fn get_block_filter_changes( - &self, - filter: &ActiveFilter, - ) -> Result<(u64, FilterOutput), EthError> { - debug_assert!(filter.is_block()); - // Get the current tip number - let info = self.provider().chain_info()?; - let current_height = info.best_number; - - trace!(%filter, current_height, "Polling filter"); - - let start_block = filter.next_start_block(); - if start_block > current_height { - return Ok((current_height, FilterOutput::empty())); - } - - // Note: we need to fetch the block hashes from inclusive range - // [start_block..best_block] - let end_block = current_height + 1; - - let block_hashes = self - .provider() - .canonical_hashes_range(start_block, end_block) - .map_err(|_| EthApiError::HeaderRangeNotFound(start_block.into(), end_block.into()))?; - Ok((current_height, block_hashes.into())) - } - - /// Get the changes for a filter - #[instrument(level = Level::DEBUG, skip(self))] - pub async fn filter_changes(&self, id: U64) -> Result { - let mut ref_mut = self - .filters - .get_mut(id) - .ok_or_else(|| EthFilterError::FilterNotFound(id.saturating_to::().into()))?; - let filter = ref_mut.value_mut(); - - let (polled_to_block, res) = if filter.is_block() { - self.get_block_filter_changes(filter).await? - } else { - self.get_log_filter_changes(filter).await? 
- }; - filter.mark_polled(polled_to_block); - - trace!(%filter, "Marked polled"); - Ok(res) - } - - /// Uninstall a filter. - pub fn uninstall_filter(&self, id: U64) -> bool { - self.filters.uninstall(id).is_some() - } -} - -impl Cfg for SignetCtx -where - Inner: Pnt, -{ - fn fill_cfg_env(&self, cfg_env: &mut CfgEnv) { - let CfgEnv { chain_id, .. } = cfg_env; - *chain_id = self.constants.ru_chain_id(); - } -} - -// Some code in this file has been copied and modified from reth -// -// The original license is included below: -// -// The MIT License (MIT) -// -// Copyright (c) 2022-2025 Reth Contributors -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -//. -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. diff --git a/crates/rpc/src/debug/endpoints.rs b/crates/rpc/src/debug/endpoints.rs index 293c0ba..882e793 100644 --- a/crates/rpc/src/debug/endpoints.rs +++ b/crates/rpc/src/debug/endpoints.rs @@ -1,164 +1,198 @@ +//! Debug namespace RPC endpoint implementations. 
+ use crate::{ - DebugError, RpcCtx, - utils::{await_handler, response_tri}, + config::StorageRpcCtx, + debug::{ + DebugError, + types::{TraceBlockParams, TraceTransactionParams}, + }, + eth::helpers::{CfgFiller, await_handler, response_tri}, }; use ajj::{HandlerCtx, ResponsePayload}; -use alloy::{consensus::BlockHeader, eips::BlockId, primitives::B256}; -use itertools::Itertools; -use reth::rpc::{ - server_types::eth::EthApiError, - types::{ - TransactionInfo, - trace::geth::{GethDebugTracingOptions, GethTrace, TraceResult}, - }, +use alloy::{ + consensus::BlockHeader, + eips::BlockId, + rpc::types::trace::geth::{GethTrace, TraceResult}, }; -use reth_node_api::FullNodeComponents; +use itertools::Itertools; use signet_evm::EvmErrored; -use signet_node_types::Pnt; +use signet_hot::{HotKv, model::HotKvRead}; use signet_types::MagicSig; use tracing::Instrument; +use trevm::revm::database::DBErrorMarker; -/// Params for the `debug_traceBlockByNumber` and `debug_traceBlockByHash` -/// endpoints. -#[derive(Debug, serde::Deserialize)] -pub(super) struct TraceBlockParams(T, #[serde(default)] Option); - -/// Params type for `debug_traceTransaction`. -#[derive(Debug, serde::Deserialize)] -pub(super) struct TraceTransactionParams(B256, #[serde(default)] Option); - -/// `debug_traceBlockByNumber` and `debug_traceBlockByHash` endpoint handler. -pub(super) async fn trace_block( +/// `debug_traceBlockByNumber` and `debug_traceBlockByHash` handler. 
+pub(super) async fn trace_block( hctx: HandlerCtx, TraceBlockParams(id, opts): TraceBlockParams, - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> ResponsePayload, DebugError> where T: Into, - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let opts = response_tri!(opts.ok_or(DebugError::from(EthApiError::InvalidTracerConfig))); + let opts = response_tri!(opts.ok_or(DebugError::InvalidTracerConfig)); - let _permit = response_tri!( - ctx.acquire_tracing_permit() - .await - .map_err(|_| DebugError::rpc_error("Failed to acquire tracing permit".into())) - ); + // Acquire a tracing semaphore permit to limit concurrent debug + // requests. The permit is held for the entire handler lifetime and + // is dropped when the async block completes. + let _permit = ctx.acquire_tracing_permit().await; let id = id.into(); let span = tracing::debug_span!("traceBlock", ?id, tracer = ?opts.tracer.as_ref()); let fut = async move { - // Fetch the block by ID - let Some((hash, block)) = response_tri!(ctx.signet().raw_block(id).await) else { + let cold = ctx.cold(); + let block_num = response_tri!(ctx.resolve_block_id(id).map_err(|e| { + tracing::warn!(error = %e, ?id, "block resolution failed"); + DebugError::BlockNotFound(id) + })); + + let (header, txs) = response_tri!( + tokio::try_join!( + cold.get_header_by_number(block_num), + cold.get_transactions_in_block(block_num), + ) + .map_err(|e| { + tracing::warn!(error = %e, block_num, "cold storage read failed"); + DebugError::Cold(e.to_string()) + }) + ); + + let Some(header) = header else { return ResponsePayload::internal_error_message( - EthApiError::HeaderNotFound(id).to_string().into(), + format!("block not found: {id}").into(), ); }; - tracing::debug!(number = block.number(), "Loaded block"); + let block_hash = header.hash(); + let header = header.into_inner(); - // Allocate space for the frames - let mut frames = Vec::with_capacity(block.transaction_count()); + 
tracing::debug!(number = header.number, "Loaded block"); - // Instantiate the EVM with the block - let mut trevm = response_tri!(ctx.trevm(crate::LoadState::Before, block.header())); + let mut frames = Vec::with_capacity(txs.len()); - // Apply all transactions in the block up, tracing each one - tracing::trace!(?opts, "Tracing block transactions"); + // State BEFORE this block. + let db = + response_tri!(ctx.revm_state_at_height(header.number.saturating_sub(1)).map_err(|e| { + tracing::warn!(error = %e, block_num, "hot storage read failed"); + DebugError::Hot(e.to_string()) + })); - let mut txns = block.body().transactions().enumerate().peekable(); + let mut trevm = signet_evm::signet_evm(db, ctx.constants().clone()) + .fill_cfg(&CfgFiller(ctx.chain_id())) + .fill_block(&header); + + let mut txns = txs.iter().enumerate().peekable(); for (idx, tx) in txns .by_ref() .peeking_take_while(|(_, t)| MagicSig::try_from_signature(t.signature()).is_none()) { - let tx_info = TransactionInfo { - hash: Some(*tx.hash()), + let tx_info = alloy::rpc::types::TransactionInfo { + hash: Some(*tx.tx_hash()), index: Some(idx as u64), - block_hash: Some(hash), - block_number: Some(block.header().number()), - base_fee: block.header().base_fee_per_gas(), + block_hash: Some(block_hash), + block_number: Some(header.number), + base_fee: header.base_fee_per_gas(), }; let t = trevm.fill_tx(tx); - let frame; (frame, trevm) = response_tri!(crate::debug::tracer::trace(t, &opts, tx_info)); - frames.push(TraceResult::Success { result: frame, tx_hash: Some(*tx.hash()) }); + frames.push(TraceResult::Success { result: frame, tx_hash: Some(*tx.tx_hash()) }); - tracing::debug!(tx_index = idx, tx_hash = ?tx.hash(), "Traced transaction"); + tracing::debug!(tx_index = idx, tx_hash = ?tx.tx_hash(), "Traced transaction"); } ResponsePayload(Ok(frames)) } .instrument(span); - await_handler!(@response_option hctx.spawn_blocking(fut)) + await_handler!(@response_option hctx.spawn(fut)) } -/// Handle for 
`debug_traceTransaction`. -pub(super) async fn trace_transaction( +/// `debug_traceTransaction` handler. +pub(super) async fn trace_transaction( hctx: HandlerCtx, TraceTransactionParams(tx_hash, opts): TraceTransactionParams, - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> ResponsePayload where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let opts = response_tri!(opts.ok_or(DebugError::from(EthApiError::InvalidTracerConfig))); + let opts = response_tri!(opts.ok_or(DebugError::InvalidTracerConfig)); - let _permit = response_tri!( - ctx.acquire_tracing_permit() - .await - .map_err(|_| DebugError::rpc_error("Failed to acquire tracing permit".into())) - ); + // Held for the handler duration; dropped when the async block completes. + let _permit = ctx.acquire_tracing_permit().await; let span = tracing::debug_span!("traceTransaction", %tx_hash, tracer = ?opts.tracer.as_ref()); let fut = async move { - // Load the transaction by hash - let (tx, meta) = response_tri!( - response_tri!(ctx.signet().raw_transaction_by_hash(tx_hash)) - .ok_or(EthApiError::TransactionNotFound) + let cold = ctx.cold(); + + // Look up the transaction and its containing block. 
+ let confirmed = response_tri!(cold.get_tx_by_hash(tx_hash).await.map_err(|e| { + tracing::warn!(error = %e, %tx_hash, "cold storage read failed"); + DebugError::Cold(e.to_string()) + })); + + let confirmed = response_tri!(confirmed.ok_or(DebugError::TransactionNotFound)); + let (_tx, meta) = confirmed.into_parts(); + + let block_num = meta.block_number(); + let block_hash = meta.block_hash(); + + let (header, txs) = response_tri!( + tokio::try_join!( + cold.get_header_by_number(block_num), + cold.get_transactions_in_block(block_num), + ) + .map_err(|e| { + tracing::warn!(error = %e, block_num, "cold storage read failed"); + DebugError::Cold(e.to_string()) + }) ); - tracing::debug!("Loaded transaction metadata"); + let block_id = BlockId::Number(block_num.into()); + let header = response_tri!(header.ok_or(DebugError::BlockNotFound(block_id))).into_inner(); - // Load the block containing the transaction - let res = response_tri!(ctx.signet().raw_block(meta.block_hash).await); - let (_, block) = - response_tri!(res.ok_or_else(|| EthApiError::HeaderNotFound(meta.block_hash.into()))); + tracing::debug!(number = block_num, "Loaded containing block"); - tracing::debug!(number = block.number(), "Loaded containing block"); + // State BEFORE this block. + let db = + response_tri!(ctx.revm_state_at_height(block_num.saturating_sub(1)).map_err(|e| { + tracing::warn!(error = %e, block_num, "hot storage read failed"); + DebugError::Hot(e.to_string()) + })); - // Load trevm at the start of the block (i.e. 
before any transactions are applied) - let mut trevm = response_tri!(ctx.trevm(crate::LoadState::Before, block.header())); + let mut trevm = signet_evm::signet_evm(db, ctx.constants().clone()) + .fill_cfg(&CfgFiller(ctx.chain_id())) + .fill_block(&header); - // Apply all transactions in the block up to (but not including) the - // target one - let mut txns = block.body().transactions().enumerate().peekable(); - for (_idx, tx) in txns.by_ref().peeking_take_while(|(_, t)| t.hash() != tx.hash()) { + // Replay all transactions up to (but not including) the target + let mut txns = txs.iter().enumerate().peekable(); + for (_idx, tx) in txns.by_ref().peeking_take_while(|(_, t)| t.tx_hash() != &tx_hash) { if MagicSig::try_from_signature(tx.signature()).is_some() { return ResponsePayload::internal_error_message( - EthApiError::TransactionNotFound.to_string().into(), + DebugError::TransactionNotFound.to_string().into(), ); } trevm = response_tri!(trevm.run_tx(tx).map_err(EvmErrored::into_error)).accept_state(); } - let (index, tx) = response_tri!(txns.next().ok_or(EthApiError::TransactionNotFound)); + let (index, tx) = response_tri!(txns.next().ok_or(DebugError::TransactionNotFound)); let trevm = trevm.fill_tx(tx); - let tx_info = TransactionInfo { - hash: Some(*tx.hash()), + let tx_info = alloy::rpc::types::TransactionInfo { + hash: Some(*tx.tx_hash()), index: Some(index as u64), - block_hash: Some(block.hash()), - block_number: Some(block.header().number()), - base_fee: block.header().base_fee_per_gas(), + block_hash: Some(block_hash), + block_number: Some(header.number), + base_fee: header.base_fee_per_gas(), }; let res = response_tri!(crate::debug::tracer::trace(trevm, &opts, tx_info)).0; @@ -167,5 +201,5 @@ where } .instrument(span); - await_handler!(@response_option hctx.spawn_blocking(fut)) + await_handler!(@response_option hctx.spawn(fut)) } diff --git a/crates/rpc/src/debug/error.rs b/crates/rpc/src/debug/error.rs index c4fde13..a078034 100644 --- 
a/crates/rpc/src/debug/error.rs +++ b/crates/rpc/src/debug/error.rs @@ -1,52 +1,42 @@ -use reth::{ - providers::ProviderError, - rpc::{eth::filter::EthFilterError, server_types::eth::EthApiError}, -}; -use std::borrow::Cow; +//! Error types for the debug namespace. -/// Errors that can occur when interacting with the `eth_` namespace. -#[derive(Debug, thiserror::Error, Clone)] -pub enum DebugError { - /// Provider error: [`ProviderError`]. - #[error("Provider error: {0}")] - Provider(#[from] ProviderError), - /// Filter error [`EthFilterError`]. - #[error("Filter error: {0}")] - Filter(Cow<'static, str>), - /// Eth API error: [`EthApiError`]. - #[error("Eth API error: {0}")] - Rpc(Cow<'static, str>), -} +use alloy::eips::BlockId; -impl DebugError { - /// Create a new filter error. - pub const fn filter_error(msg: Cow<'static, str>) -> Self { - Self::Filter(msg) - } - - /// Create a new RPC error. - pub const fn rpc_error(msg: Cow<'static, str>) -> Self { - Self::Rpc(msg) - } -} - -impl From for DebugError { - fn from(err: EthFilterError) -> Self { - Self::filter_error(err.to_string().into()) - } -} - -impl From for DebugError { - fn from(err: EthApiError) -> Self { - Self::rpc_error(err.to_string().into()) - } +/// Errors that can occur in the `debug` namespace. +/// +/// The [`serde::Serialize`] impl emits sanitized messages suitable for +/// API responses — internal storage details are not exposed to callers. +/// Use [`tracing`] to log the full error chain before constructing the +/// variant. +#[derive(Debug, Clone, thiserror::Error)] +pub enum DebugError { + /// Cold storage error. + #[error("cold storage error")] + Cold(String), + /// Hot storage error. + #[error("hot storage error")] + Hot(String), + /// Invalid tracer configuration. + #[error("invalid tracer config")] + InvalidTracerConfig, + /// Unsupported tracer type. + #[error("unsupported: {0}")] + Unsupported(&'static str), + /// EVM execution error. 
+ #[error("evm execution error")] + Evm(String), + /// Block not found. + #[error("block not found: {0}")] + BlockNotFound(BlockId), + /// Transaction not found. + #[error("transaction not found")] + TransactionNotFound, } impl DebugError { - /// Turn into a string by value, allows for `.map_err(EthError::to_string)` - /// to be used. + /// Convert to a string by value. pub fn into_string(self) -> String { - ToString::to_string(&self) + self.to_string() } } diff --git a/crates/rpc/src/debug/mod.rs b/crates/rpc/src/debug/mod.rs index 5a8648e..11e373d 100644 --- a/crates/rpc/src/debug/mod.rs +++ b/crates/rpc/src/debug/mod.rs @@ -1,24 +1,25 @@ -mod endpoints; -use endpoints::*; +//! Debug namespace RPC router backed by storage. +mod endpoints; +use endpoints::{trace_block, trace_transaction}; mod error; pub use error::DebugError; +pub(crate) mod tracer; +mod types; -mod tracer; - -use crate::ctx::RpcCtx; +use crate::config::StorageRpcCtx; use alloy::{eips::BlockNumberOrTag, primitives::B256}; -use reth_node_api::FullNodeComponents; -use signet_node_types::Pnt; +use signet_hot::{HotKv, model::HotKvRead}; +use trevm::revm::database::DBErrorMarker; -/// Instantiate a `debug` API router. -pub fn debug() -> ajj::Router> +/// Instantiate a `debug` API router backed by storage. +pub(crate) fn debug() -> ajj::Router> where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { ajj::Router::new() - .route("traceBlockByNumber", trace_block::) - .route("traceBlockByHash", trace_block::) - .route("traceTransaction", trace_transaction) + .route("traceBlockByNumber", trace_block::) + .route("traceBlockByHash", trace_block::) + .route("traceTransaction", trace_transaction::) } diff --git a/crates/rpc/src/debug/tracer.rs b/crates/rpc/src/debug/tracer.rs index 36befa4..fa24073 100644 --- a/crates/rpc/src/debug/tracer.rs +++ b/crates/rpc/src/debug/tracer.rs @@ -1,16 +1,13 @@ -//! 
This file is largely adapted from reth: `crates/rpc/rpc/src/debug.rs` +//! Core tracing logic for the debug namespace. //! -//! In particular the `debug_trace_call` function. - -use crate::DebugError; -use reth::rpc::{ - server_types::eth::EthApiError, - types::{ - TransactionInfo, - trace::geth::{ - FourByteFrame, GethDebugBuiltInTracerType, GethDebugTracerConfig, GethDebugTracerType, - GethDebugTracingOptions, GethTrace, NoopFrame, - }, +//! Largely adapted from reth: `crates/rpc/rpc/src/debug.rs`. + +use crate::debug::DebugError; +use alloy::rpc::types::{ + TransactionInfo, + trace::geth::{ + FourByteFrame, GethDebugBuiltInTracerType, GethDebugTracerConfig, GethDebugTracerType, + GethDebugTracingOptions, GethTrace, NoopFrame, }, }; use revm_inspectors::tracing::{ @@ -34,83 +31,66 @@ where Db: Database + DatabaseCommit + DatabaseRef, Insp: Inspector>, { - let Some(tracer) = &config.tracer else { return Err(EthApiError::InvalidTracerConfig.into()) }; + let Some(tracer) = &config.tracer else { + return Err(DebugError::InvalidTracerConfig); + }; let GethDebugTracerType::BuiltInTracer(built_in) = tracer else { - return Err(EthApiError::Unsupported("JS tracer").into()); + return Err(DebugError::Unsupported("JS tracer")); }; match built_in { - GethDebugBuiltInTracerType::Erc7562Tracer => trace_erc7562(trevm).map_err(Into::into), - GethDebugBuiltInTracerType::FourByteTracer => trace_four_byte(trevm).map_err(Into::into), - GethDebugBuiltInTracerType::CallTracer => { - trace_call(&config.tracer_config, trevm).map_err(Into::into) + GethDebugBuiltInTracerType::Erc7562Tracer => { + Err(DebugError::Unsupported("ERC-7562 tracing is not yet implemented")) } + GethDebugBuiltInTracerType::FourByteTracer => trace_four_byte(trevm), + GethDebugBuiltInTracerType::CallTracer => trace_call(&config.tracer_config, trevm), GethDebugBuiltInTracerType::FlatCallTracer => { - trace_flat_call(&config.tracer_config, trevm, tx_info).map_err(Into::into) - } - 
GethDebugBuiltInTracerType::PreStateTracer => { - trace_pre_state(&config.tracer_config, trevm).map_err(Into::into) + trace_flat_call(&config.tracer_config, trevm, tx_info) } + GethDebugBuiltInTracerType::PreStateTracer => trace_pre_state(&config.tracer_config, trevm), GethDebugBuiltInTracerType::NoopTracer => Ok(( NoopFrame::default().into(), trevm .run() - .map_err(|err| EthApiError::EvmCustom(err.into_error().to_string()))? + .map_err(|err| DebugError::Evm(err.into_error().to_string()))? .accept_state(), )), - GethDebugBuiltInTracerType::MuxTracer => { - trace_mux(&config.tracer_config, trevm, tx_info).map_err(Into::into) - } + GethDebugBuiltInTracerType::MuxTracer => trace_mux(&config.tracer_config, trevm, tx_info), } } -fn trace_erc7562( - _trevm: EvmReady, -) -> Result<(GethTrace, EvmNeedsTx), EthApiError> -where - Db: Database + DatabaseCommit, - Insp: Inspector>, -{ - // ERC-7562 tracing is not yet implemented. - Err(EthApiError::Unsupported("ERC-7562 tracing is not yet implemented")) -} - -/// Traces a call using [`GethDebugBuiltInTracerType::FourByteTracer`]. fn trace_four_byte( trevm: EvmReady, -) -> Result<(GethTrace, EvmNeedsTx), EthApiError> +) -> Result<(GethTrace, EvmNeedsTx), DebugError> where Db: Database + DatabaseCommit, Insp: Inspector>, { let mut four_byte = FourByteInspector::default(); - - let trevm = trevm.try_with_inspector(&mut four_byte, |trevm| trevm.run()); - - let trevm = trevm.map_err(|e| EthApiError::EvmCustom(e.into_error().to_string()))?; - + let trevm = trevm + .try_with_inspector(&mut four_byte, |trevm| trevm.run()) + .map_err(|e| DebugError::Evm(e.into_error().to_string()))?; Ok((FourByteFrame::from(four_byte).into(), trevm.accept_state())) } -/// Traces a call using [`GethDebugBuiltInTracerType::CallTracer`]. 
fn trace_call( tracer_config: &GethDebugTracerConfig, trevm: EvmReady, -) -> Result<(GethTrace, EvmNeedsTx), EthApiError> +) -> Result<(GethTrace, EvmNeedsTx), DebugError> where Db: Database + DatabaseCommit, Insp: Inspector>, { let call_config = - tracer_config.clone().into_call_config().map_err(|_| EthApiError::InvalidTracerConfig)?; + tracer_config.clone().into_call_config().map_err(|_| DebugError::InvalidTracerConfig)?; let mut inspector = TracingInspector::new(TracingInspectorConfig::from_geth_call_config(&call_config)); - let trevm = trevm.try_with_inspector(&mut inspector, |trevm| trevm.run()); - - let trevm = trevm.map_err(|e| EthApiError::EvmCustom(e.into_error().to_string()))?; + let trevm = trevm + .try_with_inspector(&mut inspector, |trevm| trevm.run()) + .map_err(|e| DebugError::Evm(e.into_error().to_string()))?; let frame = inspector .with_transaction_gas_limit(trevm.gas_limit()) @@ -120,11 +100,10 @@ where Ok((frame.into(), trevm.accept_state())) } -/// Traces a call using [`GethDebugBuiltInTracerType::PreStateTracer`] fn trace_pre_state( tracer_config: &GethDebugTracerConfig, trevm: EvmReady, -) -> Result<(GethTrace, EvmNeedsTx), EthApiError> +) -> Result<(GethTrace, EvmNeedsTx), DebugError> where Db: Database + DatabaseCommit + DatabaseRef, Insp: Inspector>, @@ -132,28 +111,26 @@ where let prestate_config = tracer_config .clone() .into_pre_state_config() - .map_err(|_| EthApiError::InvalidTracerConfig)?; + .map_err(|_| DebugError::InvalidTracerConfig)?; let mut inspector = TracingInspector::new(TracingInspectorConfig::from_geth_prestate_config(&prestate_config)); - let trevm = trevm.try_with_inspector(&mut inspector, |trevm| trevm.run()); - - let trevm = trevm.map_err(|e| EthApiError::EvmCustom(e.into_error().to_string()))?; + let trevm = trevm + .try_with_inspector(&mut inspector, |trevm| trevm.run()) + .map_err(|e| DebugError::Evm(e.into_error().to_string()))?; let gas_limit = trevm.gas_limit(); - // NB: Normally we would call 
`trevm.accept_state()` here, but we need the - // state after execution to be UNCOMMITED when we compute the prestate - // diffs. + // NB: state must be UNCOMMITTED for prestate diff computation. let (result, mut trevm) = trevm.take_result_and_state(); let frame = inspector .with_transaction_gas_limit(gas_limit) .into_geth_builder() .geth_prestate_traces(&result, &prestate_config, trevm.inner_mut_unchecked().db_mut()) - .map_err(|err| EthApiError::EvmCustom(err.to_string()))?; + .map_err(|err| DebugError::Evm(err.to_string()))?; - // This is equivalent to calling `trevm.accept_state()`. + // Equivalent to `trevm.accept_state()`. trevm.inner_mut_unchecked().db_mut().commit(result.state); Ok((frame.into(), trevm)) @@ -163,7 +140,7 @@ fn trace_flat_call( tracer_config: &GethDebugTracerConfig, trevm: EvmReady, tx_info: TransactionInfo, -) -> Result<(GethTrace, EvmNeedsTx), EthApiError> +) -> Result<(GethTrace, EvmNeedsTx), DebugError> where Db: Database + DatabaseCommit, Insp: Inspector>, @@ -171,14 +148,14 @@ where let flat_call_config = tracer_config .clone() .into_flat_call_config() - .map_err(|_| EthApiError::InvalidTracerConfig)?; + .map_err(|_| DebugError::InvalidTracerConfig)?; let mut inspector = TracingInspector::new(TracingInspectorConfig::from_flat_call_config(&flat_call_config)); - let trevm = trevm.try_with_inspector(&mut inspector, |trevm| trevm.run()); - - let trevm = trevm.map_err(|e| EthApiError::EvmCustom(e.into_error().to_string()))?; + let trevm = trevm + .try_with_inspector(&mut inspector, |trevm| trevm.run()) + .map_err(|e| DebugError::Evm(e.into_error().to_string()))?; let frame = inspector .with_transaction_gas_limit(trevm.gas_limit()) @@ -192,30 +169,29 @@ fn trace_mux( tracer_config: &GethDebugTracerConfig, trevm: EvmReady, tx_info: TransactionInfo, -) -> Result<(GethTrace, EvmNeedsTx), EthApiError> +) -> Result<(GethTrace, EvmNeedsTx), DebugError> where Db: Database + DatabaseCommit + DatabaseRef, Insp: Inspector>, { let mux_config = - 
tracer_config.clone().into_mux_config().map_err(|_| EthApiError::InvalidTracerConfig)?; + tracer_config.clone().into_mux_config().map_err(|_| DebugError::InvalidTracerConfig)?; let mut inspector = MuxInspector::try_from_config(mux_config) - .map_err(|err| EthApiError::EvmCustom(err.to_string()))?; + .map_err(|err| DebugError::Evm(err.to_string()))?; - let trevm = trevm.try_with_inspector(&mut inspector, |trevm| trevm.run()); - let trevm = trevm.map_err(|e| EthApiError::EvmCustom(e.into_error().to_string()))?; + let trevm = trevm + .try_with_inspector(&mut inspector, |trevm| trevm.run()) + .map_err(|e| DebugError::Evm(e.into_error().to_string()))?; - // NB: Normally we would call `trevm.accept_state()` here, but we need the - // state after execution to be UNCOMMITED when we compute the prestate - // diffs. + // NB: state must be UNCOMMITTED for prestate diff computation. let (result, mut trevm) = trevm.take_result_and_state(); let frame = inspector .try_into_mux_frame(&result, trevm.inner_mut_unchecked().db_mut(), tx_info) - .map_err(|err| EthApiError::EvmCustom(err.to_string()))?; + .map_err(|err| DebugError::Evm(err.to_string()))?; - // This is equivalent to calling `trevm.accept_state()`. + // Equivalent to `trevm.accept_state()`. trevm.inner_mut_unchecked().db_mut().commit(result.state); Ok((frame.into(), trevm)) @@ -235,7 +211,7 @@ where // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: -//. +// // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. 
// diff --git a/crates/rpc-storage/src/debug/types.rs b/crates/rpc/src/debug/types.rs similarity index 100% rename from crates/rpc-storage/src/debug/types.rs rename to crates/rpc/src/debug/types.rs diff --git a/crates/rpc/src/eth/endpoints.rs b/crates/rpc/src/eth/endpoints.rs index da34a30..1cf1078 100644 --- a/crates/rpc/src/eth/endpoints.rs +++ b/crates/rpc/src/eth/endpoints.rs @@ -1,503 +1,780 @@ +//! ETH namespace RPC endpoint implementations. + use crate::{ - ctx::RpcCtx, - eth::{CallErrorData, EthError}, + config::{EvmBlockContext, StorageRpcCtx, gas_oracle}, + eth::{ + error::{CallErrorData, EthError}, + helpers::{ + AddrWithBlock, BlockParams, CfgFiller, FeeHistoryArgs, StorageAtArgs, SubscribeArgs, + TxParams, await_handler, build_receipt, build_rpc_transaction, hot_reader_at_block, + normalize_gas_stateless, response_tri, + }, + types::{ + BlockTransactions, EmptyArray, LazyReceipts, RpcBlock, RpcHeader, RpcReceipt, + RpcTransaction, + }, + }, interest::{FilterOutput, InterestKind}, - receipts::build_signet_receipt, - utils::{await_handler, response_tri}, }; use ajj::{HandlerCtx, ResponsePayload}; use alloy::{ - consensus::{BlockHeader, TxEnvelope}, + consensus::Transaction, eips::{ BlockId, BlockNumberOrTag, + eip1559::BaseFeeParams, eip2718::{Decodable2718, Encodable2718}, + eip2930::AccessListResult, }, - network::Ethereum, - primitives::{Address, B256, U64, U256}, - rpc::types::{ - BlockOverrides, Filter, TransactionRequest, pubsub::SubscriptionKind, state::StateOverride, - }, + primitives::{B256, U64, U256}, + rpc::types::{FeeHistory, Filter, Log}, }; -use reth::{ - network::NetworkInfo, - primitives::TransactionMeta, - providers::{BlockNumReader, StateProviderFactory, TransactionsProvider}, -}; -use reth_node_api::FullNodeComponents; -use reth_rpc_eth_api::{RpcBlock, RpcHeader, RpcReceipt, RpcTransaction}; -use serde::Deserialize; -use signet_evm::EvmErrored; -use signet_node_types::Pnt; -use std::borrow::Cow; +use 
revm_inspectors::access_list::AccessListInspector; +use serde::Serialize; +use signet_cold::{HeaderSpecifier, ReceiptSpecifier}; +use signet_hot::{HistoryRead, HotKv, db::HotDbRead, model::HotKvRead}; use tracing::{Instrument, debug, trace_span}; -use trevm::{EstimationResult, MIN_TRANSACTION_GAS, revm::context::result::ExecutionResult}; - -/// Args for `eth_estimateGas` and `eth_call`. -#[derive(Debug, Deserialize)] -pub(super) struct TxParams( - TransactionRequest, - #[serde(default)] Option, - #[serde(default)] Option, - #[serde(default)] Option>, -); - -/// Args for `eth_getBlockByHash` and `eth_getBlockByNumber`. -#[derive(Debug, Deserialize)] -pub(super) struct BlockParams(T, #[serde(default)] Option); - -/// Args for `eth_feeHistory`. -#[derive(Debug, Deserialize)] -pub(super) struct FeeHistoryArgs(U64, BlockNumberOrTag, #[serde(default)] Option>); - -/// Args for `eth_getStorageAt`. -#[derive(Debug, Deserialize)] -pub(super) struct StorageAtArgs(Address, U256, #[serde(default)] Option); - -/// Args for `eth_getBalance`, `eth_getTransactionCount`, and `eth_getCode`. -#[derive(Debug, Deserialize)] -pub(super) struct AddrWithBlock(Address, #[serde(default)] Option); - -/// Args for `eth_subscribe`. 
-#[derive(Debug, Deserialize)] -pub struct SubscribeArgs(pub SubscriptionKind, #[serde(default)] pub Option>); - -impl TryFrom for InterestKind { - type Error = String; - - fn try_from(args: SubscribeArgs) -> Result { - match args.0 { - SubscriptionKind::Logs => { - if let Some(filter) = args.1 { - Ok(InterestKind::Log(filter)) - } else { - Err("missing filter for Logs subscription".to_string()) - } - } - SubscriptionKind::NewHeads => { - if args.1.is_some() { - Err("filter not supported for NewHeads subscription".to_string()) - } else { - Ok(InterestKind::Block) - } - } +use trevm::{ + EstimationResult, revm::context::result::ExecutionResult, revm::database::DBErrorMarker, +}; - _ => Err(format!("unsupported subscription kind: {:?}", args.0)), - } - } +// --------------------------------------------------------------------------- +// Not Supported +// --------------------------------------------------------------------------- + +pub(crate) async fn not_supported() -> ResponsePayload<(), ()> { + ResponsePayload::method_not_found() } -pub(super) async fn not_supported() -> ResponsePayload<(), ()> { - ResponsePayload::internal_error_message(Cow::Borrowed( - "Method not supported. See signet documentation for a list of unsupported methods: https://signet.sh/docs", - )) +/// Response for `eth_syncing`. +/// +/// Returns `false` when the node is fully synced, or a sync-status +/// object when it is still catching up. +#[derive(Debug, Clone, Serialize)] +#[serde(untagged)] +pub(crate) enum SyncingResponse { + /// Node is fully synced. + NotSyncing(bool), + /// Node is still syncing. + Syncing { + /// Block number the node started syncing from. + starting_block: U64, + /// Current block the node has synced to. + current_block: U64, + /// Highest known block number on the network. + highest_block: U64, + }, +} + +/// `eth_syncing` — returns sync status or `false` when fully synced. 
+pub(crate) async fn syncing(ctx: StorageRpcCtx) -> Result { + match ctx.tags().sync_status() { + Some(status) => Ok(SyncingResponse::Syncing { + starting_block: U64::from(status.starting_block), + current_block: U64::from(status.current_block), + highest_block: U64::from(status.highest_block), + }), + None => Ok(SyncingResponse::NotSyncing(false)), + } } /// Uncle count is always zero — Signet has no uncle blocks. -pub(super) async fn uncle_count() -> Result { +pub(crate) async fn uncle_count() -> Result { Ok(U64::ZERO) } /// Uncle block is always absent — Signet has no uncle blocks. -pub(super) async fn uncle_block() -> Result, ()> { +pub(crate) async fn uncle_block() -> Result, ()> { Ok(None) } -pub(super) async fn protocol_version(ctx: RpcCtx) -> Result -where - Host: FullNodeComponents, - Signet: Pnt, -{ - ctx.host() - .network() - .network_status() - .await - .map(|info| info.protocol_version) - .map(U64::from) - .map_err(|s| s.to_string()) +// --------------------------------------------------------------------------- +// Simple Queries +// --------------------------------------------------------------------------- + +/// `eth_blockNumber` — returns the latest block number from block tags. +pub(crate) async fn block_number(ctx: StorageRpcCtx) -> Result { + Ok(U64::from(ctx.tags().latest())) } -pub(super) async fn syncing(ctx: RpcCtx) -> Result +/// `eth_chainId` — returns the configured chain ID. +pub(crate) async fn chain_id(ctx: StorageRpcCtx) -> Result { + Ok(U64::from(ctx.chain_id())) +} + +// --------------------------------------------------------------------------- +// Gas & Fee Queries +// --------------------------------------------------------------------------- + +/// `eth_gasPrice` — suggests gas price based on recent block tips + base fee. 
+pub(crate) async fn gas_price(hctx: HandlerCtx, ctx: StorageRpcCtx) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - Ok(ctx.host().network().is_syncing()) + let task = async move { + let latest = ctx.tags().latest(); + let cold = ctx.cold(); + + let tip = gas_oracle::suggest_tip_cap(&cold, latest, ctx.config()) + .await + .map_err(|e| e.to_string())?; + + let base_fee = cold + .get_header_by_number(latest) + .await + .map_err(|e| e.to_string())? + .and_then(|h| h.base_fee_per_gas) + .unwrap_or_default(); + + Ok(tip + U256::from(base_fee)) + }; + + await_handler!(@option hctx.spawn(task)) } -pub(super) async fn block_number(ctx: RpcCtx) -> Result +/// `eth_maxPriorityFeePerGas` — suggests priority fee from recent block tips. +pub(crate) async fn max_priority_fee_per_gas( + hctx: HandlerCtx, + ctx: StorageRpcCtx, +) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - ctx.signet().provider().last_block_number().map(U64::from).map_err(|s| s.to_string()) + let task = async move { + let latest = ctx.tags().latest(); + gas_oracle::suggest_tip_cap(&ctx.cold(), latest, ctx.config()) + .await + .map_err(|e| e.to_string()) + }; + + await_handler!(@option hctx.spawn(task)) } -pub(super) async fn chain_id(ctx: RpcCtx) -> Result +/// `eth_feeHistory` — returns base fee and reward percentile data. 
+pub(crate) async fn fee_history( + hctx: HandlerCtx, + FeeHistoryArgs(block_count, newest, reward_percentiles): FeeHistoryArgs, + ctx: StorageRpcCtx, +) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - Ok(U64::from(ctx.signet().constants().ru_chain_id())) + let task = async move { + let mut block_count = block_count.to::(); + + if block_count == 0 { + return Ok(FeeHistory::default()); + } + + let max_fee_history = if reward_percentiles.is_none() { + ctx.config().max_header_history + } else { + ctx.config().max_block_history + }; + + block_count = block_count.min(max_fee_history); + + let newest = if newest.is_pending() { + block_count = block_count.saturating_sub(1); + BlockNumberOrTag::Latest + } else { + newest + }; + + let end_block = ctx.resolve_block_tag(newest); + let end_block_plus = end_block + 1; + + block_count = block_count.min(end_block_plus); + + // Validate percentiles + if let Some(percentiles) = &reward_percentiles + && percentiles.windows(2).any(|w| w[0] > w[1] || w[0] > 100.) 
+ { + return Err("invalid reward percentiles".to_string()); + } + + let start_block = end_block_plus - block_count; + let cold = ctx.cold(); + + let specs: Vec<_> = (start_block..=end_block).map(HeaderSpecifier::Number).collect(); + let headers = cold.get_headers(specs).await.map_err(|e| e.to_string())?; + + let mut base_fee_per_gas: Vec = Vec::with_capacity(headers.len() + 1); + let mut gas_used_ratio: Vec = Vec::with_capacity(headers.len()); + let mut rewards: Vec> = Vec::new(); + + for (offset, maybe_header) in headers.iter().enumerate() { + let Some(header) = maybe_header else { + return Err(format!("missing header at block {}", start_block + offset as u64)); + }; + + base_fee_per_gas.push(header.base_fee_per_gas.unwrap_or_default() as u128); + gas_used_ratio.push(if header.gas_limit > 0 { + header.gas_used as f64 / header.gas_limit as f64 + } else { + 0.0 + }); + + if let Some(percentiles) = &reward_percentiles { + let block_num = start_block + offset as u64; + + let (txs, receipts) = tokio::try_join!( + cold.get_transactions_in_block(block_num), + cold.get_receipts_in_block(block_num), + ) + .map_err(|e| e.to_string())?; + + let block_rewards = calculate_reward_percentiles( + percentiles, + header.gas_used, + header.base_fee_per_gas.unwrap_or_default(), + &txs, + &receipts, + ); + rewards.push(block_rewards); + } + } + + // Next block base fee + if let Some(last_header) = headers.last().and_then(|h| h.as_ref()) { + base_fee_per_gas.push( + last_header.next_block_base_fee(BaseFeeParams::ethereum()).unwrap_or_default() + as u128, + ); + } + + let base_fee_per_blob_gas = vec![0; base_fee_per_gas.len()]; + let blob_gas_used_ratio = vec![0.; gas_used_ratio.len()]; + + Ok(FeeHistory { + base_fee_per_gas, + gas_used_ratio, + base_fee_per_blob_gas, + blob_gas_used_ratio, + oldest_block: start_block, + reward: reward_percentiles.map(|_| rewards), + }) + }; + + await_handler!(@option hctx.spawn(task)) } -pub(super) async fn block( +/// Calculate reward percentiles for 
a single block. +/// +/// Sorts transactions by effective tip ascending, then walks +/// cumulative gas used to find the tip value at each percentile. +fn calculate_reward_percentiles( + percentiles: &[f64], + gas_used: u64, + base_fee: u64, + txs: &[signet_storage_types::RecoveredTx], + receipts: &[signet_cold::ColdReceipt], +) -> Vec { + if gas_used == 0 || txs.is_empty() { + return vec![0; percentiles.len()]; + } + + // Pair each tx's effective tip with its gas used. + let mut tx_gas_and_tip: Vec<(u64, u128)> = txs + .iter() + .zip(receipts.iter()) + .map(|(tx, receipt)| { + let tip = tx.effective_tip_per_gas(base_fee).unwrap_or_default(); + (receipt.gas_used, tip) + }) + .collect(); + + // Sort by tip ascending + tx_gas_and_tip.sort_by_key(|&(_, tip)| tip); + + let mut result = Vec::with_capacity(percentiles.len()); + let mut cumulative_gas: u64 = 0; + let mut tx_idx = 0; + + for &percentile in percentiles { + let threshold = (gas_used as f64 * percentile / 100.0) as u64; + + while tx_idx < tx_gas_and_tip.len() - 1 { + cumulative_gas += tx_gas_and_tip[tx_idx].0; + if cumulative_gas >= threshold { + break; + } + tx_idx += 1; + } + + result.push(tx_gas_and_tip[tx_idx].1); + } + + result +} + +// --------------------------------------------------------------------------- +// Block Queries +// --------------------------------------------------------------------------- + +/// `eth_getBlockByHash` / `eth_getBlockByNumber` — resolve block, fetch +/// header + transactions from cold storage, assemble RPC block response. 
+pub(crate) async fn block( hctx: HandlerCtx, BlockParams(t, full): BlockParams, - ctx: RpcCtx, -) -> Result>, String> + ctx: StorageRpcCtx, +) -> Result, String> where T: Into, - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { let id = t.into(); - let task = async move { ctx.signet().block(id, full).await.map_err(|e| e.to_string()) }; + let full = full.unwrap_or(false); - await_handler!(@option hctx.spawn_blocking(task)) + let task = async move { + let cold = ctx.cold(); + let block_num = ctx.resolve_block_id(id).map_err(|e| e.to_string())?; + + let (header, txs) = tokio::try_join!( + cold.get_header_by_number(block_num), + cold.get_transactions_in_block(block_num), + ) + .map_err(|e| e.to_string())?; + + let Some(header) = header else { + return Ok(None); + }; + + let block_hash = header.hash(); + let base_fee = header.base_fee_per_gas; + + let transactions = if full { + BlockTransactions::Full { txs, block_num, block_hash, base_fee } + } else { + BlockTransactions::Hashes(txs) + }; + + Ok(Some(RpcBlock { + header: alloy::rpc::types::Header { + inner: header.into_inner(), + hash: block_hash, + total_difficulty: None, + size: None, + }, + transactions, + uncles: EmptyArray, + })) + }; + + await_handler!(@option hctx.spawn(task)) } -pub(super) async fn block_tx_count( +/// `eth_getBlockTransactionCount*` — transaction count in a block. 
+pub(crate) async fn block_tx_count( hctx: HandlerCtx, (t,): (T,), - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> Result, String> where T: Into, - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { let id = t.into(); - let task = async move { ctx.signet().tx_count(id).await.map_err(|e| e.to_string()) }; - await_handler!(@option hctx.spawn_blocking(task)) + let task = async move { + let cold = ctx.cold(); + let block_num = ctx.resolve_block_id(id).map_err(|e| e.to_string())?; + + cold.get_transaction_count(block_num) + .await + .map(|c| Some(U64::from(c))) + .map_err(|e| e.to_string()) + }; + + await_handler!(@option hctx.spawn(task)) } -pub(super) async fn block_receipts( +/// `eth_getBlockReceipts` — all receipts in a block. +pub(crate) async fn block_receipts( hctx: HandlerCtx, (id,): (BlockId,), - ctx: RpcCtx, -) -> Result>>, String> + ctx: StorageRpcCtx, +) -> Result, String> where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { let task = async move { - let Some(receipts) = ctx.signet().raw_receipts(id).await.map_err(|e| e.to_string())? else { - return Ok(None); - }; + let cold = ctx.cold(); + let block_num = ctx.resolve_block_id(id).map_err(|e| e.to_string())?; - let Some((block_hash, block)) = - ctx.signet().raw_block(id).await.map_err(|e| e.to_string())? 
- else { + let (header, txs, receipts) = tokio::try_join!( + cold.get_header_by_number(block_num), + cold.get_transactions_in_block(block_num), + cold.get_receipts_in_block(block_num), + ) + .map_err(|e| e.to_string())?; + + let Some(header) = header else { return Ok(None); }; - let header = block.header(); - let block_number = header.number; let base_fee = header.base_fee_per_gas; - let excess_blob_gas = None; - let timestamp = header.timestamp; - - block - .body() - .transactions() - .zip(receipts.iter()) - .enumerate() - .map(|(idx, (tx, receipt))| { - let meta = TransactionMeta { - tx_hash: *tx.hash(), - index: idx as u64, - block_hash, - block_number, - base_fee, - excess_blob_gas, - timestamp, - }; - build_signet_receipt(tx.to_owned(), meta, receipt.to_owned(), receipts.to_vec()) + + Ok(Some(LazyReceipts { txs, receipts, base_fee })) + }; + + await_handler!(@option hctx.spawn(task)) +} + +/// `eth_getBlockHeaderByHash` / `eth_getBlockHeaderByNumber`. +pub(crate) async fn header_by( + hctx: HandlerCtx, + (t,): (T,), + ctx: StorageRpcCtx, +) -> Result, String> +where + T: Into, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let id = t.into(); + + let task = async move { + ctx.resolve_header(id) + .map(|opt| { + opt.map(|sh| { + let hash = sh.hash(); + alloy::rpc::types::Header { + inner: sh.into_inner(), + hash, + total_difficulty: None, + size: None, + } + }) }) - .collect::, _>>() - .map(Some) .map_err(|e| e.to_string()) }; await_handler!(@option hctx.spawn_blocking(task)) } -pub(super) async fn raw_transaction_by_hash( +// --------------------------------------------------------------------------- +// Transaction Queries +// --------------------------------------------------------------------------- + +/// `eth_getTransactionByHash` — look up transaction by hash from cold storage. 
+pub(crate) async fn transaction_by_hash( hctx: HandlerCtx, (hash,): (B256,), - ctx: RpcCtx, -) -> Result, String> + ctx: StorageRpcCtx, +) -> Result, String> where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { let task = async move { - ctx.signet() - .provider() - .transaction_by_hash(hash) - .map_err(|e| e.to_string()) - .map(|tx| tx.as_ref().map(Encodable2718::encoded_2718).map(Into::into)) + let cold = ctx.cold(); + let Some(confirmed) = cold.get_tx_by_hash(hash).await.map_err(|e| e.to_string())? else { + return Ok(None); + }; + + let (tx, meta) = confirmed.into_parts(); + + // Fetch header for base_fee + let header = + cold.get_header_by_number(meta.block_number()).await.map_err(|e| e.to_string())?; + let base_fee = header.and_then(|h| h.base_fee_per_gas); + + Ok(Some(build_rpc_transaction(&tx, &meta, base_fee))) }; - await_handler!(@option hctx.spawn_blocking(task)) + await_handler!(@option hctx.spawn(task)) } -pub(super) async fn transaction_by_hash( +/// `eth_getRawTransactionByHash` — RLP-encoded transaction bytes. +pub(crate) async fn raw_transaction_by_hash( hctx: HandlerCtx, (hash,): (B256,), - ctx: RpcCtx, -) -> Result>, String> + ctx: StorageRpcCtx, +) -> Result, String> where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let task = async move { ctx.signet().rpc_transaction_by_hash(hash).map_err(|e| e.to_string()) }; + let task = async move { + ctx.cold() + .get_tx_by_hash(hash) + .await + .map(|opt| opt.map(|c| c.into_inner().encoded_2718().into())) + .map_err(|e| e.to_string()) + }; - await_handler!(@option hctx.spawn_blocking(task)) + await_handler!(@option hctx.spawn(task)) } -pub(super) async fn raw_transaction_by_block_and_index( +/// `eth_getTransactionByBlock*AndIndex` — transaction by position in block. 
+pub(crate) async fn transaction_by_block_and_index( hctx: HandlerCtx, (t, index): (T, U64), - ctx: RpcCtx, -) -> Result, String> + ctx: StorageRpcCtx, +) -> Result, String> where T: Into, - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let id: BlockId = t.into(); + let id = t.into(); + let task = async move { - let Some((_, block)) = ctx.signet().raw_block(id).await.map_err(|e| e.to_string())? else { + let cold = ctx.cold(); + let block_num = ctx.resolve_block_id(id).map_err(|e| e.to_string())?; + + let Some(confirmed) = cold + .get_tx_by_block_and_index(block_num, index.to::()) + .await + .map_err(|e| e.to_string())? + else { return Ok(None); }; - Ok(block.body().transactions.get(index.to::()).map(|tx| tx.encoded_2718().into())) + let (tx, meta) = confirmed.into_parts(); + let header = + cold.get_header_by_number(meta.block_number()).await.map_err(|e| e.to_string())?; + let base_fee = header.and_then(|h| h.base_fee_per_gas); + + Ok(Some(build_rpc_transaction(&tx, &meta, base_fee))) }; - await_handler!(@option hctx.spawn_blocking(task)) + await_handler!(@option hctx.spawn(task)) } -pub(super) async fn transaction_by_block_and_index( +/// `eth_getRawTransactionByBlock*AndIndex` — raw RLP bytes by position. 
+pub(crate) async fn raw_transaction_by_block_and_index( hctx: HandlerCtx, (t, index): (T, U64), - ctx: RpcCtx, -) -> Result>, String> + ctx: StorageRpcCtx, +) -> Result, String> where T: Into, - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { let id = t.into(); let task = async move { - ctx.signet() - .rpc_transaction_by_block_idx(id, index.to::()) + let cold = ctx.cold(); + let block_num = ctx.resolve_block_id(id).map_err(|e| e.to_string())?; + + cold.get_tx_by_block_and_index(block_num, index.to::()) .await + .map(|opt| opt.map(|c| c.into_inner().encoded_2718().into())) .map_err(|e| e.to_string()) }; - await_handler!(@option hctx.spawn_blocking(task)) + await_handler!(@option hctx.spawn(task)) } -pub(super) async fn transaction_receipt( +/// `eth_getTransactionReceipt` — receipt by tx hash. Fetches the receipt, +/// then the associated transaction and header for derived fields. +pub(crate) async fn transaction_receipt( hctx: HandlerCtx, (hash,): (B256,), - ctx: RpcCtx, -) -> Result>, String> + ctx: StorageRpcCtx, +) -> Result, String> where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let task = - async move { ctx.signet().rpc_receipt_by_hash(hash).await.map_err(|e| e.to_string()) }; + let task = async move { + let cold = ctx.cold(); - await_handler!(@option hctx.spawn_blocking(task)) + let Some(cr) = + cold.get_receipt(ReceiptSpecifier::TxHash(hash)).await.map_err(|e| e.to_string())? 
+ else { + return Ok(None); + }; + + let (tx, header) = tokio::try_join!( + cold.get_tx_by_hash(cr.tx_hash), + cold.get_header_by_number(cr.block_number), + ) + .map_err(|e| e.to_string())?; + + let tx = tx.ok_or(EthError::TransactionMissing).map_err(|e| e.to_string())?.into_inner(); + let base_fee = header.and_then(|h| h.base_fee_per_gas); + + Ok(Some(build_receipt(&cr, &tx, base_fee))) + }; + + await_handler!(@option hctx.spawn(task)) } -pub(super) async fn balance( +// --------------------------------------------------------------------------- +// Account State (Hot Storage) +// --------------------------------------------------------------------------- + +/// `eth_getBalance` — account balance at a given block from hot storage. +pub(crate) async fn balance( hctx: HandlerCtx, AddrWithBlock(address, block): AddrWithBlock, - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let block = block.unwrap_or(BlockId::latest()); + let id = block.unwrap_or(BlockId::latest()); + let task = async move { - let state = ctx.signet().provider().state_by_block_id(block).map_err(|e| e.to_string())?; - let bal = state.account_balance(&address).map_err(|e| e.to_string())?; - Ok(bal.unwrap_or_default()) + let (reader, height) = hot_reader_at_block(&ctx, id)?; + let acct = + reader.get_account_at_height(&address, Some(height)).map_err(|e| e.to_string())?; + + Ok(acct.map(|a| a.balance).unwrap_or_default()) }; await_handler!(@option hctx.spawn_blocking(task)) } -pub(super) async fn storage_at( +/// `eth_getStorageAt` — contract storage slot at a given block. 
+pub(crate) async fn storage_at( hctx: HandlerCtx, StorageAtArgs(address, key, block): StorageAtArgs, - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let block = block.unwrap_or(BlockId::latest()); + let id = block.unwrap_or(BlockId::latest()); + let task = async move { - let state = ctx.signet().provider().state_by_block_id(block).map_err(|e| e.to_string())?; - let val = state.storage(address, key.into()).map_err(|e| e.to_string())?; + let (reader, height) = hot_reader_at_block(&ctx, id)?; + let val = reader + .get_storage_at_height(&address, &key, Some(height)) + .map_err(|e| e.to_string())?; + Ok(val.unwrap_or_default().to_be_bytes().into()) }; await_handler!(@option hctx.spawn_blocking(task)) } -pub(super) async fn addr_tx_count( +/// `eth_getTransactionCount` — account nonce at a given block. +pub(crate) async fn addr_tx_count( hctx: HandlerCtx, AddrWithBlock(address, block): AddrWithBlock, - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let block = block.unwrap_or(BlockId::latest()); + let id = block.unwrap_or(BlockId::latest()); + let task = async move { - let state = ctx.signet().provider().state_by_block_id(block).map_err(|e| e.to_string())?; - let count = state.account_nonce(&address).map_err(|e| e.to_string())?; - Ok(U64::from(count.unwrap_or_default())) + let (reader, height) = hot_reader_at_block(&ctx, id)?; + let acct = + reader.get_account_at_height(&address, Some(height)).map_err(|e| e.to_string())?; + + Ok(U64::from(acct.map(|a| a.nonce).unwrap_or_default())) }; await_handler!(@option hctx.spawn_blocking(task)) } -pub(super) async fn code_at( +/// `eth_getCode` — contract bytecode at a given block. 
+pub(crate) async fn code_at( hctx: HandlerCtx, AddrWithBlock(address, block): AddrWithBlock, - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let block = block.unwrap_or(BlockId::latest()); + let id = block.unwrap_or(BlockId::latest()); + let task = async move { - let state = ctx.signet().provider().state_by_block_id(block).map_err(|e| e.to_string())?; - let code = state.account_code(&address).map_err(|e| e.to_string())?; - Ok(code.unwrap_or_default().original_bytes()) + let (reader, height) = hot_reader_at_block(&ctx, id)?; + let acct = + reader.get_account_at_height(&address, Some(height)).map_err(|e| e.to_string())?; + + let Some(acct) = acct else { + return Ok(alloy::primitives::Bytes::new()); + }; + + let Some(code_hash) = acct.bytecode_hash else { + return Ok(alloy::primitives::Bytes::new()); + }; + + let code = reader.get_bytecode(&code_hash).map_err(|e| e.to_string())?; + + Ok(code.map(|c| c.original_bytes()).unwrap_or_default()) }; await_handler!(@option hctx.spawn_blocking(task)) } -pub(super) async fn header_by( - hctx: HandlerCtx, - (t,): (T,), - ctx: RpcCtx, -) -> Result>, String> -where - T: Into, - Host: FullNodeComponents, - Signet: Pnt, -{ - let id = t.into(); - - await_handler!(@option hctx.spawn_blocking_with_ctx(|hctx| async move { - Ok(block(hctx, BlockParams(id, None), ctx).await?.map(|block| block.header)) - })) -} +// --------------------------------------------------------------------------- +// EVM Execution +// --------------------------------------------------------------------------- -/// Normalize transaction request gas, without making DB reads +/// Shared EVM call execution used by `eth_call` and `eth_estimateGas`. 
/// -/// Does the following: -/// - If the gas is below `MIN_TRANSACTION_GAS`, set it to `None` -/// - If the gas is above the `rpc_gas_cap`, set it to the `rpc_gas_cap` -/// - Otherwise, do nothing -const fn normalize_gas_stateless(request: &mut TransactionRequest, max_gas: u64) { - match request.gas { - Some(..MIN_TRANSACTION_GAS) => request.gas = None, - Some(val) if val > max_gas => request.gas = Some(max_gas), - _ => {} - } -} - -/// We want to ensure that req.gas is not less than `MIN_TRANSACTION_GAS` -/// coming into this. -pub(super) async fn run_call( +/// Resolves the block, builds a revm instance with the requested state +/// and block overrides, then executes the transaction request. +pub(crate) async fn run_call( hctx: HandlerCtx, TxParams(request, block, state_overrides, block_overrides): TxParams, - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> ResponsePayload where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { let id = block.unwrap_or(BlockId::latest()); - - // this span is verbose yo. 
- let span = trace_span!( - "run_call", - ?request, - block_id = %id, - state_overrides = ?state_overrides.as_ref().map(StateOverride::len).unwrap_or_default(), - block_overrides = ?block_overrides.is_some(), - block_cfg = tracing::field::Empty, - ); + let span = trace_span!("run_call", block_id = %id); let task = async move { - let block_cfg = match ctx.signet().block_cfg(id).await { - Ok(block_cfg) => block_cfg, - Err(e) => { - return ResponsePayload::internal_error_with_message_and_obj( - "error while loading block cfg".into(), - e.to_string().into(), - ); - } - }; + let EvmBlockContext { header, db } = response_tri!(ctx.resolve_evm_block(id)); - // Set up trevm - let trevm = response_tri!(ctx.trevm(id.into(), &block_cfg)); + let trevm = signet_evm::signet_evm(db, ctx.constants().clone()) + .fill_cfg(&CfgFiller(ctx.chain_id())) + .fill_block(&header); - let mut trevm = response_tri!(trevm.maybe_apply_state_overrides(state_overrides.as_ref())) + let trevm = response_tri!(trevm.maybe_apply_state_overrides(state_overrides.as_ref())) .maybe_apply_block_overrides(block_overrides.as_deref()) .fill_tx(&request); - // AFTER applying overrides and filling the tx, we want to statefully - // modify the gas cap. + let mut trevm = trevm; let new_gas = response_tri!(trevm.cap_tx_gas()); if Some(new_gas) != request.gas { - debug!( - req_gas = ?request.gas, - new_gas, - "capping gas for call", - ); + debug!(req_gas = ?request.gas, new_gas, "capping gas for call"); } - let execution_result = response_tri!(trevm.call().map_err(EvmErrored::into_error)).0; - - ResponsePayload(Ok(execution_result)) + let result = response_tri!(trevm.call().map_err(signet_evm::EvmErrored::into_error)); + ResponsePayload(Ok(result.0)) } .instrument(span); await_handler!(@response_option hctx.spawn_blocking(task)) } -pub(super) async fn call( +/// `eth_call` — execute a call and return the output bytes. 
+/// +/// Delegates to [`run_call`], then maps the execution result to raw +/// output bytes, revert data, or halt reason. +pub(crate) async fn call( hctx: HandlerCtx, mut params: TxParams, - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> ResponsePayload where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - // Stateless gas normalization. We will do stateful gas normalization later - // in [`run_call`]. - // - // This check is done greedily, as it is a simple comparison. - let max_gas = ctx.signet().config().rpc_gas_cap; + let max_gas = ctx.config().rpc_gas_cap; normalize_gas_stateless(&mut params.0, max_gas); await_handler!(@response_option hctx.spawn_with_ctx(|hctx| async move { @@ -526,56 +803,35 @@ where })) } -/// Estimate the gas cost of a transaction. -pub(super) async fn estimate_gas( +/// `eth_estimateGas` — estimate gas required for a transaction. +pub(crate) async fn estimate_gas( hctx: HandlerCtx, TxParams(mut request, block, state_overrides, block_overrides): TxParams, - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> ResponsePayload where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let id = block.unwrap_or(BlockId::pending()); - - // this span is verbose yo. - let span = trace_span!( - "estimate_gas", - ?request, - block_id = %id, - state_overrides = ?state_overrides.as_ref().map(StateOverride::len).unwrap_or_default(), - block_overrides = ?block_overrides.is_some(), - block_cfg = tracing::field::Empty, - ); - - // Stateless gas normalization. 
- let max_gas = ctx.signet().config().rpc_gas_cap; + let max_gas = ctx.config().rpc_gas_cap; normalize_gas_stateless(&mut request, max_gas); + let id = block.unwrap_or(BlockId::pending()); + let span = trace_span!("eth_estimateGas", block_id = %id); + let task = async move { - // Get the block cfg from backend, erroring if it fails - let block_cfg = match ctx.signet().block_cfg(id).await { - Ok(block_cfg) => block_cfg, - Err(e) => { - return ResponsePayload::internal_error_with_message_and_obj( - "error while loading block cfg".into(), - e.to_string().into(), - ); - } - }; + let EvmBlockContext { header, db } = response_tri!(ctx.resolve_evm_block(id)); - let trevm = response_tri!(ctx.trevm(id.into(), &block_cfg)); + let trevm = signet_evm::signet_evm(db, ctx.constants().clone()) + .fill_cfg(&CfgFiller(ctx.chain_id())) + .fill_block(&header); - // Apply state and block overrides (state overrides are fallible as - // they require DB access) let trevm = response_tri!(trevm.maybe_apply_state_overrides(state_overrides.as_ref())) .maybe_apply_block_overrides(block_overrides.as_deref()) .fill_tx(&request); - // in eth_call we cap gas here. in eth_estimate gas it is done by - // trevm - - let (estimate, _) = response_tri!(trevm.estimate_gas().map_err(EvmErrored::into_error)); + let (estimate, _) = + response_tri!(trevm.estimate_gas().map_err(signet_evm::EvmErrored::into_error)); match estimate { EstimationResult::Success { limit, .. } => ResponsePayload(Ok(U64::from(limit))), @@ -598,83 +854,84 @@ where await_handler!(@response_option hctx.spawn_blocking(task)) } -pub(super) async fn gas_price( +/// `eth_createAccessList` — generate an access list for a transaction. 
+pub(crate) async fn create_access_list( hctx: HandlerCtx, - ctx: RpcCtx, -) -> Result + TxParams(mut request, block, state_overrides, block_overrides): TxParams, + ctx: StorageRpcCtx, +) -> ResponsePayload where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { + let max_gas = ctx.config().rpc_gas_cap; + normalize_gas_stateless(&mut request, max_gas); + + let id = block.unwrap_or(BlockId::pending()); + let span = trace_span!("eth_createAccessList", block_id = %id); + let task = async move { - let (header, suggested) = tokio::try_join!( - ctx.signet().raw_header(BlockId::latest()), - ctx.signet().gas_oracle().suggest_tip_cap(), - ) - .map_err(|e| e.to_string())?; + let EvmBlockContext { header, db } = response_tri!(ctx.resolve_evm_block(id)); - let base_fee = header.and_then(|h| h.1.base_fee_per_gas()).unwrap_or_default(); - Ok(suggested + U256::from(base_fee)) - }; + let trevm = signet_evm::signet_evm(db, ctx.constants().clone()) + .fill_cfg(&CfgFiller(ctx.chain_id())) + .fill_block(&header); - await_handler!(@option hctx.spawn_blocking(task)) -} + let trevm = response_tri!(trevm.maybe_apply_state_overrides(state_overrides.as_ref())) + .maybe_apply_block_overrides(block_overrides.as_deref()) + .fill_tx(&request); -pub(super) async fn max_priority_fee_per_gas( - hctx: HandlerCtx, - ctx: RpcCtx, -) -> Result -where - Host: FullNodeComponents, - Signet: Pnt, -{ - let task = - async move { ctx.signet().gas_oracle().suggest_tip_cap().await.map_err(|e| e.to_string()) }; + let initial = request.access_list.clone().unwrap_or_default(); + let mut inspector = AccessListInspector::new(initial); - await_handler!(@option hctx.spawn_blocking(task)) -} + let result = trevm + .try_with_inspector(&mut inspector, |trevm| trevm.run()) + .map_err(signet_evm::EvmErrored::into_error); -pub(super) async fn fee_history( - hctx: HandlerCtx, - FeeHistoryArgs(block_count, newest, reward_percentiles): FeeHistoryArgs, - ctx: RpcCtx, -) 
-> Result -where - Host: FullNodeComponents, - Signet: Pnt, -{ - let task = async move { - ctx.signet() - .fee_history(block_count.to::(), newest, reward_percentiles) - .await - .map_err(|e| e.to_string()) - }; + let (gas_used, error) = match result { + Ok(ref trevm) => (U256::from(trevm.gas_used()), None), + Err(ref e) => (U256::ZERO, Some(e.to_string())), + }; - await_handler!(@option hctx.spawn_blocking(task)) + let access_list = inspector.into_access_list(); + + ResponsePayload(Ok(AccessListResult { access_list, gas_used, error })) + } + .instrument(span); + + await_handler!(@response_option hctx.spawn_blocking(task)) } -pub(super) async fn send_raw_transaction( +// --------------------------------------------------------------------------- +// Transaction Submission +// --------------------------------------------------------------------------- + +/// `eth_sendRawTransaction` — decode and forward a signed transaction. +/// +/// The transaction is forwarded to the tx cache in a fire-and-forget +/// task; the hash is returned immediately. 
+pub(crate) async fn send_raw_transaction( hctx: HandlerCtx, (tx,): (alloy::primitives::Bytes,), - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let task = |hctx: HandlerCtx| async move { - let Some(tx_cache) = ctx.signet().tx_cache() else { - return Err("tx-cache URL not provided".to_string()); - }; + let Some(tx_cache) = ctx.tx_cache().cloned() else { + return Err("tx-cache URL not provided".to_string()); + }; - let envelope = match TxEnvelope::decode_2718(&mut tx.as_ref()) { - Ok(envelope) => envelope, - Err(e) => return Err(e.to_string()), - }; + let task = |hctx: HandlerCtx| async move { + let envelope = alloy::consensus::TxEnvelope::decode_2718(&mut tx.as_ref()) + .map_err(|e| e.to_string())?; let hash = *envelope.tx_hash(); hctx.spawn(async move { - tx_cache.forward_raw_transaction(envelope).await.map_err(|e| e.to_string()) + if let Err(e) = tx_cache.forward_raw_transaction(envelope).await { + tracing::warn!(error = %e, %hash, "failed to forward raw transaction"); + } }); Ok(hash) @@ -683,107 +940,223 @@ where await_handler!(@option hctx.spawn_blocking_with_ctx(task)) } -pub(super) async fn get_logs( +// --------------------------------------------------------------------------- +// Logs +// --------------------------------------------------------------------------- + +/// Drain a [`signet_cold::LogStream`] into a `Vec`. +/// +/// Errors from the stream (deadline exceeded, too many logs, reorg) are +/// propagated as the first encountered error. +async fn collect_log_stream(stream: signet_cold::LogStream) -> signet_cold::ColdResult> { + use tokio_stream::StreamExt; + let mut logs = Vec::new(); + let mut stream = std::pin::pin!(stream); + while let Some(log) = stream.next().await { + logs.push(log?); + } + Ok(logs) +} + +/// `eth_getLogs` — query logs from cold storage with filter criteria. 
+/// +/// Uses `stream_logs` for deadline enforcement and dedicated concurrency +/// control. The stream is collected into a `Vec` for the JSON-RPC response. +pub(crate) async fn get_logs( hctx: HandlerCtx, - (filter,): (alloy::rpc::types::Filter,), - ctx: RpcCtx, -) -> Result, String> + (filter,): (Filter,), + ctx: StorageRpcCtx, +) -> Result, String> where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let task = async move { ctx.signet().logs(&filter).await.map_err(EthError::into_string) }; + let task = async move { + let cold = ctx.cold(); + + let resolved_filter = match filter.block_option { + alloy::rpc::types::FilterBlockOption::AtBlockHash(_) => filter, + alloy::rpc::types::FilterBlockOption::Range { from_block, to_block } => { + let from = from_block + .map(|b| ctx.resolve_block_tag(b)) + .unwrap_or_else(|| ctx.tags().latest()); + let to = to_block + .map(|b| ctx.resolve_block_tag(b)) + .unwrap_or_else(|| ctx.tags().latest()); + + if from > to { + return Err("fromBlock must not exceed toBlock".to_string()); + } + let max_blocks = ctx.config().max_blocks_per_filter; + if to - from > max_blocks { + return Err(format!("query exceeds max block range ({max_blocks})")); + } - await_handler!(@option hctx.spawn_blocking(task)) + Filter { + block_option: alloy::rpc::types::FilterBlockOption::Range { + from_block: Some(BlockNumberOrTag::Number(from)), + to_block: Some(BlockNumberOrTag::Number(to)), + }, + ..filter + } + } + }; + + let max_logs = ctx.config().max_logs_per_response; + let deadline = ctx.config().max_log_query_deadline; + + let stream = cold + .stream_logs(resolved_filter, max_logs, deadline) + .await + .map_err(|e| e.to_string())?; + + collect_log_stream(stream).await.map_err(|e| e.to_string()) + }; + + await_handler!(@option hctx.spawn(task)) } -pub(super) async fn new_filter( +// --------------------------------------------------------------------------- +// Filters +// 
--------------------------------------------------------------------------- + +/// `eth_newFilter` — install a log filter for polling. +pub(crate) async fn new_filter( hctx: HandlerCtx, - (filter,): (alloy::rpc::types::Filter,), - ctx: RpcCtx, + (filter,): (Filter,), + ctx: StorageRpcCtx, ) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let task = - async move { ctx.signet().install_log_filter(filter).map_err(EthError::into_string) }; + let task = async move { + let latest = ctx.tags().latest(); + Ok(ctx.filter_manager().install_log_filter(latest, filter)) + }; await_handler!(@option hctx.spawn_blocking(task)) } -pub(super) async fn new_block_filter( +/// `eth_newBlockFilter` — install a block hash filter for polling. +pub(crate) async fn new_block_filter( hctx: HandlerCtx, - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let task = async move { ctx.signet().install_block_filter().map_err(EthError::into_string) }; + let task = async move { + let latest = ctx.tags().latest(); + Ok(ctx.filter_manager().install_block_filter(latest)) + }; await_handler!(@option hctx.spawn_blocking(task)) } -pub(super) async fn uninstall_filter( +/// `eth_uninstallFilter` — remove a filter. +pub(crate) async fn uninstall_filter( hctx: HandlerCtx, (id,): (U64,), - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let task = async move { Ok(ctx.signet().uninstall_filter(id)) }; - + let task = async move { Ok(ctx.filter_manager().uninstall(id).is_some()) }; await_handler!(@option hctx.spawn_blocking(task)) } -pub(super) async fn get_filter_changes( +/// `eth_getFilterChanges` / `eth_getFilterLogs` — poll a filter for new +/// results since the last poll. 
Fetches matching data from cold storage. +pub(crate) async fn get_filter_changes( hctx: HandlerCtx, (id,): (U64,), - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let task = async move { ctx.signet().filter_changes(id).await.map_err(EthError::into_string) }; + let task = async move { + let fm = ctx.filter_manager(); + let mut entry = fm.get_mut(id).ok_or_else(|| format!("filter not found: {id}"))?; - await_handler!(@option hctx.spawn_blocking(task)) + let latest = ctx.tags().latest(); + let start = entry.next_start_block(); + + if start > latest { + entry.mark_polled(latest); + return Ok(entry.empty_output()); + } + + let cold = ctx.cold(); + + if entry.is_block() { + let specs: Vec<_> = (start..=latest).map(HeaderSpecifier::Number).collect(); + let headers = cold.get_headers(specs).await.map_err(|e| e.to_string())?; + let hashes: Vec = headers.into_iter().flatten().map(|h| h.hash()).collect(); + entry.mark_polled(latest); + Ok(FilterOutput::from(hashes)) + } else { + let stored = entry.as_filter().cloned().unwrap(); + let resolved = Filter { + block_option: alloy::rpc::types::FilterBlockOption::Range { + from_block: Some(BlockNumberOrTag::Number(start)), + to_block: Some(BlockNumberOrTag::Number(latest)), + }, + ..stored + }; + + let max_logs = ctx.config().max_logs_per_response; + let deadline = ctx.config().max_log_query_deadline; + + let stream = + cold.stream_logs(resolved, max_logs, deadline).await.map_err(|e| e.to_string())?; + + let logs = collect_log_stream(stream).await.map_err(|e| e.to_string())?; + + entry.mark_polled(latest); + Ok(FilterOutput::from(logs)) + } + }; + + await_handler!(@option hctx.spawn(task)) } -pub(super) async fn subscribe( +// --------------------------------------------------------------------------- +// Subscriptions +// --------------------------------------------------------------------------- + +/// 
`eth_subscribe` — register a push-based subscription (WebSocket/SSE). +pub(crate) async fn subscribe( hctx: HandlerCtx, sub: SubscribeArgs, - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let kind = sub.try_into()?; + let interest: InterestKind = sub.try_into()?; - let task = |hctx| async move { - ctx.signet() - .subscriptions() - .subscribe(&hctx, kind) - .ok_or_else(|| "pubsub not enabled".to_string()) - }; - - await_handler!(@option hctx.spawn_blocking_with_ctx(task)) + ctx.sub_manager() + .subscribe(&hctx, interest) + .ok_or_else(|| "notifications not enabled on this transport".to_string()) } -pub(super) async fn unsubscribe( +/// `eth_unsubscribe` — cancel a push-based subscription. +pub(crate) async fn unsubscribe( hctx: HandlerCtx, (id,): (U64,), - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let task = async move { Ok(ctx.signet().subscriptions().unsubscribe(id)) }; - + let task = async move { Ok(ctx.sub_manager().unsubscribe(id)) }; await_handler!(@option hctx.spawn_blocking(task)) } diff --git a/crates/rpc/src/eth/error.rs b/crates/rpc/src/eth/error.rs index 2d85cce..135b6b8 100644 --- a/crates/rpc/src/eth/error.rs +++ b/crates/rpc/src/eth/error.rs @@ -1,26 +1,61 @@ -use reth::{ - providers::ProviderError, - rpc::{eth::filter::EthFilterError, server_types::eth::EthApiError}, -}; +//! Error types for the storage-backed ETH RPC. -/// Errors that can occur when interacting with the `eth_` namespace. +use alloy::{eips::BlockId, primitives::Bytes}; +use serde::Serialize; + +/// Errors from the storage-backed ETH RPC. #[derive(Debug, thiserror::Error)] pub enum EthError { - /// Provider error: [`ProviderError`]. - #[error("Provider error: {0}")] - Provider(#[from] ProviderError), - /// Filter error [`EthFilterError`]. 
- #[error("Filter error: {0}")] - Filter(#[from] EthFilterError), - /// Eth API error: [`EthApiError`]. - #[error("Eth API error: {0}")] - Rpc(#[from] EthApiError), + /// Cold storage error. + #[error("cold storage: {0}")] + Cold(#[from] signet_cold::ColdStorageError), + /// Hot storage error. + #[error("hot storage: {0}")] + Hot(#[from] signet_storage::StorageError), + /// Block resolution error. + #[error("resolve: {0}")] + Resolve(#[from] crate::config::resolve::ResolveError), + /// Invalid transaction signature. + #[error("invalid transaction signature")] + InvalidSignature, + /// Block not found. + #[error("block not found: {0}")] + BlockNotFound(BlockId), + /// Receipt found but the corresponding transaction is missing. + #[error("receipt found but transaction missing")] + TransactionMissing, + /// EVM execution error. + #[error("evm: {0}")] + Evm(String), } impl EthError { - /// Turn into a string by value, allows for - /// `.map_err(EthError::into_string)` to be used. + /// Convert the error to a string for JSON-RPC responses. pub fn into_string(self) -> String { - ToString::to_string(&self) + self.to_string() + } +} + +/// Error data for `eth_call` and `eth_estimateGas` responses. +/// +/// Serialized as JSON in the error response `data` field. +#[derive(Debug, Clone, Serialize)] +#[serde(untagged)] +pub(crate) enum CallErrorData { + /// Revert data bytes. + Bytes(Bytes), + /// Error message string. + String(String), +} + +impl From for CallErrorData { + fn from(b: Bytes) -> Self { + Self::Bytes(b) + } +} + +impl From for CallErrorData { + fn from(s: String) -> Self { + Self::String(s) } } diff --git a/crates/rpc/src/eth/helpers.rs b/crates/rpc/src/eth/helpers.rs index 5462624..399a177 100644 --- a/crates/rpc/src/eth/helpers.rs +++ b/crates/rpc/src/eth/helpers.rs @@ -1,31 +1,239 @@ -use alloy::{eips::BlockId, primitives::Bytes}; - -/// Error output of `eth_call`. 
-#[derive(Debug, Clone, serde::Serialize)] -#[serde(untagged)] -pub enum CallErrorData { - /// Error output is a byte array, usually a revert message. - Bytes(Bytes), - /// Output is a block id. - BlockId(BlockId), - /// Error message. - String(String), -} - -impl From for CallErrorData { - fn from(bytes: Bytes) -> Self { - Self::Bytes(bytes) +//! Parameter types, macros, and utility helpers for ETH RPC endpoints. + +use super::types::{RpcReceipt, RpcTransaction}; +use crate::interest::InterestKind; +use alloy::{ + consensus::{ + ReceiptEnvelope, ReceiptWithBloom, Transaction, TxReceipt, transaction::Recovered, + }, + eips::BlockId, + primitives::{Address, TxKind, U256}, + rpc::types::{ + BlockOverrides, Log, TransactionReceipt, TransactionRequest, pubsub::SubscriptionKind, + state::StateOverride, + }, +}; +use serde::Deserialize; +use signet_cold::ColdReceipt; +use signet_storage_types::ConfirmationMeta; +use trevm::MIN_TRANSACTION_GAS; + +/// Args for `eth_call` and `eth_estimateGas`. +#[derive(Debug, Deserialize)] +pub(crate) struct TxParams( + pub TransactionRequest, + #[serde(default)] pub Option, + #[serde(default)] pub Option, + #[serde(default)] pub Option>, +); + +/// Args for `eth_getBlockByHash` and `eth_getBlockByNumber`. +#[derive(Debug, Deserialize)] +pub(crate) struct BlockParams(pub T, #[serde(default)] pub Option); + +/// Args for `eth_getStorageAt`. +#[derive(Debug, Deserialize)] +pub(crate) struct StorageAtArgs(pub Address, pub U256, #[serde(default)] pub Option); + +/// Args for `eth_getBalance`, `eth_getTransactionCount`, and `eth_getCode`. +#[derive(Debug, Deserialize)] +pub(crate) struct AddrWithBlock(pub Address, #[serde(default)] pub Option); + +/// Args for `eth_feeHistory`. +#[derive(Debug, Deserialize)] +pub(crate) struct FeeHistoryArgs( + pub alloy::primitives::U64, + pub alloy::eips::BlockNumberOrTag, + #[serde(default)] pub Option>, +); + +/// Args for `eth_subscribe`. 
+#[derive(Debug, Deserialize)] +pub(crate) struct SubscribeArgs( + pub alloy::rpc::types::pubsub::SubscriptionKind, + #[serde(default)] pub Option>, +); + +impl TryFrom for InterestKind { + type Error = String; + + fn try_from(args: SubscribeArgs) -> Result { + match args.0 { + SubscriptionKind::Logs => args + .1 + .map(InterestKind::Log) + .ok_or_else(|| "missing filter for Logs subscription".to_string()), + SubscriptionKind::NewHeads => { + if args.1.is_some() { + Err("filter not supported for NewHeads subscription".to_string()) + } else { + Ok(InterestKind::Block) + } + } + other => Err(format!("unsupported subscription kind: {other:?}")), + } } } -impl From for CallErrorData { - fn from(id: BlockId) -> Self { - Self::BlockId(id) +/// Normalize transaction request gas without making DB reads. +/// +/// - If the gas is below `MIN_TRANSACTION_GAS`, set it to `None` +/// - If the gas is above the `rpc_gas_cap`, set it to the `rpc_gas_cap` +pub(crate) const fn normalize_gas_stateless(request: &mut TransactionRequest, max_gas: u64) { + match request.gas { + Some(..MIN_TRANSACTION_GAS) => request.gas = None, + Some(val) if val > max_gas => request.gas = Some(max_gas), + _ => {} + } +} + +/// Await a handler task, returning an error string on panic/cancel. +macro_rules! await_handler { + ($h:expr) => { + match $h.await { + Ok(res) => res, + Err(_) => return Err("task panicked or cancelled".to_string()), + } + }; + + (@option $h:expr) => { + match $h.await { + Ok(Some(res)) => res, + _ => return Err("task panicked or cancelled".to_string()), + } + }; + + (@response_option $h:expr) => { + match $h.await { + Ok(Some(res)) => res, + _ => { + return ajj::ResponsePayload::internal_error_message(std::borrow::Cow::Borrowed( + "task panicked or cancelled", + )) + } + } + }; +} +pub(crate) use await_handler; + +/// Try-operator for `ResponsePayload`. +macro_rules! 
response_tri { + ($h:expr) => { + match $h { + Ok(res) => res, + Err(err) => return ajj::ResponsePayload::internal_error_message(err.to_string().into()), + } + }; +} +pub(crate) use response_tri; + +/// Resolve a block ID and open a hot storage reader at that height. +/// +/// Shared by account-state endpoints (`balance`, `storage_at`, +/// `addr_tx_count`, `code_at`) which all follow the same +/// resolve → open reader → query pattern. +pub(crate) fn hot_reader_at_block( + ctx: &crate::config::StorageRpcCtx, + id: BlockId, +) -> Result<(H::RoTx, u64), String> +where + H: signet_hot::HotKv, + ::Error: std::error::Error + Send + Sync + 'static, +{ + let height = ctx.resolve_block_id(id).map_err(|e| e.to_string())?; + let reader = ctx.hot_reader().map_err(|e| e.to_string())?; + Ok((reader, height)) +} + +/// Small wrapper implementing [`trevm::Cfg`] to set the chain ID. +pub(crate) struct CfgFiller(pub u64); + +impl trevm::Cfg for CfgFiller { + fn fill_cfg_env(&self, cfg: &mut trevm::revm::context::CfgEnv) { + cfg.chain_id = self.0; + } +} + +/// Build an [`alloy::rpc::types::Transaction`] from cold storage types. +pub(crate) fn build_rpc_transaction( + tx: &signet_storage_types::RecoveredTx, + meta: &ConfirmationMeta, + base_fee: Option, +) -> RpcTransaction { + let signer = tx.signer(); + let tx_envelope: alloy::consensus::TxEnvelope = tx.clone().into_inner().into(); + let inner = Recovered::new_unchecked(tx_envelope, signer); + + let egp = base_fee + .map(|bf| inner.effective_tip_per_gas(bf).unwrap_or_default() as u64 + bf) + .unwrap_or_else(|| inner.max_fee_per_gas() as u64); + + alloy::rpc::types::Transaction { + inner, + block_hash: Some(meta.block_hash()), + block_number: Some(meta.block_number()), + transaction_index: Some(meta.transaction_index()), + effective_gas_price: Some(egp as u128), + } +} + +/// Build a [`TransactionReceipt`] from a [`ColdReceipt`] and its transaction. 
+/// +/// The transaction is needed for `to`, `contract_address`, and +/// `effective_gas_price` which are not stored on the receipt. +pub(crate) fn build_receipt( + cr: &ColdReceipt, + tx: &signet_storage_types::RecoveredTx, + base_fee: Option, +) -> RpcReceipt { + let logs_bloom = cr.receipt.bloom(); + let status = cr.receipt.status; + let cumulative_gas_used = cr.receipt.cumulative_gas_used; + + let rpc_receipt = alloy::rpc::types::eth::Receipt { + status, + cumulative_gas_used, + logs: cr.receipt.logs.clone(), + }; + + let (contract_address, to) = match tx.kind() { + TxKind::Create => (Some(cr.from.create(tx.nonce())), None), + TxKind::Call(addr) => (None, Some(Address(*addr))), + }; + + let egp = base_fee + .map(|bf| tx.effective_tip_per_gas(bf).unwrap_or_default() as u64 + bf) + .unwrap_or_else(|| tx.max_fee_per_gas() as u64); + + TransactionReceipt { + inner: build_receipt_envelope( + ReceiptWithBloom { receipt: rpc_receipt, logs_bloom }, + cr.tx_type, + ), + transaction_hash: cr.tx_hash, + transaction_index: Some(cr.transaction_index), + block_hash: Some(cr.block_hash), + block_number: Some(cr.block_number), + from: cr.from, + to, + gas_used: cr.gas_used, + contract_address, + effective_gas_price: egp as u128, + blob_gas_price: None, + blob_gas_used: None, } } -impl From for CallErrorData { - fn from(s: String) -> Self { - Self::String(s) +/// Wrap a receipt in the appropriate [`ReceiptEnvelope`] variant. 
+const fn build_receipt_envelope( + receipt: ReceiptWithBloom>, + tx_type: alloy::consensus::TxType, +) -> ReceiptEnvelope { + match tx_type { + alloy::consensus::TxType::Legacy => ReceiptEnvelope::Legacy(receipt), + alloy::consensus::TxType::Eip2930 => ReceiptEnvelope::Eip2930(receipt), + alloy::consensus::TxType::Eip1559 => ReceiptEnvelope::Eip1559(receipt), + alloy::consensus::TxType::Eip4844 => ReceiptEnvelope::Eip4844(receipt), + alloy::consensus::TxType::Eip7702 => ReceiptEnvelope::Eip7702(receipt), } } diff --git a/crates/rpc/src/eth/mod.rs b/crates/rpc/src/eth/mod.rs index ed906b6..2c51abb 100644 --- a/crates/rpc/src/eth/mod.rs +++ b/crates/rpc/src/eth/mod.rs @@ -1,73 +1,82 @@ +//! ETH namespace RPC router backed by storage. + mod endpoints; -use endpoints::*; +use endpoints::{ + addr_tx_count, balance, block, block_number, block_receipts, block_tx_count, call, chain_id, + code_at, create_access_list, estimate_gas, fee_history, gas_price, get_filter_changes, + get_logs, header_by, max_priority_fee_per_gas, new_block_filter, new_filter, not_supported, + raw_transaction_by_block_and_index, raw_transaction_by_hash, send_raw_transaction, storage_at, + subscribe, syncing, transaction_by_block_and_index, transaction_by_hash, transaction_receipt, + uncle_block, uncle_count, uninstall_filter, unsubscribe, +}; mod error; pub use error::EthError; -mod helpers; -pub use helpers::CallErrorData; +pub(crate) mod helpers; +pub(crate) mod types; -use crate::ctx::RpcCtx; +use crate::config::StorageRpcCtx; use alloy::{eips::BlockNumberOrTag, primitives::B256}; -use reth_node_api::FullNodeComponents; -use signet_node_types::Pnt; +use signet_hot::{HotKv, model::HotKvRead}; +use trevm::revm::database::DBErrorMarker; -/// Instantiate the `eth` API router. -pub fn eth() -> ajj::Router> +/// Instantiate the `eth` API router backed by storage. 
+pub(crate) fn eth() -> ajj::Router> where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { ajj::Router::new() - .route("protocolVersion", protocol_version) - .route("syncing", syncing) - .route("blockNumber", block_number) - .route("chainId", chain_id) - .route("getBlockByHash", block::) - .route("getBlockByNumber", block::) - .route("getBlockTransactionCountByHash", block_tx_count::) - .route("getBlockTransactionCountByNumber", block_tx_count::) - .route("getBlockReceipts", block_receipts) - .route("getRawTransactionByHash", raw_transaction_by_hash) - .route("getTransactionByHash", transaction_by_hash) + .route("blockNumber", block_number::) + .route("chainId", chain_id::) + .route("getBlockByHash", block::) + .route("getBlockByNumber", block::) + .route("getBlockTransactionCountByHash", block_tx_count::) + .route("getBlockTransactionCountByNumber", block_tx_count::) + .route("getBlockReceipts", block_receipts::) + .route("getRawTransactionByHash", raw_transaction_by_hash::) + .route("getTransactionByHash", transaction_by_hash::) .route( "getRawTransactionByBlockHashAndIndex", - raw_transaction_by_block_and_index::, + raw_transaction_by_block_and_index::, ) .route( "getRawTransactionByBlockNumberAndIndex", - raw_transaction_by_block_and_index::, + raw_transaction_by_block_and_index::, ) - .route("getTransactionByBlockHashAndIndex", transaction_by_block_and_index::) + .route("getTransactionByBlockHashAndIndex", transaction_by_block_and_index::) .route( "getTransactionByBlockNumberAndIndex", - transaction_by_block_and_index::, + transaction_by_block_and_index::, ) - .route("getTransactionReceipt", transaction_receipt) - .route("getBalance", balance) - .route("getStorageAt", storage_at) - .route("getTransactionCount", addr_tx_count) - .route("getCode", code_at) - .route("getBlockHeaderByHash", header_by::) - .route("getBlockHeaderByNumber", header_by::) - .route("call", call) - .route("estimateGas", 
estimate_gas) - .route("gasPrice", gas_price) - .route("maxPriorityFeePerGas", max_priority_fee_per_gas) - .route("feeHistory", fee_history) - .route("sendRawTransaction", send_raw_transaction) - .route("getLogs", get_logs) - .route("newFilter", new_filter) - .route("newBlockFilter", new_block_filter) - .route("uninstallFilter", uninstall_filter) - .route("getFilterChanges", get_filter_changes) - .route("getFilterLogs", get_filter_changes) - .route("subscribe", subscribe) - .route("unsubscribe", unsubscribe) - // --------------- - // - // Unsupported methods: - // + .route("getTransactionReceipt", transaction_receipt::) + .route("getBlockHeaderByHash", header_by::) + .route("getBlockHeaderByNumber", header_by::) + .route("getBalance", balance::) + .route("getStorageAt", storage_at::) + .route("getTransactionCount", addr_tx_count::) + .route("getCode", code_at::) + .route("call", call::) + .route("estimateGas", estimate_gas::) + .route("sendRawTransaction", send_raw_transaction::) + .route("getLogs", get_logs::) + .route("syncing", syncing::) + .route("gasPrice", gas_price::) + .route("maxPriorityFeePerGas", max_priority_fee_per_gas::) + .route("feeHistory", fee_history::) + .route("createAccessList", create_access_list::) + .route("newFilter", new_filter::) + .route("newBlockFilter", new_block_filter::) + .route("uninstallFilter", uninstall_filter::) + .route("getFilterChanges", get_filter_changes::) + .route("getFilterLogs", get_filter_changes::) + .route("subscribe", subscribe::) + .route("unsubscribe", unsubscribe::) + // --- + // Unsupported methods + // --- + .route("protocolVersion", not_supported) .route("coinbase", not_supported) .route("accounts", not_supported) .route("blobBaseFee", not_supported) @@ -85,6 +94,5 @@ where .route("signTransaction", not_supported) .route("signTypedData", not_supported) .route("getProof", not_supported) - .route("createAccessList", not_supported) .route("newPendingTransactionFilter", not_supported) } diff --git 
a/crates/rpc-storage/src/eth/types.rs b/crates/rpc/src/eth/types.rs similarity index 100% rename from crates/rpc-storage/src/eth/types.rs rename to crates/rpc/src/eth/types.rs diff --git a/crates/rpc/src/inspect/db.rs b/crates/rpc/src/inspect/db.rs deleted file mode 100644 index 73f2cde..0000000 --- a/crates/rpc/src/inspect/db.rs +++ /dev/null @@ -1,164 +0,0 @@ -use ajj::serde_json; -use eyre::WrapErr; -use reth::providers::{ProviderFactory, providers::ProviderNodeTypes}; -use reth_db::{Database, TableViewer, mdbx, table::Table}; -use reth_db_common::{DbTool, ListFilter}; -use signet_node_types::Pnt; -use std::sync::{Arc, OnceLock}; -use tracing::instrument; - -/// Modeled on the `Command` struct from `reth/crates/cli/commands/src/db/list.rs` -#[derive(Debug, serde::Deserialize)] -pub(crate) struct DbArgs( - /// The table name - String, // 0 - /// Skip first N entries - #[serde(default)] - usize, // 1 - /// How many items to take from the walker - #[serde(default)] - Option, // 2 - /// Search parameter for both keys and values. Prefix it with `0x` to search for binary data, - /// and text otherwise. - /// - /// ATTENTION! For compressed tables (`Transactions` and `Receipts`), there might be - /// missing results since the search uses the raw uncompressed value from the database. - #[serde(default)] - Option, // 3 -); - -impl DbArgs { - /// Get the table name. - pub(crate) fn table_name(&self) -> &str { - &self.0 - } - - /// Parse the table name into a [`reth_db::Tables`] enum. - pub(crate) fn table(&self) -> Result { - self.table_name().parse() - } - - /// Get the skip value. - pub(crate) const fn skip(&self) -> usize { - self.1 - } - - /// Get the length value. - pub(crate) fn len(&self) -> usize { - self.2.unwrap_or(5) - } - - /// Get the search value. 
- pub(crate) fn search(&self) -> Vec { - self.3 - .as_ref() - .map(|search| { - if let Some(search) = search.strip_prefix("0x") { - return alloy::primitives::hex::decode(search).unwrap(); - } - search.as_bytes().to_vec() - }) - .unwrap_or_default() - } - - /// Generate [`ListFilter`] from command. - pub(crate) fn list_filter(&self) -> ListFilter { - ListFilter { - skip: self.skip(), - len: self.len(), - search: self.search(), - min_row_size: 0, - min_key_size: 0, - min_value_size: 0, - reverse: false, - only_count: false, - } - } -} - -pub(crate) struct ListTableViewer<'a, 'b, N: Pnt> { - pub(crate) factory: &'b ProviderFactory, - pub(crate) args: &'a DbArgs, - - pub(crate) output: OnceLock>, -} - -impl<'a, 'b, N: Pnt> ListTableViewer<'a, 'b, N> { - /// Create a new `ListTableViewer`. - pub(crate) fn new(factory: &'b ProviderFactory, args: &'a DbArgs) -> Self { - Self { factory, args, output: Default::default() } - } - - /// Take the output if it has been initialized, otherwise return `None`. 
- pub(crate) fn take_output(self) -> Option> { - self.output.into_inner() - } -} - -impl>> TableViewer<()> - for ListTableViewer<'_, '_, N> -{ - type Error = eyre::Report; - - #[instrument(skip(self), err)] - fn view(&self) -> eyre::Result<()> { - let tool = DbTool { provider_factory: self.factory.clone() }; - - self.factory.db_ref().view(|tx| { - let table_db = - tx.inner.open_db(Some(self.args.table_name())).wrap_err("Could not open db.")?; - let stats = tx - .inner - .db_stat(table_db.dbi()) - .wrap_err(format!("Could not find table: {}", stringify!($table)))?; - let total_entries = stats.entries(); - let final_entry_idx = total_entries.saturating_sub(1); - eyre::ensure!( - self.args.skip() >= final_entry_idx, - "Skip value {} is greater than total entries {}", - self.args.skip(), - total_entries - ); - - let list_filter = self.args.list_filter(); - - let (list, _) = tool.list::(&list_filter)?; - - let json = - serde_json::value::to_raw_value(&list).wrap_err("Failed to serialize list")?; - - self.output.get_or_init(|| json); - - Ok(()) - })??; - - Ok(()) - } -} - -// Some code in this file is adapted from github.com/paradigmxyz/reth. -// -// Particularly the `reth/crates/cli/commands/src/db/list.rs` file. It is -// reproduced here under the terms of the MIT license, -// -// The MIT License (MIT) -// -// Copyright (c) 2022-2025 Reth Contributors -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. diff --git a/crates/rpc/src/inspect/endpoints.rs b/crates/rpc/src/inspect/endpoints.rs deleted file mode 100644 index e8f4c6d..0000000 --- a/crates/rpc/src/inspect/endpoints.rs +++ /dev/null @@ -1,37 +0,0 @@ -use crate::{ - inspect::db::{DbArgs, ListTableViewer}, - utils::{await_handler, response_tri}, -}; -use ajj::{HandlerCtx, ResponsePayload}; -use reth::providers::{ProviderFactory, providers::ProviderNodeTypes}; -use reth_db::mdbx; -use signet_node_types::Pnt; -use std::sync::Arc; - -/// Handler for the `db` endpoint in the `inspect` module. -pub(super) async fn db( - hctx: HandlerCtx, - args: DbArgs, - ctx: ProviderFactory, -) -> ResponsePayload, String> -where - Signet: Pnt + ProviderNodeTypes>, -{ - let task = async move { - let table: reth_db::Tables = response_tri!(args.table(), "invalid table name"); - - let viewer = ListTableViewer::new(&ctx, &args); - - response_tri!(table.view(&viewer), "Failed to view table"); - - let Some(output) = viewer.take_output() else { - return ResponsePayload::internal_error_message( - "No output generated. The task may have panicked or been cancelled. 
This is a bug, please report it.".into(), - ); - }; - - ResponsePayload(Ok(output)) - }; - - await_handler!(@response_option hctx.spawn_blocking(task)) -} diff --git a/crates/rpc/src/inspect/mod.rs b/crates/rpc/src/inspect/mod.rs deleted file mode 100644 index e0cb981..0000000 --- a/crates/rpc/src/inspect/mod.rs +++ /dev/null @@ -1,16 +0,0 @@ -pub(crate) mod db; - -mod endpoints; - -use reth::providers::{ProviderFactory, providers::ProviderNodeTypes}; -use reth_db::mdbx; -use signet_node_types::Pnt; -use std::sync::Arc; - -/// Instantiate the `inspect` API router. -pub fn inspect() -> ajj::Router> -where - Signet: Pnt + ProviderNodeTypes>, -{ - ajj::Router::new().route("db", endpoints::db::) -} diff --git a/crates/rpc-storage/src/interest/buffer.rs b/crates/rpc/src/interest/buffer.rs similarity index 100% rename from crates/rpc-storage/src/interest/buffer.rs rename to crates/rpc/src/interest/buffer.rs diff --git a/crates/rpc/src/interest/filters.rs b/crates/rpc/src/interest/filters.rs index df5b867..ae09367 100644 --- a/crates/rpc/src/interest/filters.rs +++ b/crates/rpc/src/interest/filters.rs @@ -1,11 +1,12 @@ -use crate::interest::InterestKind; +//! Filter management for `eth_newFilter` / `eth_getFilterChanges`. + +use crate::interest::{InterestKind, buffer::EventBuffer}; use alloy::{ primitives::{B256, U64}, - rpc::types::{Filter, Log}, + rpc::types::Filter, }; use dashmap::{DashMap, mapref::one::RefMut}; use std::{ - collections::VecDeque, sync::{ Arc, Weak, atomic::{AtomicU64, Ordering}, @@ -16,117 +17,13 @@ use tracing::trace; type FilterId = U64; -/// Either type for filter outputs. -#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize)] -#[serde(untagged)] -pub enum Either { - /// Log - Log(Log), - /// Block hash - Block(B256), -} - -/// The output of a filter. -/// -/// This will be either a list of logs or a list of block hashes. Pending tx -/// filters are not supported by Signet. For convenience, there is a special -/// variant for empty results. 
-#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize)] -#[serde(untagged)] -pub enum FilterOutput { - /// Empty output. Holds a `[(); 0]` to make sure it serializes as an empty - /// array. - Empty([(); 0]), - /// Logs - Log(VecDeque), - /// Block hashes - Block(VecDeque), -} - -impl FilterOutput { - /// Create an empty filter output. - pub const fn empty() -> Self { - Self::Empty([]) - } - - /// True if this is an empty filter output. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// The length of this filter output. - pub fn len(&self) -> usize { - match self { - Self::Empty(_) => 0, - Self::Log(logs) => logs.len(), - Self::Block(blocks) => blocks.len(), - } - } - - /// Extend this filter output with another. - /// - /// # Panics - /// - /// If the two filter outputs are of different types. - pub fn extend(&mut self, other: Self) { - match (self, other) { - // If we're a log, we can extend with other logs - (Self::Log(logs), Self::Log(other_logs)) => logs.extend(other_logs), - // If we're a block, we can extend with other blocks - (Self::Block(blocks), Self::Block(other_blocks)) => blocks.extend(other_blocks), - // Extending with empty is a noop - (_, Self::Empty(_)) => (), - // If we're empty, just take the other value - (this @ Self::Empty(_), other) => *this = other, - // This will occur when trying to mix log and block outputs - _ => panic!("attempted to mix log and block outputs"), - } - } - - /// Pop a value from the front of the filter output. 
- pub fn pop_front(&mut self) -> Option { - match self { - Self::Log(logs) => logs.pop_front().map(Either::Log), - Self::Block(blocks) => blocks.pop_front().map(Either::Block), - Self::Empty(_) => None, - } - } -} - -impl From> for FilterOutput { - fn from(block_hashes: Vec) -> Self { - Self::Block(block_hashes.into()) - } -} - -impl From> for FilterOutput { - fn from(logs: Vec) -> Self { - Self::Log(logs.into()) - } -} - -impl FromIterator for FilterOutput { - fn from_iter>(iter: T) -> Self { - let inner: VecDeque<_> = iter.into_iter().collect(); - if inner.is_empty() { Self::empty() } else { Self::Log(inner) } - } -} - -impl FromIterator for FilterOutput { - fn from_iter>(iter: T) -> Self { - let inner: VecDeque<_> = iter.into_iter().collect(); - if inner.is_empty() { Self::empty() } else { Self::Block(inner) } - } -} +/// Output of a polled filter: log entries or block hashes. +pub(crate) type FilterOutput = EventBuffer; /// An active filter. /// -/// This struct records -/// - the filter details -/// - the [`Instant`] at which the filter was last polled -/// - the first block whose contents should be considered by the filter -/// -/// These are updated via the [`Self::mark_polled`] method. +/// Records the filter details, the [`Instant`] at which the filter was last +/// polled, and the first block whose contents should be considered. #[derive(Debug, Clone, PartialEq, Eq)] pub(crate) struct ActiveFilter { next_start_block: u64, @@ -147,11 +44,6 @@ impl core::fmt::Display for ActiveFilter { } impl ActiveFilter { - /// True if this is a log filter. - pub(crate) const fn is_filter(&self) -> bool { - self.kind.is_filter() - } - /// True if this is a block filter. pub(crate) const fn is_block(&self) -> bool { self.kind.is_block() @@ -168,7 +60,7 @@ impl ActiveFilter { self.last_poll_time = Instant::now(); } - /// Get the last block for which the filter was polled. + /// Get the next start block for the filter. 
pub(crate) const fn next_start_block(&self) -> u64 { self.next_start_block } @@ -177,6 +69,11 @@ impl ActiveFilter { pub(crate) fn time_since_last_poll(&self) -> Duration { self.last_poll_time.elapsed() } + + /// Return an empty output of the same kind as this filter. + pub(crate) const fn empty_output(&self) -> FilterOutput { + self.kind.empty_output() + } } /// Inner logic for [`FilterManager`]. @@ -206,7 +103,6 @@ impl FilterManagerInner { fn install(&self, current_block: u64, kind: InterestKind) -> FilterId { let id = self.next_id(); let next_start_block = current_block + 1; - // discard the result, as we'll not reuse ever. let _ = self .filters .insert(id, ActiveFilter { next_start_block, last_poll_time: Instant::now(), kind }); @@ -313,7 +209,7 @@ impl FilterCleanTask { // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: -//. +// // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // diff --git a/crates/rpc/src/interest/kind.rs b/crates/rpc/src/interest/kind.rs index 3d8ecf7..f883e5a 100644 --- a/crates/rpc/src/interest/kind.rs +++ b/crates/rpc/src/interest/kind.rs @@ -1,106 +1,86 @@ -use crate::interest::{filters::FilterOutput, subs::SubscriptionBuffer}; -use alloy::{ - consensus::BlockHeader, - rpc::types::{Filter, Header, Log}, -}; -use reth::{providers::CanonStateNotification, rpc::types::FilteredParams}; +//! Filter kinds for subscriptions and polling filters. + +use crate::interest::{NewBlockNotification, filters::FilterOutput, subs::SubscriptionBuffer}; +use alloy::rpc::types::{Filter, Header, Log}; use std::collections::VecDeque; /// The different kinds of filters that can be created. /// /// Pending tx filters are not supported by Signet. 
#[derive(Debug, Clone, PartialEq, Eq)] -pub enum InterestKind { +pub(crate) enum InterestKind { + /// Log filter with a user-supplied [`Filter`]. Log(Box), + /// New-block filter. Block, } impl InterestKind { - /// True if this is a log filter. - pub const fn is_filter(&self) -> bool { - matches!(self, Self::Log(_)) - } - /// True if this is a block filter. - pub const fn is_block(&self) -> bool { + pub(crate) const fn is_block(&self) -> bool { matches!(self, Self::Block) } /// Fallible cast to a filter. - pub const fn as_filter(&self) -> Option<&Filter> { + pub(crate) const fn as_filter(&self) -> Option<&Filter> { match self { Self::Log(f) => Some(f), _ => None, } } - fn apply_block(&self, notif: &CanonStateNotification) -> SubscriptionBuffer { - notif - .committed() - .blocks_iter() - .map(|b| Header { - hash: b.hash(), - inner: b.clone_header(), - total_difficulty: None, - size: None, - }) - .collect() + fn apply_block(notif: &NewBlockNotification) -> SubscriptionBuffer { + let header = Header { + hash: notif.header.hash_slow(), + inner: notif.header.clone(), + total_difficulty: None, + size: None, + }; + SubscriptionBuffer::Block(VecDeque::from([header])) } - fn apply_filter(&self, notif: &CanonStateNotification) -> SubscriptionBuffer { - // NB: borrowing OUTSIDE the top-level closure prevents this value from - // being moved into the closure, which would result in the inner - // closures violating borrowing rules. 
+ fn apply_filter(&self, notif: &NewBlockNotification) -> SubscriptionBuffer { let filter = self.as_filter().unwrap(); + let block_hash = notif.header.hash_slow(); + let block_number = notif.header.number; + let block_timestamp = notif.header.timestamp; - let address_filter = FilteredParams::address_filter(&filter.address); - let topics_filter = FilteredParams::topics_filter(&filter.topics); - - notif - .committed() - .blocks_and_receipts() - .filter(|(block, _)| { - let bloom = block.header().logs_bloom(); - FilteredParams::matches_address(bloom, &address_filter) - && FilteredParams::matches_topics(bloom, &topics_filter) + let logs: VecDeque = notif + .receipts + .iter() + .enumerate() + .flat_map(|(tx_idx, receipt)| { + let tx_hash = *notif.transactions[tx_idx].tx_hash(); + receipt.inner.logs.iter().map(move |log| (tx_idx, tx_hash, log)) }) - .flat_map(move |(block, receipts)| { - let block_num_hash = block.num_hash(); - - receipts.iter().enumerate().flat_map(move |(transaction_index, receipt)| { - let transaction_hash = *block.body().transactions[transaction_index].hash(); - - receipt.logs.iter().enumerate().filter_map(move |(log_index, log)| { - if filter.matches(log) { - Some(Log { - inner: log.clone(), - block_hash: Some(block_num_hash.hash), - block_number: Some(block_num_hash.number), - block_timestamp: Some(block.header().timestamp()), - transaction_hash: Some(transaction_hash), - transaction_index: Some(transaction_index as u64), - log_index: Some(log_index as u64), - removed: false, - }) - } else { - None - } - }) - }) + .enumerate() + .filter(|(_, (_, _, log))| filter.matches(log)) + .map(|(log_idx, (tx_idx, tx_hash, log))| Log { + inner: log.clone(), + block_hash: Some(block_hash), + block_number: Some(block_number), + block_timestamp: Some(block_timestamp), + transaction_hash: Some(tx_hash), + transaction_index: Some(tx_idx as u64), + log_index: Some(log_idx as u64), + removed: false, }) - .collect() + .collect(); + + SubscriptionBuffer::Log(logs) } - 
/// Apply the filter to a [`CanonStateNotification`] - pub fn filter_notification_for_sub( + /// Apply the filter to a [`NewBlockNotification`], producing a + /// subscription buffer. + pub(crate) fn filter_notification_for_sub( &self, - notif: &CanonStateNotification, + notif: &NewBlockNotification, ) -> SubscriptionBuffer { - if self.is_block() { self.apply_block(notif) } else { self.apply_filter(notif) } + if self.is_block() { Self::apply_block(notif) } else { self.apply_filter(notif) } } /// Return an empty output of the same kind as this filter. - pub const fn empty_output(&self) -> FilterOutput { + pub(crate) const fn empty_output(&self) -> FilterOutput { match self { Self::Log(_) => FilterOutput::Log(VecDeque::new()), Self::Block => FilterOutput::Block(VecDeque::new()), @@ -108,7 +88,7 @@ impl InterestKind { } /// Return an empty subscription buffer of the same kind as this filter. - pub const fn empty_sub_buffer(&self) -> SubscriptionBuffer { + pub(crate) const fn empty_sub_buffer(&self) -> SubscriptionBuffer { match self { Self::Log(_) => SubscriptionBuffer::Log(VecDeque::new()), Self::Block => SubscriptionBuffer::Block(VecDeque::new()), diff --git a/crates/rpc/src/interest/mod.rs b/crates/rpc/src/interest/mod.rs index f33044b..644d64b 100644 --- a/crates/rpc/src/interest/mod.rs +++ b/crates/rpc/src/interest/mod.rs @@ -1,8 +1,58 @@ -mod filters; -pub(crate) use filters::{ActiveFilter, FilterManager, FilterOutput}; +//! Filter and subscription management for storage-backed RPC. +//! +//! This module implements two managers that track client-registered +//! interests in chain events: +//! +//! - **[`FilterManager`]** — manages poll-based filters created via +//! `eth_newFilter` and `eth_newBlockFilter`. Clients poll with +//! `eth_getFilterChanges` to retrieve accumulated results. +//! +//! - **[`SubscriptionManager`]** — manages push-based subscriptions +//! created via `eth_subscribe`. Matching events are forwarded to +//! 
the client over the notification channel (WebSocket / SSE). +//! +//! # Architecture +//! +//! Both managers wrap a shared `Arc` containing a [`DashMap`] +//! that maps client-assigned IDs to their active state. This makes +//! both types cheaply clonable — cloning just increments an `Arc` +//! reference count. +//! +//! # Resource lifecycle +//! +//! Each manager spawns a **background OS thread** that periodically +//! cleans up stale entries. The cleanup threads hold a [`Weak`] +//! reference to the `Arc`, so they self-terminate once all +//! strong references are dropped. +//! +//! OS threads are used (rather than tokio tasks) because +//! [`DashMap::retain`] can deadlock if called from an async context +//! that also holds a `DashMap` read guard on the same shard. Running +//! cleanup on a dedicated OS thread ensures the retain lock is never +//! contended with an in-flight async handler. +//! +//! [`Weak`]: std::sync::Weak +//! [`DashMap`]: dashmap::DashMap +//! [`DashMap::retain`]: dashmap::DashMap::retain +mod buffer; +mod filters; +pub(crate) use filters::{FilterManager, FilterOutput}; mod kind; pub(crate) use kind::InterestKind; - mod subs; pub(crate) use subs::SubscriptionManager; + +/// Notification sent when a new block is available in storage. +/// +/// The caller constructs and sends these via a +/// [`tokio::sync::broadcast::Sender`]. +#[derive(Debug, Clone)] +pub struct NewBlockNotification { + /// The block header. + pub header: alloy::consensus::Header, + /// Transactions in the block. + pub transactions: Vec, + /// Receipts for the block. + pub receipts: Vec, +} diff --git a/crates/rpc/src/interest/subs.rs b/crates/rpc/src/interest/subs.rs index c46b749..3df1a00 100644 --- a/crates/rpc/src/interest/subs.rs +++ b/crates/rpc/src/interest/subs.rs @@ -1,15 +1,13 @@ -use crate::interest::InterestKind; +//! Subscription management for `eth_subscribe` / `eth_unsubscribe`. 
+ +use crate::interest::{ + InterestKind, NewBlockNotification, + buffer::{EventBuffer, EventItem}, +}; use ajj::HandlerCtx; -use alloy::{primitives::U64, rpc::types::Log}; +use alloy::primitives::U64; use dashmap::DashMap; -use reth::{ - providers::{CanonStateNotifications, CanonStateSubscriptions, providers::BlockchainProvider}, - rpc::types::Header, -}; -use signet_node_types::Pnt; use std::{ - cmp::min, - collections::VecDeque, future::pending, sync::{ Arc, Weak, @@ -17,17 +15,12 @@ use std::{ }, time::Duration, }; -use tokio::sync::broadcast::error::RecvError; +use tokio::sync::broadcast::{self, error::RecvError}; use tokio_util::sync::{CancellationToken, WaitForCancellationFutureOwned}; use tracing::{Instrument, debug, debug_span, enabled, trace}; -/// Either type for subscription outputs. -#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize)] -#[serde(untagged)] -pub enum Either { - Log(Box), - Block(Box
), -} +/// Buffer for subscription outputs: log entries or block headers. +pub(crate) type SubscriptionBuffer = EventBuffer; /// JSON-RPC subscription notification envelope. #[derive(serde::Serialize)] @@ -40,79 +33,10 @@ struct SubscriptionNotification<'a> { /// Params field of a subscription notification. #[derive(serde::Serialize)] struct SubscriptionParams<'a> { - result: &'a Either, + result: &'a EventItem, subscription: U64, } -/// Buffer for subscription outputs. -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum SubscriptionBuffer { - Log(VecDeque), - Block(VecDeque
), -} - -impl SubscriptionBuffer { - /// True if the buffer is empty. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Get the number of items in the buffer. - pub fn len(&self) -> usize { - match self { - Self::Log(buf) => buf.len(), - Self::Block(buf) => buf.len(), - } - } - - /// Extend this buffer with another buffer. - /// - /// # Panics - /// - /// Panics if the buffers are of different types. - pub fn extend(&mut self, other: Self) { - match (self, other) { - (Self::Log(buf), Self::Log(other)) => buf.extend(other), - (Self::Block(buf), Self::Block(other)) => buf.extend(other), - _ => panic!("mismatched buffer types"), - } - } - - /// Pop the front of the buffer. - pub fn pop_front(&mut self) -> Option { - match self { - Self::Log(buf) => buf.pop_front().map(|log| Either::Log(Box::new(log))), - Self::Block(buf) => buf.pop_front().map(|header| Either::Block(Box::new(header))), - } - } -} - -impl From> for SubscriptionBuffer { - fn from(logs: Vec) -> Self { - Self::Log(logs.into()) - } -} - -impl FromIterator for SubscriptionBuffer { - fn from_iter>(iter: T) -> Self { - let inner: VecDeque<_> = iter.into_iter().collect(); - Self::Log(inner) - } -} - -impl From> for SubscriptionBuffer { - fn from(headers: Vec
) -> Self { - Self::Block(headers.into()) - } -} - -impl FromIterator
for SubscriptionBuffer { - fn from_iter>(iter: T) -> Self { - let inner: VecDeque<_> = iter.into_iter().collect(); - Self::Block(inner) - } -} - /// Tracks ongoing subscription tasks. /// /// Performs the following functions: @@ -124,30 +48,33 @@ impl FromIterator
for SubscriptionBuffer { /// This task runs on a separate thread to avoid [`DashMap::retain`] deadlock. /// See [`DashMap`] documentation for more information. #[derive(Clone)] -pub struct SubscriptionManager { - inner: Arc>, +pub(crate) struct SubscriptionManager { + inner: Arc, } -impl SubscriptionManager { +impl SubscriptionManager { /// Instantiate a new subscription manager, start a task to clean up - /// subscriptions cancelled by user disconnection - pub fn new(provider: BlockchainProvider, clean_interval: Duration) -> Self { - let inner = Arc::new(SubscriptionManagerInner::new(provider)); + /// subscriptions cancelled by user disconnection. + pub(crate) fn new( + notif_sender: broadcast::Sender, + clean_interval: Duration, + ) -> Self { + let inner = Arc::new(SubscriptionManagerInner::new(notif_sender)); let task = SubCleanerTask::new(Arc::downgrade(&inner), clean_interval); task.spawn(); Self { inner } } } -impl core::ops::Deref for SubscriptionManager { - type Target = SubscriptionManagerInner; +impl core::ops::Deref for SubscriptionManager { + type Target = SubscriptionManagerInner; fn deref(&self) -> &Self::Target { &self.inner } } -impl core::fmt::Debug for SubscriptionManager { +impl core::fmt::Debug for SubscriptionManager { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("SubscriptionManager").finish_non_exhaustive() } @@ -155,19 +82,16 @@ impl core::fmt::Debug for SubscriptionManager { /// Inner logic for [`SubscriptionManager`]. #[derive(Debug)] -pub struct SubscriptionManagerInner -where - N: Pnt, -{ +pub(crate) struct SubscriptionManagerInner { next_id: AtomicU64, tasks: DashMap, - provider: BlockchainProvider, + notif_sender: broadcast::Sender, } -impl SubscriptionManagerInner { +impl SubscriptionManagerInner { /// Create a new subscription manager. 
- pub fn new(provider: BlockchainProvider) -> Self { - Self { next_id: AtomicU64::new(1), tasks: DashMap::new(), provider } + fn new(notif_sender: broadcast::Sender) -> Self { + Self { next_id: AtomicU64::new(1), tasks: DashMap::new(), notif_sender } } /// Assign a new subscription ID. @@ -176,7 +100,7 @@ impl SubscriptionManagerInner { } /// Cancel a subscription task. - pub fn unsubscribe(&self, id: U64) -> bool { + pub(crate) fn unsubscribe(&self, id: U64) -> bool { if let Some(task) = self.tasks.remove(&id) { task.1.cancel(); true @@ -187,18 +111,19 @@ impl SubscriptionManagerInner { /// Subscribe to notifications. Returns `None` if notifications are /// disabled. - pub fn subscribe(&self, ajj_ctx: &HandlerCtx, filter: InterestKind) -> Option { + pub(crate) fn subscribe(&self, ajj_ctx: &HandlerCtx, filter: InterestKind) -> Option { if !ajj_ctx.notifications_enabled() { return None; } let id = self.next_id(); let token = CancellationToken::new(); + self.tasks.insert(id, token.clone()); let task = SubscriptionTask { id, filter, token: token.clone(), - notifs: self.provider.subscribe_to_canonical_state(), + notifs: self.notif_sender.subscribe(), }; task.spawn(ajj_ctx); @@ -214,16 +139,12 @@ struct SubscriptionTask { id: U64, filter: InterestKind, token: CancellationToken, - notifs: CanonStateNotifications, + notifs: broadcast::Receiver, } impl SubscriptionTask { /// Create the task future. 
- pub(crate) async fn task_future( - self, - ajj_ctx: HandlerCtx, - ajj_cancel: WaitForCancellationFutureOwned, - ) { + async fn task_future(self, ajj_ctx: HandlerCtx, ajj_cancel: WaitForCancellationFutureOwned) { let SubscriptionTask { id, filter, token, mut notifs } = self; if !ajj_ctx.notifications_enabled() { @@ -245,7 +166,7 @@ impl SubscriptionTask { let permit_fut = async { if !notif_buffer.is_empty() { ajj_ctx - .permit_many(min(ajj_ctx.notification_capacity() / 2, notif_buffer.len())) + .permit_many((ajj_ctx.notification_capacity() / 2).min(notif_buffer.len())) .await } else { pending().await @@ -292,7 +213,7 @@ impl SubscriptionTask { continue; } Err(e) => { - trace!(?e, "CanonStateNotifications stream closed"); + trace!(?e, "notification stream closed"); break; } }; @@ -308,7 +229,7 @@ impl SubscriptionTask { } /// Spawn on the ajj [`HandlerCtx`]. - pub(crate) fn spawn(self, ctx: &HandlerCtx) { + fn spawn(self, ctx: &HandlerCtx) { ctx.spawn_graceful_with_ctx(|ctx, ajj_cancel| self.task_future(ctx, ajj_cancel)); } } @@ -317,29 +238,27 @@ impl SubscriptionTask { /// /// This task runs on a separate thread to avoid [`DashMap::retain`] deadlocks. #[derive(Debug)] -pub(super) struct SubCleanerTask { - inner: Weak>, - interval: std::time::Duration, +struct SubCleanerTask { + inner: Weak, + interval: Duration, } -impl SubCleanerTask { +impl SubCleanerTask { /// Create a new subscription cleaner task. - pub(super) const fn new( - inner: Weak>, - interval: std::time::Duration, - ) -> Self { + const fn new(inner: Weak, interval: Duration) -> Self { Self { inner, interval } } /// Run the task. This task runs on a separate thread, which ensures that /// [`DashMap::retain`]'s deadlock condition is not met. See [`DashMap`] /// documentation for more information. 
- pub(super) fn spawn(self) { + fn spawn(self) { std::thread::spawn(move || { loop { std::thread::sleep(self.interval); - if let Some(inner) = self.inner.upgrade() { - inner.tasks.retain(|_, task| !task.is_cancelled()); + match self.inner.upgrade() { + Some(inner) => inner.tasks.retain(|_, task| !task.is_cancelled()), + None => break, } } }); diff --git a/crates/rpc/src/lib.rs b/crates/rpc/src/lib.rs index ea1a5aa..b1e68d6 100644 --- a/crates/rpc/src/lib.rs +++ b/crates/rpc/src/lib.rs @@ -1,11 +1,4 @@ -//! Signet RPC. -//! -//! This crate provides RPC endpoint definitions for the Signet node, as well -//! as the glue between the node and the RPC server. This RPC server is deeply -//! integrated with `reth`, and expects a variety of `reth`-specific types to be -//! passed in. As such, it is mostly useful within the context of a `signet` -//! node. - +#![doc = include_str!("../README.md")] #![warn( missing_copy_implementations, missing_debug_implementations, @@ -18,53 +11,30 @@ #![deny(unused_must_use, rust_2018_idioms)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -mod ctx; -pub use ctx::{LoadState, RpcCtx, SignetCtx}; - -mod debug; -pub use debug::{DebugError, debug}; +pub(crate) mod config; +pub use config::{BlockTags, StorageRpcConfig, StorageRpcCtx, SyncStatus}; mod eth; -pub use eth::{CallErrorData, EthError, eth}; - -mod signet; -pub use signet::{error::SignetError, signet}; - -mod inspect; -pub use inspect::inspect; +pub use eth::EthError; mod interest; +pub use interest::NewBlockNotification; -pub mod receipts; - -mod utils; - -/// Re-exported for convenience -pub use ::ajj; +mod debug; +pub use debug::DebugError; -use ajj::Router; -use reth::providers::{ProviderFactory, providers::ProviderNodeTypes}; -use reth_db::mdbx::DatabaseEnv; -use reth_node_api::FullNodeComponents; -use signet_node_types::Pnt; -use std::sync::Arc; +mod signet; +pub use signet::error::SignetError; -/// Create a new router with the given host and signet types. 
-pub fn router() -> Router> +/// Instantiate a combined router with `eth`, `debug`, and `signet` +/// namespaces. +pub fn router() -> ajj::Router> where - Host: FullNodeComponents, - Signet: Pnt, + H: signet_hot::HotKv + Send + Sync + 'static, + ::Error: trevm::revm::database::DBErrorMarker, { ajj::Router::new() - .nest("eth", eth::()) - .nest("signet", signet::()) - .nest("debug", debug::()) -} - -/// Create a new hazmat router that exposes the `inspect` API. -pub fn hazmat_router() -> Router> -where - Signet: Pnt + ProviderNodeTypes>, -{ - ajj::Router::new().nest("inspect", inspect::inspect::()) + .nest("eth", eth::eth()) + .nest("debug", debug::debug()) + .nest("signet", signet::signet()) } diff --git a/crates/rpc/src/receipts.rs b/crates/rpc/src/receipts.rs deleted file mode 100644 index 6e45e95..0000000 --- a/crates/rpc/src/receipts.rs +++ /dev/null @@ -1,134 +0,0 @@ -//! Signet RPC receipt response builder. - -use alloy::{ - consensus::{ReceiptEnvelope, Transaction, TxReceipt, transaction::TransactionMeta}, - primitives::{Address, TxKind}, - rpc::types::eth::{Log, ReceiptWithBloom, TransactionReceipt}, -}; -use reth::primitives::{Receipt, TransactionSigned}; -use reth::rpc::server_types::eth::{EthApiError, EthResult}; -use reth::{core::primitives::SignerRecoverable, primitives::TxType}; -use signet_types::MagicSig; - -/// Builds an [`TransactionReceipt`] obtaining the inner receipt envelope from the given closure. -pub fn build_signet_receipt( - transaction: TransactionSigned, - meta: TransactionMeta, - receipt: Receipt, - all_receipts: Vec, -) -> EthResult>> { - // Recover the transaction sender. - // Some transactions are emitted by Signet itself in behalf of the sender, - // in which case they'll use [`MagicSig`]s to preserve the sender with additional metadata. - // Therefore, in case recovering the signer fails, we try to parse the signature as a magic signature. 
- let from = MagicSig::try_from_signature(transaction.signature()) - .map(|magic_sig| magic_sig.rollup_sender()) - .or_else(|| transaction.recover_signer_unchecked().ok()) - .ok_or_else(|| EthApiError::InvalidTransactionSignature)?; - - // get the previous transaction cumulative gas used - let prev_cumulative = meta - .index - .checked_sub(1) - .and_then(|i| all_receipts.get(i as usize)) - .map(|r| r.cumulative_gas_used()) - .unwrap_or_default(); - - let gas_used = receipt.cumulative_gas_used() - prev_cumulative; - - let logs_bloom = receipt.bloom(); - let receipt_status = receipt.status_or_post_state(); - let receipt_cumulative_gas_used = receipt.cumulative_gas_used(); - - // get number of logs in the block - let num_logs: u64 = - all_receipts.iter().take(meta.index as usize).map(|r| r.logs().len() as u64).sum(); - - // Retrieve all corresponding logs for the receipt. - let logs: Vec = receipt - .logs - .into_iter() - .enumerate() - .map(|(tx_log_idx, log)| Log { - inner: log, - block_hash: Some(meta.block_hash), - block_number: Some(meta.block_number), - block_timestamp: Some(meta.timestamp), - transaction_hash: Some(meta.tx_hash), - transaction_index: Some(meta.index), - log_index: Some(num_logs + tx_log_idx as u64), - removed: false, - }) - .collect(); - - let rpc_receipt = alloy::rpc::types::eth::Receipt { - status: receipt_status, - cumulative_gas_used: receipt_cumulative_gas_used, - logs, - }; - - let (contract_address, to) = match transaction.kind() { - TxKind::Create => (Some(from.create(transaction.nonce())), None), - TxKind::Call(addr) => (None, Some(Address(*addr))), - }; - - Ok(TransactionReceipt { - inner: build_envelope( - ReceiptWithBloom { receipt: rpc_receipt, logs_bloom }, - transaction.tx_type(), - ), - transaction_hash: meta.tx_hash, - transaction_index: Some(meta.index), - block_hash: Some(meta.block_hash), - block_number: Some(meta.block_number), - from, - to, - gas_used, - contract_address, - effective_gas_price: 
transaction.effective_gas_price(meta.base_fee), - // Signet does not support EIP-4844, so these fields are always None. - blob_gas_price: None, - blob_gas_used: None, - }) -} - -fn build_envelope( - receipt_with_bloom: ReceiptWithBloom>, - tx_type: TxType, -) -> ReceiptEnvelope { - match tx_type { - TxType::Legacy => ReceiptEnvelope::Legacy(receipt_with_bloom), - TxType::Eip2930 => ReceiptEnvelope::Eip2930(receipt_with_bloom), - TxType::Eip1559 => ReceiptEnvelope::Eip1559(receipt_with_bloom), - TxType::Eip4844 => ReceiptEnvelope::Eip4844(receipt_with_bloom), - TxType::Eip7702 => ReceiptEnvelope::Eip7702(receipt_with_bloom), - #[allow(unreachable_patterns)] - _ => unreachable!(), - } -} - -// Some code in this file has been copied and modified from reth -// -// The original license is included below: -// -// The MIT License (MIT) -// -// Copyright (c) 2022-2025 Reth Contributors -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -//. -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
diff --git a/crates/rpc/src/signet/endpoints.rs b/crates/rpc/src/signet/endpoints.rs index 10ed087..a863d65 100644 --- a/crates/rpc/src/signet/endpoints.rs +++ b/crates/rpc/src/signet/endpoints.rs @@ -1,66 +1,78 @@ +//! Signet namespace RPC endpoint implementations. + use crate::{ - ctx::RpcCtx, + config::{EvmBlockContext, StorageRpcCtx}, + eth::helpers::{CfgFiller, await_handler, response_tri}, signet::error::SignetError, - utils::{await_handler, response_tri}, }; use ajj::{HandlerCtx, ResponsePayload}; -use reth_node_api::FullNodeComponents; +use alloy::eips::BlockId; use signet_bundle::{SignetBundleDriver, SignetCallBundle, SignetCallBundleResponse}; -use signet_node_types::Pnt; +use signet_hot::{HotKv, model::HotKvRead}; use signet_types::SignedOrder; use std::time::Duration; use tokio::select; +use trevm::revm::database::DBErrorMarker; -pub(super) async fn send_order( +/// `signet_sendOrder` handler. +pub(super) async fn send_order( hctx: HandlerCtx, order: SignedOrder, - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> Result<(), String> where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let task = |hctx: HandlerCtx| async move { - let Some(tx_cache) = ctx.signet().tx_cache() else { - return Err(SignetError::TxCacheUrlNotProvided.into_string()); - }; - - hctx.spawn(async move { tx_cache.forward_order(order).await.map_err(|e| e.to_string()) }); + let Some(tx_cache) = ctx.tx_cache().cloned() else { + return Err(SignetError::TxCacheNotProvided.to_string()); + }; + let task = |hctx: HandlerCtx| async move { + hctx.spawn(async move { + if let Err(e) = tx_cache.forward_order(order).await { + tracing::warn!(error = %e, "failed to forward order"); + } + }); Ok(()) }; await_handler!(@option hctx.spawn_blocking_with_ctx(task)) } -pub(super) async fn call_bundle( +/// `signet_callBundle` handler. 
+pub(super) async fn call_bundle( hctx: HandlerCtx, bundle: SignetCallBundle, - ctx: RpcCtx, -) -> ResponsePayload + ctx: StorageRpcCtx, +) -> ResponsePayload where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let timeout = bundle.bundle.timeout.unwrap_or(1000); + let timeout = bundle.bundle.timeout.unwrap_or(ctx.config().default_bundle_timeout_ms); let task = async move { let id = bundle.state_block_number(); - let block_cfg = match ctx.signet().block_cfg(id.into()).await { - Ok(block_cfg) => block_cfg, - Err(e) => { - return ResponsePayload::internal_error_with_message_and_obj( - "error while loading block cfg".into(), - e.to_string(), - ); - } - }; + let block_id: BlockId = id.into(); + + let EvmBlockContext { header, db } = + response_tri!(ctx.resolve_evm_block(block_id).map_err(|e| { + tracing::warn!(error = %e, ?block_id, "block resolution failed for bundle"); + SignetError::Resolve(e.to_string()) + })); let mut driver = SignetBundleDriver::from(&bundle); - let trevm = response_tri!(ctx.trevm(id.into(), &block_cfg)); + let trevm = signet_evm::signet_evm(db, ctx.constants().clone()) + .fill_cfg(&CfgFiller(ctx.chain_id())) + .fill_block(&header); - response_tri!(trevm.drive_bundle(&mut driver).map_err(|e| e.into_error())); + response_tri!(trevm.drive_bundle(&mut driver).map_err(|e| { + let e = e.into_error(); + tracing::warn!(error = %e, "evm error during bundle simulation"); + SignetError::Evm(e.to_string()) + })); ResponsePayload(Ok(driver.into_response())) }; @@ -69,7 +81,7 @@ where select! 
{ _ = tokio::time::sleep(Duration::from_millis(timeout)) => { ResponsePayload::internal_error_message( - "timeout during bundle simulation".into(), + SignetError::Timeout.to_string().into(), ) } result = task => { @@ -78,5 +90,5 @@ where } }; - await_handler!(@response_option hctx.spawn_blocking(task)) + await_handler!(@response_option hctx.spawn(task)) } diff --git a/crates/rpc/src/signet/error.rs b/crates/rpc/src/signet/error.rs index 2225c1d..83570ab 100644 --- a/crates/rpc/src/signet/error.rs +++ b/crates/rpc/src/signet/error.rs @@ -1,23 +1,27 @@ -//! Signet RPC errors. +//! Error types for the signet namespace. -use reth::rpc::server_types::eth::EthApiError; - -/// Errors that can occur when interacting with the `signet` namespace. -#[derive(Debug, thiserror::Error)] +/// Errors that can occur in the `signet` namespace. +#[derive(Debug, Clone, thiserror::Error)] pub enum SignetError { - /// The transaction cache URL was not provided. - #[error("transaction cache URL not provided")] - TxCacheUrlNotProvided, - /// An error coming from interacting with components - /// that could emit `EthApiError`s, such as the tx-cache. - #[error(transparent)] - EthApiError(#[from] EthApiError), + /// The transaction cache was not provided. + #[error("transaction cache not provided")] + TxCacheNotProvided, + /// Block resolution failed. + #[error("block resolution error")] + Resolve(String), + /// EVM execution error. + #[error("evm execution error")] + Evm(String), + /// Bundle simulation timed out. + #[error("timeout during bundle simulation")] + Timeout, } -impl SignetError { - /// Turn into a string by value, allows for `.map_err(SignetError::to_string)` - /// to be used. 
- pub fn into_string(self) -> String { - ToString::to_string(&self) +impl serde::Serialize for SignetError { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.serialize_str(&self.to_string()) } } diff --git a/crates/rpc/src/signet/mod.rs b/crates/rpc/src/signet/mod.rs index c882000..ad3fdd7 100644 --- a/crates/rpc/src/signet/mod.rs +++ b/crates/rpc/src/signet/mod.rs @@ -1,19 +1,18 @@ //! Signet RPC methods and related code. mod endpoints; -use endpoints::*; - +use endpoints::{call_bundle, send_order}; pub(crate) mod error; -use crate::ctx::RpcCtx; -use reth_node_api::FullNodeComponents; -use signet_node_types::Pnt; +use crate::config::StorageRpcCtx; +use signet_hot::{HotKv, model::HotKvRead}; +use trevm::revm::database::DBErrorMarker; -/// Instantiate a `signet` API router. -pub fn signet() -> ajj::Router> +/// Instantiate a `signet` API router backed by storage. +pub(crate) fn signet() -> ajj::Router> where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - ajj::Router::new().route("sendOrder", send_order).route("callBundle", call_bundle) + ajj::Router::new().route("sendOrder", send_order::).route("callBundle", call_bundle::) } diff --git a/crates/rpc/src/utils.rs b/crates/rpc/src/utils.rs deleted file mode 100644 index 0092d8e..0000000 --- a/crates/rpc/src/utils.rs +++ /dev/null @@ -1,105 +0,0 @@ -use std::{iter::StepBy, ops::RangeInclusive}; - -macro_rules! 
await_handler { - ($h:expr) => { - match $h.await { - Ok(res) => res, - Err(_) => return Err("task panicked or cancelled".to_string()), - } - }; - - (@option $h:expr) => { - match $h.await { - Ok(Some(res)) => res, - _ => return Err("task panicked or cancelled".to_string()), - } - }; - - (@response $h:expr) => { - match $h.await { - Ok(res) => res, - _ => { - return ResponsePayload::internal_error_message(std::borrow::Cow::Borrowed( - "task panicked or cancelled", - )) - } - } - }; - - (@response_option $h:expr) => { - match $h.await { - Ok(Some(res)) => res, - _ => { - return ResponsePayload::internal_error_message(std::borrow::Cow::Borrowed( - "task panicked or cancelled", - )) - } - } - }; -} - -pub(crate) use await_handler; - -macro_rules! response_tri { - ($h:expr) => { - match $h { - Ok(res) => res, - Err(err) => return ResponsePayload::internal_error_message(err.to_string().into()), - } - }; - - ($h:expr, $msg:literal) => { - match $h { - Ok(res) => res, - Err(_) => return ResponsePayload::internal_error_message($msg.into()), - } - }; - - ($h:expr, $obj:expr) => { - match $h { - Ok(res) => res, - Err(err) => returnResponsePayload::internal_error_with_message_and_obj( - err.to_string().into(), - $obj, - ), - } - }; - - ($h:expr, $msg:literal, $obj:expr) => { - match $h { - Ok(res) => res, - Err(err) => { - return ResponsePayload::internal_error_with_message_and_obj($msg.into(), $obj) - } - } - }; -} - -pub(crate) use response_tri; - -/// An iterator that yields _inclusive_ block ranges of a given step size -#[derive(Debug)] -pub(crate) struct BlockRangeInclusiveIter { - iter: StepBy>, - step: u64, - end: u64, -} - -impl BlockRangeInclusiveIter { - pub(crate) fn new(range: RangeInclusive, step: u64) -> Self { - Self { end: *range.end(), iter: range.step_by(step as usize + 1), step } - } -} - -impl Iterator for BlockRangeInclusiveIter { - type Item = (u64, u64); - - fn next(&mut self) -> Option { - let start = self.iter.next()?; - let end = (start + 
self.step).min(self.end); - if start > end { - return None; - } - Some((start, end)) - } -} diff --git a/crates/rpc-storage/tests/eth_rpc.rs b/crates/rpc/tests/eth_rpc.rs similarity index 97% rename from crates/rpc-storage/tests/eth_rpc.rs rename to crates/rpc/tests/eth_rpc.rs index 24d7d8b..09675b8 100644 --- a/crates/rpc-storage/tests/eth_rpc.rs +++ b/crates/rpc/tests/eth_rpc.rs @@ -1,4 +1,4 @@ -//! Integration tests for the `signet-rpc-storage` ETH RPC endpoints. +//! Integration tests for the `signet-rpc` ETH RPC endpoints. //! //! Tests exercise the public router API via the axum service layer, using //! in-memory storage backends (`MemKv` + `MemColdBackend`). @@ -9,6 +9,7 @@ use alloy::{ TxType, transaction::Recovered, }, primitives::{Address, B256, Log as PrimitiveLog, LogData, TxKind, U256, address, logs_bloom}, + signers::{SignerSync, local::PrivateKeySigner}, }; use axum::body::Body; use http::Request; @@ -16,12 +17,14 @@ use serde_json::{Value, json}; use signet_cold::{BlockData, ColdStorageHandle, ColdStorageTask, mem::MemColdBackend}; use signet_constants::SignetSystemConstants; use signet_hot::{HotKv, db::UnsafeDbWrite, mem::MemKv}; -use signet_rpc_storage::{BlockTags, NewBlockNotification, StorageRpcConfig, StorageRpcCtx}; +use signet_rpc::{BlockTags, NewBlockNotification, StorageRpcConfig, StorageRpcCtx}; use signet_storage::UnifiedStorage; -use signet_storage_types::{Receipt, SealedHeader}; +use signet_storage_types::{Account, Receipt, SealedHeader}; +use std::sync::Arc; use tokio::sync::broadcast; use tokio_util::sync::CancellationToken; use tower::ServiceExt; +use trevm::revm::bytecode::Bytecode; // --------------------------------------------------------------------------- // Test helpers @@ -51,14 +54,14 @@ impl TestHarness { let tags = BlockTags::new(latest, latest.saturating_sub(2), 0); let (notif_tx, _) = broadcast::channel::(16); let ctx = StorageRpcCtx::new( - storage, + Arc::new(storage), constants, tags.clone(), None, 
StorageRpcConfig::default(), notif_tx.clone(), ); - let app = signet_rpc_storage::router::().into_axum("/").with_state(ctx.clone()); + let app = signet_rpc::router::().into_axum("/").with_state(ctx.clone()); Self { app, cold, hot, tags, notif_tx, ctx, _cancel: cancel } } @@ -136,8 +139,6 @@ fn make_signed_tx_with_gas_price( nonce: u64, gas_price: u128, ) -> (signet_storage_types::RecoveredTx, Address) { - use alloy::signers::{SignerSync, local::PrivateKeySigner}; - let signer = PrivateKeySigner::from_signing_key( alloy::signers::k256::ecdsa::SigningKey::from_slice( &B256::repeat_byte((nonce as u8).wrapping_add(1)).0, @@ -410,9 +411,6 @@ async fn test_get_block_receipts() { /// Populate hot storage with a test account. fn setup_hot_account(hot: &MemKv) { - use signet_storage_types::Account; - use trevm::revm::bytecode::Bytecode; - let writer = hot.writer().unwrap(); let code = alloy::primitives::Bytes::from_static(&[0x60, 0x00, 0x60, 0x00, 0xf3]); @@ -621,7 +619,7 @@ async fn test_syncing_not_syncing() { #[tokio::test] async fn test_syncing_in_progress() { let h = TestHarness::new(0).await; - h.tags.set_sync_status(signet_rpc_storage::SyncStatus { + h.tags.set_sync_status(signet_rpc::SyncStatus { starting_block: 0, current_block: 50, highest_block: 100, @@ -685,8 +683,8 @@ async fn test_gas_price_empty_blocks() { let result = rpc_call(&h.app, "eth_gasPrice", json!([])).await; - // No txs means tip = 0, gasPrice = base_fee = 1e9 = 0x3b9aca00 - assert_eq!(result, json!("0x3b9aca00")); + // No txs → tip defaults to 1 Gwei, gasPrice = base_fee + tip = 2 Gwei + assert_eq!(result, json!("0x77359400")); } #[tokio::test] @@ -817,8 +815,6 @@ async fn test_uninstall_filter() { /// The genesis header at block 0 is required so `revm_reader_at_height` /// can validate height bounds. Without it, MemKv returns `NoBlocks`. 
fn setup_hot_for_evm(hot: &MemKv, addr: Address, balance: U256) { - use signet_storage_types::{Account, SealedHeader}; - let writer = hot.writer().unwrap(); // Write a genesis header so the hot storage tracks block 0. From 61043ac973f75043da3bc476ad7f08ef759c14c7 Mon Sep 17 00:00:00 2001 From: James Date: Mon, 16 Feb 2026 08:12:44 -0500 Subject: [PATCH 31/31] refactor: address remaining PR #75 review threads - Remove `not_supported` pattern; unregistered methods return `method_not_found` by default in ajj (threads 14, 15) - Simplify `SubscriptionNotification`: hardcode `jsonrpc`/`method` in manual `Serialize` impl instead of storing as fields (threads 18, 19) - Change `send_order` return type to `ResponsePayload<(), SignetError>` and add fire-and-forget doc comment (threads 5, 7) - Use `ctx.resolve_header()` (hot storage) instead of `cold.get_header_by_number()` in debug endpoints (threads 12, 13) - Replace `DebugError::Cold(String)` / `Hot(String)` with concrete `#[from]` error types; remove `Clone` derive and `into_string` (thread 10) - Parallelize gas oracle transaction reads with `JoinSet` (thread 21) - Add `tokio-stream` and `tokio-util` to workspace deps (thread 22) - Add blob gas vec compatibility comment (thread 23) - Add `State` / `DatabaseCommit` documentation (thread 20) - Update README with namespace overview and unsupported methods (thread 16) - Add blank lines in lib.rs between module groups (thread 2) Co-Authored-By: Claude Opus 4.6 --- Cargo.toml | 2 ++ crates/rpc/Cargo.toml | 6 ++-- crates/rpc/README.md | 53 ++++++++++++++++++++++----- crates/rpc/src/config/ctx.rs | 17 +++++++-- crates/rpc/src/config/gas_oracle.rs | 28 +++++++++++---- crates/rpc/src/debug/endpoints.rs | 56 ++++++++++++++--------------- crates/rpc/src/debug/error.rs | 13 ++----- crates/rpc/src/eth/endpoints.rs | 9 +---- crates/rpc/src/eth/mod.rs | 27 +++++--------- crates/rpc/src/interest/subs.rs | 23 +++++++++--- crates/rpc/src/signet/endpoints.rs | 12 ++++--- 11 files changed, 150 
insertions(+), 96 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index af7af2e..c404c5b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -87,6 +87,8 @@ foundry-blob-explorers = "0.17" # Async tokio = { version = "1.43.0", features = ["macros"] } +tokio-stream = "0.1" +tokio-util = "0.7" # Misc eyre = "0.6.12" diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index fdf1910..23c35ba 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -22,8 +22,8 @@ signet-bundle.workspace = true alloy.workspace = true ajj.workspace = true tokio.workspace = true -tokio-stream = "0.1" -tokio-util = "0.7" +tokio-stream.workspace = true +tokio-util.workspace = true tracing.workspace = true thiserror.workspace = true serde.workspace = true @@ -33,7 +33,7 @@ itertools.workspace = true [dev-dependencies] tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } -tokio-util = "0.7" +tokio-util.workspace = true signet-cold = { workspace = true, features = ["test-utils"] } signet-hot = { workspace = true, features = ["test-utils"] } signet-storage.workspace = true diff --git a/crates/rpc/README.md b/crates/rpc/README.md index 1046e59..ff0eede 100644 --- a/crates/rpc/README.md +++ b/crates/rpc/README.md @@ -2,14 +2,51 @@ Ethereum JSON-RPC server backed by `signet-storage`'s unified storage backend. -This crate provides a standalone ETH RPC implementation that uses hot storage +This crate provides a standalone RPC implementation that uses hot storage for state queries and cold storage for block, transaction, and receipt data. -## Supported Methods +## Namespaces -- Block queries: `eth_blockNumber`, `eth_getBlockByHash`, `eth_getBlockByNumber`, etc. -- Transaction queries: `eth_getTransactionByHash`, `eth_getTransactionReceipt`, etc. 
-- Account state: `eth_getBalance`, `eth_getStorageAt`, `eth_getCode`, `eth_getTransactionCount` -- EVM execution: `eth_call`, `eth_estimateGas` -- Logs: `eth_getLogs` -- Transaction submission: `eth_sendRawTransaction` (optional, via `TxCache`) +### `eth` + +Standard Ethereum JSON-RPC methods: + +- Block queries: `blockNumber`, `getBlockByHash`, `getBlockByNumber`, + `getBlockTransactionCount*`, `getBlockReceipts`, `getBlockHeader*` +- Transaction queries: `getTransactionByHash`, `getTransactionReceipt`, + `getTransactionByBlock*AndIndex`, `getRawTransaction*` +- Account state: `getBalance`, `getStorageAt`, `getCode`, `getTransactionCount` +- EVM execution: `call`, `estimateGas`, `createAccessList` +- Gas/fees: `gasPrice`, `maxPriorityFeePerGas`, `feeHistory` +- Logs & filters: `getLogs`, `newFilter`, `newBlockFilter`, + `getFilterChanges`, `getFilterLogs`, `uninstallFilter` +- Subscriptions: `subscribe`, `unsubscribe` +- Transaction submission: `sendRawTransaction` (optional, via `TxCache`) +- Uncle queries: `getUncleCountByBlock*`, `getUncleByBlock*AndIndex` + (always return 0 / null — Signet has no uncle blocks) +- Misc: `chainId`, `syncing` + +### `debug` + +- `traceBlockByNumber`, `traceBlockByHash` — trace all transactions in a block +- `traceTransaction` — trace a single transaction by hash + +### `signet` + +- `sendOrder` — forward a signed order to the transaction cache +- `callBundle` — simulate a bundle against a specific block + +## Unsupported Methods + +The following `eth` methods are **not supported** and return +`method_not_found`: + +- **Mining**: `getWork`, `hashrate`, `mining`, `submitHashrate`, `submitWork` + — Signet does not use proof-of-work. +- **Account management**: `accounts`, `sign`, `signTransaction`, + `signTypedData`, `sendTransaction` — the RPC server does not hold keys. + Use `sendRawTransaction` with a pre-signed transaction instead. +- **Blob transactions**: `blobBaseFee` — Signet does not support EIP-4844 + blob transactions. 
+- **Other**: `protocolVersion`, `getProof`, `newPendingTransactionFilter`, + `coinbase`. diff --git a/crates/rpc/src/config/ctx.rs b/crates/rpc/src/config/ctx.rs index ac6c064..46f31a6 100644 --- a/crates/rpc/src/config/ctx.rs +++ b/crates/rpc/src/config/ctx.rs @@ -30,7 +30,13 @@ use trevm::revm::database::{DBErrorMarker, StateBuilder}; pub(crate) struct EvmBlockContext { /// The resolved block header. pub header: alloy::consensus::Header, - /// The revm database at the resolved height. + /// The revm database at the resolved height, wrapped in [`State`] to + /// provide [`DatabaseCommit`] support required by `eth_estimateGas` + /// (binary-search commits), `eth_createAccessList`, `signet_callBundle`, + /// and debug tracing. + /// + /// [`State`]: trevm::revm::database::State + /// [`DatabaseCommit`]: trevm::revm::database::DatabaseCommit pub db: trevm::revm::database::State, } @@ -225,8 +231,13 @@ impl StorageRpcCtx { /// Create a revm-compatible database at a specific block height. /// - /// The returned `State>` implements both `Database` and - /// `DatabaseCommit`, making it suitable for use with `signet_evm`. + /// Wraps the underlying `RevmRead` in [`State`] so that the returned + /// database implements both `Database` and `DatabaseCommit`. This is + /// needed by EVM operations that mutate intermediate state (gas + /// estimation, access-list generation, bundle simulation, debug + /// tracing). 
+ /// + /// [`State`]: trevm::revm::database::State pub fn revm_state_at_height( &self, height: u64, diff --git a/crates/rpc/src/config/gas_oracle.rs b/crates/rpc/src/config/gas_oracle.rs index 787c64c..76b1872 100644 --- a/crates/rpc/src/config/gas_oracle.rs +++ b/crates/rpc/src/config/gas_oracle.rs @@ -31,15 +31,29 @@ pub(crate) async fn suggest_tip_cap( let specs: Vec<_> = (start..=latest).map(HeaderSpecifier::Number).collect(); let headers = cold.get_headers(specs).await?; - let mut all_tips: Vec = Vec::new(); - - for (offset, maybe_header) in headers.into_iter().enumerate() { - let Some(header) = maybe_header else { continue }; - let base_fee = header.base_fee_per_gas.unwrap_or_default(); - let block_num = start + offset as u64; + // Collect blocks that have headers, then read their transactions + // in parallel to avoid sequential cold-storage round-trips. + let blocks_with_fees: Vec<_> = headers + .into_iter() + .enumerate() + .filter_map(|(offset, h)| { + h.map(|header| (start + offset as u64, header.base_fee_per_gas.unwrap_or_default())) + }) + .collect(); - let txs = cold.get_transactions_in_block(block_num).await?; + let mut join_set = tokio::task::JoinSet::new(); + for (block_num, base_fee) in &blocks_with_fees { + let cold = cold.clone(); + let block_num = *block_num; + let base_fee = *base_fee; + join_set.spawn(async move { + cold.get_transactions_in_block(block_num).await.map(|txs| (txs, base_fee)) + }); + } + let mut all_tips: Vec = Vec::new(); + while let Some(result) = join_set.join_next().await { + let (txs, base_fee) = result.expect("tx read task panicked")?; for tx in &txs { if let Some(tip) = tx.effective_tip_per_gas(base_fee) && config.ignore_price.is_none_or(|floor| tip >= floor) diff --git a/crates/rpc/src/debug/endpoints.rs b/crates/rpc/src/debug/endpoints.rs index 882e793..de6aa62 100644 --- a/crates/rpc/src/debug/endpoints.rs +++ b/crates/rpc/src/debug/endpoints.rs @@ -49,25 +49,25 @@ where DebugError::BlockNotFound(id) })); - let 
(header, txs) = response_tri!( - tokio::try_join!( - cold.get_header_by_number(block_num), - cold.get_transactions_in_block(block_num), - ) - .map_err(|e| { - tracing::warn!(error = %e, block_num, "cold storage read failed"); - DebugError::Cold(e.to_string()) - }) - ); - - let Some(header) = header else { + let sealed = + response_tri!(ctx.resolve_header(BlockId::Number(block_num.into())).map_err(|e| { + tracing::warn!(error = %e, block_num, "header resolution failed"); + DebugError::BlockNotFound(id) + })); + + let Some(sealed) = sealed else { return ResponsePayload::internal_error_message( format!("block not found: {id}").into(), ); }; - let block_hash = header.hash(); - let header = header.into_inner(); + let block_hash = sealed.hash(); + let header = sealed.into_inner(); + + let txs = response_tri!(cold.get_transactions_in_block(block_num).await.map_err(|e| { + tracing::warn!(error = %e, block_num, "cold storage read failed"); + DebugError::from(e) + })); tracing::debug!(number = header.number, "Loaded block"); @@ -77,7 +77,7 @@ where let db = response_tri!(ctx.revm_state_at_height(header.number.saturating_sub(1)).map_err(|e| { tracing::warn!(error = %e, block_num, "hot storage read failed"); - DebugError::Hot(e.to_string()) + DebugError::from(e) })); let mut trevm = signet_evm::signet_evm(db, ctx.constants().clone()) @@ -135,7 +135,7 @@ where // Look up the transaction and its containing block. 
let confirmed = response_tri!(cold.get_tx_by_hash(tx_hash).await.map_err(|e| { tracing::warn!(error = %e, %tx_hash, "cold storage read failed"); - DebugError::Cold(e.to_string()) + DebugError::from(e) })); let confirmed = response_tri!(confirmed.ok_or(DebugError::TransactionNotFound)); @@ -144,19 +144,17 @@ where let block_num = meta.block_number(); let block_hash = meta.block_hash(); - let (header, txs) = response_tri!( - tokio::try_join!( - cold.get_header_by_number(block_num), - cold.get_transactions_in_block(block_num), - ) - .map_err(|e| { - tracing::warn!(error = %e, block_num, "cold storage read failed"); - DebugError::Cold(e.to_string()) - }) - ); - let block_id = BlockId::Number(block_num.into()); - let header = response_tri!(header.ok_or(DebugError::BlockNotFound(block_id))).into_inner(); + let sealed = response_tri!(ctx.resolve_header(block_id).map_err(|e| { + tracing::warn!(error = %e, block_num, "header resolution failed"); + DebugError::BlockNotFound(block_id) + })); + let header = response_tri!(sealed.ok_or(DebugError::BlockNotFound(block_id))).into_inner(); + + let txs = response_tri!(cold.get_transactions_in_block(block_num).await.map_err(|e| { + tracing::warn!(error = %e, block_num, "cold storage read failed"); + DebugError::from(e) + })); tracing::debug!(number = block_num, "Loaded containing block"); @@ -164,7 +162,7 @@ where let db = response_tri!(ctx.revm_state_at_height(block_num.saturating_sub(1)).map_err(|e| { tracing::warn!(error = %e, block_num, "hot storage read failed"); - DebugError::Hot(e.to_string()) + DebugError::from(e) })); let mut trevm = signet_evm::signet_evm(db, ctx.constants().clone()) diff --git a/crates/rpc/src/debug/error.rs b/crates/rpc/src/debug/error.rs index a078034..6b8a8cd 100644 --- a/crates/rpc/src/debug/error.rs +++ b/crates/rpc/src/debug/error.rs @@ -8,14 +8,14 @@ use alloy::eips::BlockId; /// API responses — internal storage details are not exposed to callers. 
/// Use [`tracing`] to log the full error chain before constructing the /// variant. -#[derive(Debug, Clone, thiserror::Error)] +#[derive(Debug, thiserror::Error)] pub enum DebugError { /// Cold storage error. #[error("cold storage error")] - Cold(String), + Cold(#[from] signet_cold::ColdStorageError), /// Hot storage error. #[error("hot storage error")] - Hot(String), + Hot(#[from] signet_storage::StorageError), /// Invalid tracer configuration. #[error("invalid tracer config")] InvalidTracerConfig, @@ -33,13 +33,6 @@ pub enum DebugError { TransactionNotFound, } -impl DebugError { - /// Convert to a string by value. - pub fn into_string(self) -> String { - self.to_string() - } -} - impl serde::Serialize for DebugError { fn serialize(&self, serializer: S) -> Result where diff --git a/crates/rpc/src/eth/endpoints.rs b/crates/rpc/src/eth/endpoints.rs index 1cf1078..e54fd9d 100644 --- a/crates/rpc/src/eth/endpoints.rs +++ b/crates/rpc/src/eth/endpoints.rs @@ -37,14 +37,6 @@ use trevm::{ EstimationResult, revm::context::result::ExecutionResult, revm::database::DBErrorMarker, }; -// --------------------------------------------------------------------------- -// Not Supported -// --------------------------------------------------------------------------- - -pub(crate) async fn not_supported() -> ResponsePayload<(), ()> { - ResponsePayload::method_not_found() -} - /// Response for `eth_syncing`. /// /// Returns `false` when the node is fully synced, or a sync-status @@ -245,6 +237,7 @@ where ); } + // NB: Signet has no blob transactions; zero-filled for client compatibility. 
let base_fee_per_blob_gas = vec![0; base_fee_per_gas.len()]; let blob_gas_used_ratio = vec![0.; gas_used_ratio.len()]; diff --git a/crates/rpc/src/eth/mod.rs b/crates/rpc/src/eth/mod.rs index 2c51abb..4e1309a 100644 --- a/crates/rpc/src/eth/mod.rs +++ b/crates/rpc/src/eth/mod.rs @@ -4,7 +4,7 @@ mod endpoints; use endpoints::{ addr_tx_count, balance, block, block_number, block_receipts, block_tx_count, call, chain_id, code_at, create_access_list, estimate_gas, fee_history, gas_price, get_filter_changes, - get_logs, header_by, max_priority_fee_per_gas, new_block_filter, new_filter, not_supported, + get_logs, header_by, max_priority_fee_per_gas, new_block_filter, new_filter, raw_transaction_by_block_and_index, raw_transaction_by_hash, send_raw_transaction, storage_at, subscribe, syncing, transaction_by_block_and_index, transaction_by_hash, transaction_receipt, uncle_block, uncle_count, uninstall_filter, unsubscribe, @@ -73,26 +73,15 @@ where .route("getFilterLogs", get_filter_changes::) .route("subscribe", subscribe::) .route("unsubscribe", unsubscribe::) - // --- - // Unsupported methods - // --- - .route("protocolVersion", not_supported) - .route("coinbase", not_supported) - .route("accounts", not_supported) - .route("blobBaseFee", not_supported) + // Uncle queries return semantically correct values (0 / null) + // because Signet has no uncle blocks. 
.route("getUncleCountByBlockHash", uncle_count) .route("getUncleCountByBlockNumber", uncle_count) .route("getUncleByBlockHashAndIndex", uncle_block) .route("getUncleByBlockNumberAndIndex", uncle_block) - .route("getWork", not_supported) - .route("hashrate", not_supported) - .route("mining", not_supported) - .route("submitHashrate", not_supported) - .route("submitWork", not_supported) - .route("sendTransaction", not_supported) - .route("sign", not_supported) - .route("signTransaction", not_supported) - .route("signTypedData", not_supported) - .route("getProof", not_supported) - .route("newPendingTransactionFilter", not_supported) + // Unsupported methods (return method_not_found by default): + // - protocolVersion, coinbase, accounts, blobBaseFee + // - getWork, hashrate, mining, submitHashrate, submitWork + // - sendTransaction, sign, signTransaction, signTypedData + // - getProof, newPendingTransactionFilter } diff --git a/crates/rpc/src/interest/subs.rs b/crates/rpc/src/interest/subs.rs index 3df1a00..9c1bada 100644 --- a/crates/rpc/src/interest/subs.rs +++ b/crates/rpc/src/interest/subs.rs @@ -23,13 +23,28 @@ use tracing::{Instrument, debug, debug_span, enabled, trace}; pub(crate) type SubscriptionBuffer = EventBuffer; /// JSON-RPC subscription notification envelope. -#[derive(serde::Serialize)] +/// +/// The `jsonrpc` and `method` fields are always `"2.0"` and +/// `"eth_subscription"` respectively, so they are hardcoded in the +/// [`serde::Serialize`] impl rather than stored as struct fields. 
struct SubscriptionNotification<'a> { - jsonrpc: &'static str, - method: &'static str, params: SubscriptionParams<'a>, } +impl serde::Serialize for SubscriptionNotification<'_> { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut s = serializer.serialize_struct("SubscriptionNotification", 3)?; + s.serialize_field("jsonrpc", "2.0")?; + s.serialize_field("method", "eth_subscription")?; + s.serialize_field("params", &self.params)?; + s.end() + } +} + /// Params field of a subscription notification. #[derive(serde::Serialize)] struct SubscriptionParams<'a> { @@ -198,8 +213,6 @@ impl SubscriptionTask { for permit in permits { let Some(item) = notif_buffer.pop_front() else { break }; let notification = SubscriptionNotification { - jsonrpc: "2.0", - method: "eth_subscription", params: SubscriptionParams { result: &item, subscription: id }, }; let _ = permit.send(¬ification); diff --git a/crates/rpc/src/signet/endpoints.rs b/crates/rpc/src/signet/endpoints.rs index a863d65..b82280d 100644 --- a/crates/rpc/src/signet/endpoints.rs +++ b/crates/rpc/src/signet/endpoints.rs @@ -15,17 +15,21 @@ use tokio::select; use trevm::revm::database::DBErrorMarker; /// `signet_sendOrder` handler. +/// +/// Forwards the order to the transaction cache asynchronously. The +/// response is returned immediately — forwarding errors are logged +/// but not propagated to the caller (fire-and-forget). 
pub(super) async fn send_order( hctx: HandlerCtx, order: SignedOrder, ctx: StorageRpcCtx, -) -> Result<(), String> +) -> ResponsePayload<(), SignetError> where H: HotKv + Send + Sync + 'static, ::Error: DBErrorMarker, { let Some(tx_cache) = ctx.tx_cache().cloned() else { - return Err(SignetError::TxCacheNotProvided.to_string()); + return ResponsePayload(Err(SignetError::TxCacheNotProvided.into())); }; let task = |hctx: HandlerCtx| async move { @@ -34,10 +38,10 @@ where tracing::warn!(error = %e, "failed to forward order"); } }); - Ok(()) + ResponsePayload(Ok(())) }; - await_handler!(@option hctx.spawn_blocking_with_ctx(task)) + await_handler!(@response_option hctx.spawn_blocking_with_ctx(task)) } /// `signet_callBundle` handler.