diff --git a/Cargo.toml b/Cargo.toml
index e98d70b..c404c5b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -36,12 +36,10 @@ incremental = false
 [workspace.dependencies]
 signet-blobber = { version = "0.16.0-rc.7", path = "crates/blobber" }
 signet-block-processor = { version = "0.16.0-rc.7", path = "crates/block-processor" }
-signet-db = { version = "0.16.0-rc.7", path = "crates/db" }
 signet-genesis = { version = "0.16.0-rc.7", path = "crates/genesis" }
 signet-node = { version = "0.16.0-rc.7", path = "crates/node" }
 signet-node-config = { version = "0.16.0-rc.7", path = "crates/node-config" }
 signet-node-tests = { version = "0.16.0-rc.7", path = "crates/node-tests" }
-signet-node-types = { version = "0.16.0-rc.7", path = "crates/node-types" }
 signet-rpc = { version = "0.16.0-rc.7", path = "crates/rpc" }
 
 init4-bin-base = { version = "0.18.0-rc.8", features = ["alloy"] }
@@ -55,9 +53,13 @@ signet-tx-cache = "0.16.0-rc.8"
 signet-types = "0.16.0-rc.8"
 signet-zenith = "0.16.0-rc.8"
 signet-journal = "0.16.0-rc.8"
+signet-storage = "0.6.2"
+signet-cold = "0.6.2"
+signet-hot = "0.6.2"
+signet-storage-types = "0.6.2"
 
 # ajj
-ajj = { version = "0.3.4" }
+ajj = { version = "0.6.0" }
 
 # trevm
 trevm = { version = "0.34.0", features = ["full_env_cfg"] }
@@ -71,28 +73,13 @@ alloy = { version = "1.4.0", features = [
     "genesis",
     "arbitrary",
 ] }
-alloy-contract = { version = "1.4.0", features = ["pubsub"] }
 
 # Reth
 reth = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
-reth-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
 reth-chainspec = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
-reth-codecs = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
-reth-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
-reth-db-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
-reth-db-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
-reth-eth-wire-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
-reth-evm-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
 reth-exex = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
 reth-exex-test-utils = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
-reth-libmdbx = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
-reth-network-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
-reth-network-peers = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
 reth-node-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
-reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
-reth-prune-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
-reth-rpc-eth-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
-reth-stages-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
 reth-transaction-pool = { git = "https://github.com/paradigmxyz/reth", tag = "v1.10.2" }
 
 # Foundry periphery
@@ -100,14 +87,10 @@ foundry-blob-explorers = "0.17"
 
 # Async
 tokio = { version = "1.43.0", features = ["macros"] }
-async-trait = "0.1.87"
-
-# Pinned for compatibility with reth
-parking_lot = "0.12"
+tokio-stream = "0.1"
+tokio-util = "0.7"
 
 # Misc
-chrono = "0.4.38"
-clap = "4"
 eyre = "0.6.12"
 futures-util = "0.3.31"
 hex = { package = "const-hex", version = "1.10", default-features = false, features = [
@@ -116,22 +99,23 @@ hex = { package = "const-hex", version = "1.10", default-features = false, featu
version = "1.10", default-features = false, featu itertools = "0.14.0" metrics = "0.24.2" openssl = { version = "0.10", features = ["vendored"] } -proptest = "1.6.0" reqwest = "0.12.9" serde = { version = "1.0.217", features = ["derive"] } serde_json = "1.0.137" -smallvec = "1.15.1" tracing = "0.1.41" tracing-subscriber = "0.3.19" thiserror = "2.0.12" url = "2.5.4" -uuid = "1.16.0" # Test Utils -alloy-rlp = "0.3.11" tempfile = "3.17.0" # [patch.crates-io] +# signet-cold = { path = "../storage/crates/cold" } +# signet-hot = { path = "../storage/crates/hot" } +# signet-storage = { path = "../storage/crates/storage" } +# signet-storage-types = { path = "../storage/crates/types" } + # signet-bundle = { path = "../sdk/crates/bundle"} # signet-constants = { path = "../sdk/crates/constants"} # signet-evm = { path = "../sdk/crates/evm"} diff --git a/crates/block-processor/Cargo.toml b/crates/block-processor/Cargo.toml index f1ac842..ce5eae9 100644 --- a/crates/block-processor/Cargo.toml +++ b/crates/block-processor/Cargo.toml @@ -13,7 +13,6 @@ repository.workspace = true signet-constants.workspace = true signet-evm.workspace = true signet-extract.workspace = true -signet-journal.workspace = true signet-types.workspace = true init4-bin-base.workspace = true @@ -21,14 +20,12 @@ init4-bin-base.workspace = true trevm.workspace = true signet-blobber.workspace = true -signet-db.workspace = true -signet-node-types.workspace = true +signet-hot.workspace = true +signet-storage-types.workspace = true alloy.workspace = true reth.workspace = true -reth-exex.workspace = true -reth-node-api.workspace = true reth-chainspec.workspace = true eyre.workspace = true diff --git a/crates/block-processor/README.md b/crates/block-processor/README.md index c239a6d..6a49851 100644 --- a/crates/block-processor/README.md +++ b/crates/block-processor/README.md @@ -1,13 +1,11 @@ # Signet Block Processor -Block processing logic for the Signet Node. This crate takes a reth `Chain`, -runs the Signet EVM, and commits the results to a database. +Block processing logic for the Signet Node. This crate extracts and processes +Signet blocks from host chain commits using the EVM, reading rollup state from +hot storage. # Significant Types -- A few convenience type aliases: - - `PrimitivesOf` - The primitives type used by the host. - - `Chain` - A reth `Chain` using the host's primitives. - - `ExExNotification` - A reth `ExExNotification` using the host's - primitives. -- `SignetBlockProcessorV1` - The first version of the block processor. +- `SignetBlockProcessor` — The block processor. Reads state from + `HotKv` storage, runs the EVM via `signet_evm`, and returns an + `ExecutedBlock`. diff --git a/crates/block-processor/src/lib.rs b/crates/block-processor/src/lib.rs index d836442..a2c957b 100644 --- a/crates/block-processor/src/lib.rs +++ b/crates/block-processor/src/lib.rs @@ -21,13 +21,3 @@ pub use utils::revm_spec; mod v1; pub use v1::SignetBlockProcessor as SignetBlockProcessorV1; - -/// Primitives used by the host. -pub type PrimitivesOf = - <::Types as reth_node_api::NodeTypes>::Primitives; - -/// A [`reth::providers::Chain`] using the host primitives. -pub type Chain = reth::providers::Chain>; - -/// A [`reth_exex::ExExNotification`] using the host primitives. 
diff --git a/crates/block-processor/src/lib.rs b/crates/block-processor/src/lib.rs
index d836442..a2c957b 100644
--- a/crates/block-processor/src/lib.rs
+++ b/crates/block-processor/src/lib.rs
@@ -21,13 +21,3 @@ pub use utils::revm_spec;
 
 mod v1;
 pub use v1::SignetBlockProcessor as SignetBlockProcessorV1;
-
-/// Primitives used by the host.
-pub type PrimitivesOf<Host> =
-    <<Host as reth_node_api::FullNodeComponents>::Types as reth_node_api::NodeTypes>::Primitives;
-
-/// A [`reth::providers::Chain`] using the host primitives.
-pub type Chain<Host> = reth::providers::Chain<PrimitivesOf<Host>>;
-
-/// A [`reth_exex::ExExNotification`] using the host primitives.
-pub type ExExNotification<Host> = reth_exex::ExExNotification<PrimitivesOf<Host>>;
diff --git a/crates/block-processor/src/metrics.rs b/crates/block-processor/src/metrics.rs
index 5288d11..67fe4a7 100644
--- a/crates/block-processor/src/metrics.rs
+++ b/crates/block-processor/src/metrics.rs
@@ -57,9 +57,6 @@ const ENTER_TOKEN_PROCESSED_HELP: &str =
 const TRANSACT_PROCESSED: &str = "signet.block_processor.transact_events.processed";
 const TRANSACT_PROCESSED_HELP: &str =
     "Histogram of number of transact events processed per block";
 
-const EXTRACTION_TIME: &str = "signet.block_processor.extraction.time";
-const EXTRACTION_TIME_HELP: &str = "Time taken to extract signet outputs from a host notification. Note: sometimes the extraction includes multiple blocks.";
-
 const PROCESSING_TIME: &str = "signet.block_processor.processing.time";
 const PROCESSING_TIME_HELP: &str =
     "Time taken to process a single signet block from extracts, in milliseconds.";
@@ -80,7 +77,6 @@ static DESCRIBE: LazyLock<()> = LazyLock::new(|| {
     describe_histogram!(ENTER_PROCESSED, ENTER_PROCESSED_HELP);
     describe_histogram!(ENTER_TOKEN_PROCESSED, ENTER_TOKEN_PROCESSED_HELP);
     describe_histogram!(TRANSACT_PROCESSED, TRANSACT_PROCESSED_HELP);
-    describe_histogram!(EXTRACTION_TIME, EXTRACTION_TIME_HELP);
     describe_histogram!(PROCESSING_TIME, PROCESSING_TIME_HELP);
     describe_histogram!(BLOCK_GAS_USED, BLOCK_GAS_USED_HELP);
 });
@@ -184,15 +180,6 @@ fn record_transacts_processed(value: u64) {
     transacts_processed().record(value as f64);
 }
 
-fn extraction_time() -> Histogram {
-    LazyLock::force(&DESCRIBE);
-    histogram!(EXTRACTION_TIME)
-}
-
-pub(crate) fn record_extraction_time(started_at: &std::time::Instant) {
-    extraction_time().record(started_at.elapsed().as_millis() as f64);
-}
-
 fn processing_time() -> Histogram {
     LazyLock::force(&DESCRIBE);
     histogram!(PROCESSING_TIME)
diff --git a/crates/block-processor/src/v1/processor.rs b/crates/block-processor/src/v1/processor.rs
index 154986e..21905cf 100644
--- a/crates/block-processor/src/v1/processor.rs
+++ b/crates/block-processor/src/v1/processor.rs
@@ -1,48 +1,53 @@
-use crate::{AliasOracle, AliasOracleFactory, Chain, metrics};
+use crate::{AliasOracle, AliasOracleFactory, metrics};
 use alloy::{
     consensus::BlockHeader,
-    primitives::{Address, B256, map::HashSet},
+    primitives::{Address, Sealable, map::HashSet},
 };
 use core::fmt;
 use eyre::ContextCompat;
 use init4_bin_base::utils::calc::SlotCalculator;
-use reth::{
-    primitives::EthPrimitives,
-    providers::{
-        BlockNumReader, BlockReader, ExecutionOutcome, HeaderProvider, ProviderFactory,
-        StateProviderFactory,
-    },
-    revm::{database::StateProviderDatabase, db::StateBuilder},
-};
+use reth::{providers::StateProviderFactory, revm::db::StateBuilder};
 use reth_chainspec::ChainSpec;
-use reth_node_api::{FullNodeComponents, NodeTypes};
 use signet_blobber::{CacheHandle, ExtractableChainShim};
 use signet_constants::SignetSystemConstants;
-use signet_db::{DataCompat, DbProviderExt, RuChain, RuRevmState, RuWriter};
 use signet_evm::{BlockResult, EvmNeedsCfg, SignetDriver};
-use signet_extract::{Extractor, Extracts};
-use signet_journal::HostJournal;
-use signet_node_types::{NodeTypesDbTrait, SignetNodeTypes};
+use signet_extract::Extracts;
+use signet_hot::{
+    db::HotDbRead,
+    model::{HotKv, HotKvRead, RevmRead},
+};
+use signet_storage_types::{DbSignetEvent, DbZenithHeader, ExecutedBlock, ExecutedBlockBuilder};
 use std::{collections::VecDeque, sync::Arc};
-use tracing::{Instrument, debug, error, info, info_span, instrument};
-use trevm::revm::primitives::hardfork::SpecId;
+use tracing::{error, instrument};
+use trevm::revm::{
+    database::{DBErrorMarker, State},
+    primitives::hardfork::SpecId,
+};
 
-/// A block processor that listens to host chain commits and processes
-/// Signet blocks accordingly.
-pub struct SignetBlockProcessor<Db, Alias>
+/// The revm state type backed by hot storage.
+type HotRevmState<H> = State<RevmRead<<H as HotKv>::RoTx>>;
+
+/// A block processor that extracts and processes Signet blocks from host
+/// chain commits.
+///
+/// The processor is a stateless executor: it reads state from hot storage,
+/// runs the EVM, and returns an [`ExecutedBlock`]. The caller (node) handles
+/// extraction, persistence, and orchestrates the per-block loop.
+pub struct SignetBlockProcessor<H, Alias>
 where
-    Db: NodeTypesDbTrait,
+    H: HotKv,
 {
-    /// Signet System Constants
+    /// Signet System Constants.
     constants: SignetSystemConstants,
 
     /// The chain specification, used to determine active hardforks.
     chain_spec: Arc<ChainSpec>,
 
-    /// A [`ProviderFactory`] instance to allow RU database access.
-    ru_provider: ProviderFactory<SignetNodeTypes<Db>>,
+    /// Hot storage handle for rollup state reads.
+    hot: H,
 
-    /// A [`ProviderFactory`] instance to allow Host database access.
+    /// An oracle for determining whether addresses should be aliased.
+    /// Reads HOST (L1) state, not rollup state.
     alias_oracle: Alias,
 
     /// The slot calculator.
@@ -52,30 +57,32 @@ where
     blob_cacher: CacheHandle,
 }
 
-impl<Db, Alias> fmt::Debug for SignetBlockProcessor<Db, Alias>
+impl<H, Alias> fmt::Debug for SignetBlockProcessor<H, Alias>
 where
-    Db: NodeTypesDbTrait,
+    H: HotKv,
 {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.debug_struct("SignetBlockProcessor").finish()
     }
 }
 
-impl<Db, Alias> SignetBlockProcessor<Db, Alias>
+impl<H, Alias> SignetBlockProcessor<H, Alias>
 where
-    Db: NodeTypesDbTrait,
+    H: HotKv,
+    H::RoTx: 'static,
+    <H::RoTx as HotKvRead>::Error: DBErrorMarker,
     Alias: AliasOracleFactory,
 {
     /// Create a new [`SignetBlockProcessor`].
     pub const fn new(
         constants: SignetSystemConstants,
         chain_spec: Arc<ChainSpec>,
-        ru_provider: ProviderFactory<SignetNodeTypes<Db>>,
+        hot: H,
         alias_oracle: Alias,
         slot_calculator: SlotCalculator,
         blob_cacher: CacheHandle,
     ) -> Self {
-        Self { constants, chain_spec, ru_provider, alias_oracle, slot_calculator, blob_cacher }
+        Self { constants, chain_spec, hot, alias_oracle, slot_calculator, blob_cacher }
     }
 
     /// Get the active spec id at the given timestamp.
@@ -83,27 +90,23 @@ where
         crate::revm_spec(&self.chain_spec, timestamp)
     }
 
-    /// Make a [`StateProviderDatabase`] from the read-write provider, suitable
-    /// for use with Trevm.
-    fn state_provider_database(&self, height: u64) -> eyre::Result<RuRevmState> {
-        // Get the state provider for the block number
-        let sp = self.ru_provider.history_by_block_number(height)?;
-
-        // Wrap in Revm compatibility layer
-        let spd = StateProviderDatabase::new(sp);
-        let builder = StateBuilder::new_with_database(spd);
-
-        Ok(builder.with_bundle_update().build())
+    /// Build a revm [`State`] backed by hot storage at the given parent
+    /// height.
+    fn revm_state(&self, parent_height: u64) -> eyre::Result<HotRevmState<H>> {
+        let reader = self.hot.reader()?;
+        let db = RevmRead::at_height(reader, parent_height);
+        Ok(StateBuilder::new_with_database(db).with_bundle_update().build())
     }
 
     /// Make a new Trevm instance, building on the given height.
-    fn trevm(&self, parent_height: u64, spec_id: SpecId) -> eyre::Result<EvmNeedsCfg<RuRevmState>> {
-        let db = self.state_provider_database(parent_height)?;
-
+    fn trevm(
+        &self,
+        parent_height: u64,
+        spec_id: SpecId,
+    ) -> eyre::Result<EvmNeedsCfg<HotRevmState<H>>> {
+        let db = self.revm_state(parent_height)?;
         let mut trevm = signet_evm::signet_evm(db, self.constants.clone());
-
         trevm.set_spec_id(spec_id);
-
         Ok(trevm)
     }
 
@@ -112,113 +115,30 @@ where
         self.alias_oracle.create()?.should_alias(address)
     }
 
-    /// Called when the host chain has committed a block or set of blocks.
-    #[instrument(skip_all, fields(count = chain.len(), first = chain.first().number(), tip = chain.tip().number()))]
-    pub async fn on_host_commit<Host>(&self, chain: &Chain<Host>) -> eyre::Result<Option<RuChain>>
-    where
-        Host: FullNodeComponents,
-        Host::Types: NodeTypes<Primitives = EthPrimitives>,
-    {
-        let highest = chain.tip().number();
-        if highest < self.constants.host_deploy_height() {
-            return Ok(None);
-        }
-
-        // this should never happen but we want to handle it anyway
-        if chain.is_empty() {
-            return Ok(None);
-        }
-
+    /// Process a single extracted block, returning an [`ExecutedBlock`].
+    ///
+    /// The caller is responsible for driving extraction (via [`Extractor`])
+    /// and persisting the result to storage between calls.
+    ///
+    /// [`Extractor`]: signet_extract::Extractor
+    #[instrument(skip_all, fields(
+        ru_height = block_extracts.ru_height,
+        host_height = block_extracts.host_block.number(),
+        has_ru_block = block_extracts.submitted.is_some(),
+    ))]
+    pub async fn process_block(
+        &self,
+        block_extracts: &Extracts<'_, ExtractableChainShim<'_>>,
+    ) -> eyre::Result<ExecutedBlock> {
         let start_time = std::time::Instant::now();
+        let spec_id = self.spec_id(block_extracts.host_block.timestamp());
 
-        let extractor = Extractor::new(self.constants.clone());
-        let shim = ExtractableChainShim::new(chain);
-        let outputs = extractor.extract_signet(&shim);
-
-        metrics::record_extraction_time(&start_time);
-
-        // TODO: ENG-481 Inherit prune modes from Reth configuration.
-        // https://linear.app/initiates/issue/ENG-481/inherit-prune-modes-from-reth-node
-
-        // The extractor will filter out blocks at or before the deployment
-        // height, so we don't need to compute the start from the notification.
-        let mut start = None;
-        let mut current = 0;
-        let last_ru_height = self.ru_provider.last_block_number()?;
-        let mut prev_block_journal = self.ru_provider.provider_rw()?.latest_journal_hash()?;
-
-        let mut net_outcome = ExecutionOutcome::default();
-
-        // There might be a case where we can get a notification that starts
-        // "lower" than our last processed block,
-        // but contains new information beyond one point. In this case, we
-        // should simply skip the block.
-        for block_extracts in outputs.skip_while(|extract| extract.ru_height <= last_ru_height) {
-            // If we haven't set the start yet, set it to the first block.
-            if start.is_none() {
-                let new_ru_height = block_extracts.ru_height;
-
-                // If the above condition passes, we should always be
-                // committing without skipping a range of blocks.
-                if new_ru_height != last_ru_height + 1 {
-                    error!(
-                        %new_ru_height,
-                        %last_ru_height,
-                        "missing range of DB blocks"
-                    );
-                    eyre::bail!("missing range of DB blocks");
-                }
-                start = Some(new_ru_height);
-            }
-
-            metrics::record_extracts(&block_extracts);
-
-            let start_time = std::time::Instant::now();
-            current = block_extracts.ru_height;
-            let spec_id = self.spec_id(block_extracts.host_block.timestamp());
-
-            let span = info_span!(
-                "signet::handle_zenith_outputs::block_processing",
-                start = start.unwrap(),
-                ru_height = block_extracts.ru_height,
-                host_height = block_extracts.host_block.number(),
-                has_ru_block = block_extracts.submitted.is_some(),
-                height_before_notification = last_ru_height,
-            );
-
-            let block_result =
-                self.run_evm(&block_extracts, spec_id).instrument(span.clone()).await?;
-            metrics::record_block_result(&block_result, &start_time);
-
-            let _ = span.enter();
-            let journal =
-                self.commit_evm_results(&block_extracts, &block_result, prev_block_journal)?;
-
-            prev_block_journal = journal.journal_hash();
-            net_outcome.extend(block_result.execution_outcome.convert());
-        }
-        info!("committed blocks");
-
-        // If we didn't process any blocks, we don't need to return anything.
-        // In practice, this should never happen, as we should always have at
-        // least one block to process.
-        if start.is_none() {
-            return Ok(None);
-        }
-        let start = start.expect("checked by early return");
-
-        // Return the range of blocks we processed
-        let provider = self.ru_provider.provider_rw()?;
+        metrics::record_extracts(block_extracts);
 
-        let ru_info = provider.get_extraction_results(start..=current)?;
-
-        let inner = Chain::<Host>::new(
-            provider.recovered_block_range(start..=current)?,
-            net_outcome,
-            Default::default(),
-        );
+        let block_result = self.run_evm(block_extracts, spec_id).await?;
+        metrics::record_block_result(&block_result, &start_time);
 
-        Ok(Some(RuChain { inner, ru_info }))
+        self.build_executed_block(block_extracts, block_result)
     }
 
     /// ==========================
@@ -237,6 +157,9 @@ where
     /// ███████ ████    ██     ██
     /// ===========================
     /// ===========================
+    ///
+    /// Run the EVM for a single block extraction.
+    #[instrument(skip_all)]
     async fn run_evm(
         &self,
         block_extracts: &Extracts<'_, ExtractableChainShim<'_>>,
@@ -247,10 +170,12 @@
         let timestamp = block_extracts.host_block.timestamp();
 
         let parent_header = self
-            .ru_provider
-            .sealed_header(block_extracts.ru_height.saturating_sub(1))?
+            .hot
+            .reader()?
+            .get_header(ru_height.saturating_sub(1))?
            .wrap_err("parent ru block not present in DB")
             .inspect_err(|e| error!(%e))?;
+        let parent_header = signet_types::primitives::SealedHeader::new(parent_header.into_inner());
 
         let txns = match &block_extracts.submitted {
             Some(submitted) => {
@@ -285,7 +210,7 @@
             block_extracts,
             to_alias,
             txns,
-            parent_header.convert(),
+            parent_header,
             self.constants.clone(),
         );
 
@@ -306,38 +231,69 @@
         })
     }
 
-    /// Commit the outputs of a zenith block to the database.
+    /// Build an [`ExecutedBlock`] from processor outputs.
     #[instrument(skip_all)]
-    fn commit_evm_results<'a>(
+    fn build_executed_block(
         &self,
         extracts: &Extracts<'_, ExtractableChainShim<'_>>,
-        block_result: &'a BlockResult,
-        prev_block_journal: B256,
-    ) -> eyre::Result<HostJournal<'a>> {
-        let journal = block_result.make_host_journal(prev_block_journal);
-        let time = std::time::Instant::now();
-        let jh = journal.journal_hash();
-
-        debug!(
-            target: "signet::journal::serialize",
-            bytes = journal.serialized().len(),
-            hash = %jh,
-            elapsed_micros = %time.elapsed().as_micros(),
-            "journal produced"
-        );
-
-        self.ru_provider.provider_rw()?.update(|writer| {
-            // add execution results to database
-            writer.append_host_block(
-                extracts.ru_header(),
-                extracts.transacts().cloned(),
-                extracts.enters(),
-                extracts.enter_tokens(),
-                block_result,
-                jh,
-            )?;
-            Ok(())
-        })?;
-        Ok(journal)
+        block_result: BlockResult,
+    ) -> eyre::Result<ExecutedBlock> {
+        let BlockResult { sealed_block, execution_outcome, .. } = block_result;
+
+        // Header from the sealed block. Re-use the known hash to avoid
+        // recomputing it.
+        let hash = sealed_block.block.header.hash();
+        let header = sealed_block.block.header.header().clone().seal_unchecked(hash);
+
+        // Bundle and receipts from execution outcome.
+        let (bundle, receipt_vecs, _) = execution_outcome.into_parts();
+
+        // Flatten receipts (one block → one inner vec) and convert to
+        // storage Receipt type.
+        let receipts = receipt_vecs
+            .into_iter()
+            .flatten()
+            .map(|envelope| {
+                let tx_type = envelope.tx_type();
+                signet_storage_types::Receipt { tx_type, inner: envelope.into_receipt() }
+            })
+            .collect();
+
+        // Transactions: zip txs + senders → Vec<Recovered>.
+        let transactions = sealed_block
+            .block
+            .body
+            .transactions
+            .into_iter()
+            .zip(sealed_block.senders)
+            .map(|(tx, sender)| signet_storage_types::Recovered::new_unchecked(tx, sender))
+            .collect();
+
+        // Signet events with a single incrementing index across all types.
+        let signet_events: Vec<_> = extracts
+            .enters()
+            .map(|e| DbSignetEvent::Enter(0, e))
+            .chain(extracts.enter_tokens().map(|e| DbSignetEvent::EnterToken(0, e)))
+            .chain(extracts.transacts().map(|t| DbSignetEvent::Transact(0, t.clone())))
+            .enumerate()
+            .map(|(i, e)| match e {
+                DbSignetEvent::Enter(_, v) => DbSignetEvent::Enter(i as u64, v),
+                DbSignetEvent::EnterToken(_, v) => DbSignetEvent::EnterToken(i as u64, v),
+                DbSignetEvent::Transact(_, v) => DbSignetEvent::Transact(i as u64, v),
+            })
+            .collect();
+
+        // Zenith header from extracts.
+        let zenith_header = extracts.ru_header().map(DbZenithHeader::from);
+
+        ExecutedBlockBuilder::new()
+            .header(header)
+            .bundle(bundle)
+            .transactions(transactions)
+            .receipts(receipts)
+            .signet_events(signet_events)
+            .zenith_header(zenith_header)
+            .build()
+            .map_err(|e| eyre::eyre!("failed to build ExecutedBlock: {e}"))
     }
 }
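One detail of `build_executed_block` worth isolating is the event indexing: the three extract streams are chained, and `enumerate` assigns a single contiguous index across every event kind, overwriting the `0` placeholder each event was constructed with. The same shape, as a self-contained sketch with illustrative types:

```rust
/// Stand-in for `DbSignetEvent`: each variant carries its event index.
#[derive(Debug, PartialEq, Eq)]
enum Event {
    Enter(u64, &'static str),
    Transact(u64, &'static str),
}

/// Chain heterogeneous event streams, then let `enumerate` hand out one
/// contiguous index across all kinds, replacing the placeholder zero.
fn index_events() -> Vec<Event> {
    let enters = ["enter-a", "enter-b"].into_iter().map(|v| Event::Enter(0, v));
    let transacts = ["transact-a"].into_iter().map(|v| Event::Transact(0, v));

    enters
        .chain(transacts)
        .enumerate()
        .map(|(i, e)| match e {
            Event::Enter(_, v) => Event::Enter(i as u64, v),
            Event::Transact(_, v) => Event::Transact(i as u64, v),
        })
        .collect()
}

#[test]
fn indices_are_contiguous_across_kinds() {
    assert_eq!(index_events()[2], Event::Transact(2, "transact-a"));
}
```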
diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml
deleted file mode 100644
index 784f3b0..0000000
--- a/crates/db/Cargo.toml
+++ /dev/null
@@ -1,39 +0,0 @@
-[package]
-name = "signet-db"
-version.workspace = true
-edition.workspace = true
-rust-version.workspace = true
-authors.workspace = true
-license.workspace = true
-homepage.workspace = true
-repository.workspace = true
-
-[dependencies]
-signet-node-types.workspace = true
-
-signet-evm.workspace = true
-signet-journal.workspace = true
-signet-types.workspace = true
-signet-zenith.workspace = true
-
-trevm.workspace = true
-
-alloy.workspace = true
-
-reth.workspace = true
-reth-db.workspace = true
-reth-prune-types.workspace = true
-reth-stages-types.workspace = true
-
-itertools.workspace = true
-serde.workspace = true
-tracing.workspace = true
-futures-util = "0.3.31"
-tokio.workspace = true
-auto_impl = "1.3.0"
-
-[dev-dependencies]
-serde_json.workspace = true
-reth-db = { workspace = true, features = ["test-utils"] }
-reth-exex-test-utils.workspace = true
-signet-constants = { workspace = true, features = ["test-utils"] }
diff --git a/crates/db/README.md b/crates/db/README.md
deleted file mode 100644
index cf95ed3..0000000
--- a/crates/db/README.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# Signet Database
-
-Extensions and modifications to reth's Database system for use in the Signet
-Node.
-
-This library contains the following:
-
-- Traits for reading and writing Signet events
-- Table definitions for Signet Events, Headers, and JournalHashes
-- Helpers for reading, writing, and reverting Signet EVM blocks and headers
-
-## Significant Traits
-
-- `RuWriter` - Encapsulates logic for reading and writing Signet events, state,
-  headers, etc.
-- `DbProviderExt` - Extends the reth `DatabaseProviderRW` with a scope-guarded
-  `update` method.
-- `DataCompat` - Provides methods for converting between Signet and reth data
-  structures, such as `ExecutionOutcome` and `Receipt`.
diff --git a/crates/db/src/aliases.rs b/crates/db/src/aliases.rs
deleted file mode 100644
index db880fb..0000000
--- a/crates/db/src/aliases.rs
+++ /dev/null
@@ -1,15 +0,0 @@
-use reth::{
-    providers::{DatabaseProviderRW, StateProviderBox},
-    revm::database::StateProviderDatabase,
-};
-use signet_node_types::SignetNodeTypes;
-use trevm::revm::database::State;
-
-/// A convenience alias for a [`DatabaseProviderRW`] using [`SignetNodeTypes`].
-pub type SignetDbRw<Db> = DatabaseProviderRW<Db, SignetNodeTypes<Db>>;
-
-/// Type alias for EVMs using a [`StateProviderBox`] as the `DB` type for
-/// trevm.
-///
-/// [`StateProviderBox`]: reth::providers::StateProviderBox
-pub type RuRevmState = State<StateProviderDatabase<StateProviderBox>>;
diff --git a/crates/db/src/chain.rs b/crates/db/src/chain.rs
deleted file mode 100644
index f517502..0000000
--- a/crates/db/src/chain.rs
+++ /dev/null
@@ -1,46 +0,0 @@
-use alloy::primitives::BlockNumber;
-use reth::providers::Chain;
-use signet_zenith::{Passage, Transactor, Zenith};
-use std::collections::BTreeMap;
-
-/// Host extraction contents for a single block.
-///
-/// This is a container type for DB operations. It is held by the [`RuChain`]
-/// struct, and constructed during the [`RuWriter::get_extraction_results`]
-/// method.
-///
-/// [`RuWriter::get_extraction_results`]:
-/// crate::traits::RuWriter::get_extraction_results
-#[derive(Debug, Clone, Default, PartialEq, Eq)]
-pub struct DbExtractionResults {
-    /// The zenith header for the block.
-    pub header: Option<Zenith::BlockHeader>,
-    /// The enters for the block.
-    pub enters: Vec<Passage::Enter>,
-    /// The transacts for the block.
-    pub transacts: Vec<Transactor::Transact>,
-    /// The enter tokens for the block.
-    pub enter_tokens: Vec<Passage::EnterToken>,
-}
-
-/// Equivalent of [`Chain`] but also containing [`DbExtractionResults`] for
-/// each block.
-#[derive(Debug, Clone, Default, PartialEq, Eq)]
-pub struct RuChain {
-    /// Inner chain of RU blocks.
-    pub inner: Chain,
-    /// Zenith headers for each block.
-    pub ru_info: BTreeMap<BlockNumber, DbExtractionResults>,
-}
-
-impl RuChain {
-    /// Get the length of the chain in blocks.
-    pub fn len(&self) -> usize {
-        self.inner.len()
-    }
-
-    /// Check if the chain is empty.
-    pub fn is_empty(&self) -> bool {
-        self.inner.is_empty()
-    }
-}
diff --git a/crates/db/src/consistency.rs b/crates/db/src/consistency.rs
deleted file mode 100644
index 5c81f97..0000000
--- a/crates/db/src/consistency.rs
+++ /dev/null
@@ -1,303 +0,0 @@
-use alloy::primitives::BlockNumber;
-use reth::{
-    api::NodePrimitives,
-    primitives::EthPrimitives,
-    providers::{
-        BlockBodyIndicesProvider, ProviderFactory, ProviderResult, StageCheckpointReader,
-        StaticFileProviderFactory, StaticFileSegment, StaticFileWriter,
-    },
-};
-use reth_db::{cursor::DbCursorRO, table::Table, tables, transaction::DbTx};
-use reth_stages_types::StageId;
-use signet_node_types::{NodeTypesDbTrait, SignetNodeTypes};
-use tracing::{debug, info, info_span, instrument, warn};
-
-/// Extension trait that provides consistency checking for the RU database
-/// provider. Consistency checks are MANDATORY on node startup to ensure that
-/// the static file segments and database are in sync.
-///
-/// In general, this should not be implemented outside this crate.
-pub trait ProviderConsistencyExt {
-    /// Check the consistency of the static file segments and return the last
-    /// known-good block number.
-    fn ru_check_consistency(&self) -> ProviderResult<Option<BlockNumber>>;
-}
-
-impl<Db> ProviderConsistencyExt for ProviderFactory<SignetNodeTypes<Db>>
-where
-    Db: NodeTypesDbTrait,
-{
-    /// Check the consistency of the static file segments and return the last
-    /// known good block number.
-    #[instrument(skip(self), fields(read_only = self.static_file_provider().is_read_only()))]
-    fn ru_check_consistency(&self) -> ProviderResult<Option<BlockNumber>> {
-        // Based on `StaticFileProvider::check_consistency` in
-        // `reth/crates/storage/provider/src/providers/static_file/manager.rs`
-        // with modifications for RU-specific logic.
-        //
-        // Comments are largely reproduced from the original source for context.
-        //
-        // Last updated @ reth@1.9.1
-        let prune_modes = self.provider_rw()?.prune_modes_ref().clone();
-        let sfp = self.static_file_provider();
-
-        debug!("Checking static file consistency.");
-
-        let mut last_good_height: Option<BlockNumber> = None;
-
-        let mut update_last_good_height = |new_height: BlockNumber| {
-            last_good_height =
-                last_good_height.map(|current| current.min(new_height)).or(Some(new_height));
-        };
-
-        for segment in StaticFileSegment::iter() {
-            let initial_highest_block = sfp.get_highest_static_file_block(segment);
-
-            if prune_modes.has_receipts_pruning() && segment.is_receipts() {
-                // Pruned nodes (including full node) do not store receipts as static files.
-                continue;
-            }
-
-            let span = info_span!(
-                "checking_segment",
-                ?segment,
-                initial_highest_block,
-                highest_block = tracing::field::Empty,
-                highest_tx = tracing::field::Empty
-            );
-            let _guard = span.enter();
-
-            // File consistency is broken if:
-            //
-            // * appending data was interrupted before a config commit, then
-            //   data file will be truncated according to the config.
-            //
-            // * pruning data was interrupted before a config commit, then we
-            //   have deleted data that we are expected to still have. We need
-            //   to check the Database and unwind everything accordingly.
-            if sfp.is_read_only() {
-                sfp.check_segment_consistency(segment)?;
-            } else {
-                // Fetching the writer will attempt to heal any file level
-                // inconsistency.
-                sfp.latest_writer(segment)?;
-            }
-
-            // Only applies to block-based static files. (Headers)
-            //
-            // The updated `highest_block` may have decreased if we healed from a pruning
-            // interruption.
-            let mut highest_block = sfp.get_highest_static_file_block(segment);
-            span.record("highest_block", highest_block);
-
-            if initial_highest_block != highest_block {
-                update_last_good_height(highest_block.unwrap_or_default());
-            }
-
-            // Only applies to transaction-based static files. (Receipts & Transactions)
-            //
-            // Make sure the last transaction matches the last block from its indices, since a heal
-            // from a pruning interruption might have decreased the number of transactions without
-            // being able to update the last block of the static file segment.
-            let highest_tx = sfp.get_highest_static_file_tx(segment);
-            if let Some(highest_tx) = highest_tx {
-                span.record("highest_tx", highest_tx);
-                let mut last_block = highest_block.unwrap_or_default();
-                loop {
-                    if let Some(indices) = self.block_body_indices(last_block)? {
-                        if indices.last_tx_num() <= highest_tx {
-                            break;
-                        }
-                    } else {
-                        // If the block body indices can not be found, then it means that static
-                        // files is ahead of database, and the `ensure_invariants` check will fix
-                        // it by comparing with stage checkpoints.
-                        break;
-                    }
-                    if last_block == 0 {
-                        break;
-                    }
-                    last_block -= 1;
-
-                    highest_block = Some(last_block);
-                    update_last_good_height(last_block);
-                }
-            }
-
-            if let Some(unwind) = match segment {
-                StaticFileSegment::Headers => {
-                    ensure_invariants::<
-                        _,
-                        tables::Headers<<EthPrimitives as NodePrimitives>::BlockHeader>,
-                    >(self, segment, highest_block, highest_block)?
-                }
-                StaticFileSegment::Transactions => {
-                    ensure_invariants::<
-                        _,
-                        tables::Transactions<<EthPrimitives as NodePrimitives>::SignedTx>,
-                    >(self, segment, highest_tx, highest_block)?
-                }
-                StaticFileSegment::Receipts => {
-                    ensure_invariants::<
-                        _,
-                        tables::Receipts<<EthPrimitives as NodePrimitives>::Receipt>,
-                    >(self, segment, highest_tx, highest_block)?
-                }
-                StaticFileSegment::TransactionSenders => {
-                    ensure_invariants::<_, tables::TransactionSenders>(
-                        self,
-                        segment,
-                        highest_tx,
-                        highest_block,
-                    )?
-                }
-                StaticFileSegment::AccountChangeSets => ensure_invariants::<
-                    _,
-                    tables::AccountChangeSets,
-                >(
-                    self, segment, highest_tx, highest_block
-                )?,
-            } {
-                update_last_good_height(unwind);
-            }
-        }
-
-        Ok(last_good_height)
-    }
-}
-
-/// Check invariants for each corresponding table and static file segment:
-///
-/// 1. The corresponding database table should overlap or have continuity in
-///    their keys ([`TxNumber`] or [`BlockNumber`]).
-/// 2. Its highest block should match the stage checkpoint block number if it's
-///    equal or higher than the corresponding database table last entry.
-///    * If the checkpoint block is higher, then request a pipeline unwind to
-///      the static file block.
-///      This is expressed by returning [`Some`] with the
-///      requested pipeline unwind target.
-///    * If the checkpoint block is lower, then heal by removing rows from the
-///      static file. In this case, the rows will be removed and [`None`] will
-///      be returned.
-/// 3. If the database tables overlap with static files and have contiguous
-///    keys, or the checkpoint block matches the highest static files block,
-///    then [`None`] will be returned.
-///
-/// [`TxNumber`]: alloy::primitives::TxNumber
-#[instrument(skip(this, segment), fields(table = T::NAME))]
-fn ensure_invariants<Db, T: Table<Key = u64>>(
-    this: &ProviderFactory<SignetNodeTypes<Db>>,
-    segment: StaticFileSegment,
-    highest_static_file_entry: Option<u64>,
-    highest_static_file_block: Option<BlockNumber>,
-) -> ProviderResult<Option<BlockNumber>>
-where
-    Db: NodeTypesDbTrait,
-{
-    let provider = this.provider_rw()?;
-    let sfp = this.static_file_provider();
-
-    let mut db_cursor = provider.tx_ref().cursor_read::<T>()?;
-
-    if let Some((db_first_entry, _)) = db_cursor.first()? {
-        if let (Some(highest_entry), Some(highest_block)) =
-            (highest_static_file_entry, highest_static_file_block)
-        {
-            // If there is a gap between the entry found in static file and
-            // database, then we have most likely lost static file data and
-            // need to unwind so we can load it again
-            if !(db_first_entry <= highest_entry || highest_entry + 1 == db_first_entry) {
-                info!(unwind_target = highest_block, "Setting unwind target.");
-                return Ok(Some(highest_block));
-            }
-        }
-
-        if let Some((db_last_entry, _)) = db_cursor.last()?
-            && highest_static_file_entry.is_none_or(|highest_entry| db_last_entry > highest_entry)
-        {
-            return Ok(None);
-        }
-    }
-
-    let highest_static_file_entry = highest_static_file_entry.unwrap_or_default();
-    let highest_static_file_block = highest_static_file_block.unwrap_or_default();
-
-    // If static file entry is ahead of the database entries, then ensure the
-    // checkpoint block number matches.
-    let checkpoint_block_number = provider
-        .get_stage_checkpoint(match segment {
-            StaticFileSegment::Headers => StageId::Headers,
-            StaticFileSegment::Transactions => StageId::Bodies,
-            StaticFileSegment::Receipts | StaticFileSegment::AccountChangeSets => {
-                StageId::Execution
-            }
-            StaticFileSegment::TransactionSenders => StageId::SenderRecovery,
-        })?
-        .unwrap_or_default()
-        .block_number;
-
-    // If the checkpoint is ahead, then we lost static file data. May be data corruption.
-    if checkpoint_block_number > highest_static_file_block {
-        info!(
-            checkpoint_block_number,
-            unwind_target = highest_static_file_block,
-            "Setting unwind target."
-        );
-        return Ok(Some(highest_static_file_block));
-    }
-
-    // If the checkpoint is behind, then we failed to do a database commit
-    // **but committed** to static files on executing a stage, or the reverse
-    // on unwinding a stage.
-    //
-    // All we need to do is to prune the extra static file rows.
-    if checkpoint_block_number < highest_static_file_block {
-        info!(
-            from = highest_static_file_block,
-            to = checkpoint_block_number,
-            "Unwinding static file segment."
-        );
-
-        let mut writer = sfp.latest_writer(segment)?;
-        if segment.is_headers() {
-            // TODO(joshie): is_block_meta
-            writer.prune_headers(highest_static_file_block - checkpoint_block_number)?;
-        } else if let Some(block) = provider.block_body_indices(checkpoint_block_number)? {
-            // todo joshie: is querying block_body_indices a potential issue
-            // once bbi is moved to sf as well
-            let number = highest_static_file_entry - block.last_tx_num();
-            if segment.is_receipts() {
-                writer.prune_receipts(number, checkpoint_block_number)?;
-            } else {
-                writer.prune_transactions(number, checkpoint_block_number)?;
-            }
-        }
-        writer.commit()?;
-    }
-
-    Ok(None)
-}
-
-// Some code in this file is adapted from reth. It is used under the terms of
-// the MIT License.
-//
-// The MIT License (MIT)
-//
-// Copyright (c) 2022-2025 Reth Contributors
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
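The deleted consistency module above boils down to a three-way reconciliation between the stage checkpoint and the highest static-file block. Stripped of reth's types, the decision looks like this (a sketch of the documented logic, not the module's API):

```rust
/// Outcome of reconciling a stage checkpoint against static files.
enum Heal {
    /// DB is ahead: static-file data was lost; unwind the pipeline.
    UnwindTo(u64),
    /// Static files are ahead: a commit raced; prune the extra rows.
    PruneStaticFiles { from: u64, to: u64 },
    /// Heights agree: nothing to do.
    Nothing,
}

fn reconcile(checkpoint_block: u64, highest_static_file_block: u64) -> Heal {
    if checkpoint_block > highest_static_file_block {
        Heal::UnwindTo(highest_static_file_block)
    } else if checkpoint_block < highest_static_file_block {
        Heal::PruneStaticFiles { from: highest_static_file_block, to: checkpoint_block }
    } else {
        Heal::Nothing
    }
}
```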
diff --git a/crates/db/src/convert.rs b/crates/db/src/convert.rs
deleted file mode 100644
index 7737b84..0000000
--- a/crates/db/src/convert.rs
+++ /dev/null
@@ -1,109 +0,0 @@
-//! Type conversion traits and implementations for converting between Reth, Alloy, and Signet types.
-//!
-//! This module provides a set of conversion traits that enable seamless
-//! interoperability between different type systems used in the Ethereum
-//! ecosystem:
-//!
-//! - **Reth types**: Core primitives from the Reth Ethereum client
-//! - **Alloy types**: Modern Ethereum types from the Alloy framework
-//! - **Signet types**: Custom types specific to the Signet protocol
-use alloy::consensus::TxReceipt;
-
-/// Trait for types that can be converted into other types as they're already compatible.
-/// Used for converting between alloy/reth/signet types.
-pub trait DataCompat<Other>: Sized {
-    /// Convert `self` into the target type.
-    fn convert(self) -> Other;
-
-    /// Convert `self` into the target type by cloning.
-    fn clone_convert(&self) -> Other
-    where
-        Self: Clone,
-    {
-        self.clone().convert()
-    }
-}
-
-impl<T, U> DataCompat<Vec<U>> for Vec<T>
-where
-    U: DataCompat<T>,
-    T: DataCompat<U>,
-{
-    fn convert(self) -> Vec<U> {
-        self.into_iter().map(|item| item.convert()).collect()
-    }
-}
-
-impl DataCompat<reth::providers::ExecutionOutcome> for signet_evm::ExecutionOutcome {
-    fn convert(self) -> reth::providers::ExecutionOutcome {
-        let (bundle, receipts, first_block) = self.into_parts();
-
-        reth::providers::ExecutionOutcome {
-            bundle,
-            receipts: receipts.convert(),
-            first_block,
-            requests: Default::default(), // Requests are not present in Signet's ExecutionOutcome
-        }
-    }
-}
-
-impl DataCompat<signet_evm::ExecutionOutcome> for reth::providers::ExecutionOutcome {
-    fn convert(self) -> signet_evm::ExecutionOutcome {
-        signet_evm::ExecutionOutcome::new(
-            self.bundle,
-            self.receipts.into_iter().map(DataCompat::convert).collect(),
-            self.first_block,
-        )
-    }
-}
-
-impl DataCompat<reth::primitives::Receipt> for alloy::consensus::ReceiptEnvelope {
-    fn convert(self) -> reth::primitives::Receipt {
-        reth::primitives::Receipt {
-            tx_type: self.tx_type(),
-            success: self.is_success(),
-            cumulative_gas_used: self.cumulative_gas_used(),
-            logs: self.logs().to_owned(),
-        }
-    }
-}
-
-impl DataCompat<alloy::consensus::ReceiptEnvelope> for reth::primitives::Receipt {
-    fn convert(self) -> alloy::consensus::ReceiptEnvelope {
-        let receipt = alloy::consensus::Receipt {
-            status: self.status().into(),
-            cumulative_gas_used: self.cumulative_gas_used,
-            logs: self.logs.to_owned(),
-        };
-
-        match self.tx_type {
-            reth::primitives::TxType::Legacy => {
-                alloy::consensus::ReceiptEnvelope::Legacy(receipt.into())
-            }
-            reth::primitives::TxType::Eip2930 => {
-                alloy::consensus::ReceiptEnvelope::Eip2930(receipt.into())
-            }
-            reth::primitives::TxType::Eip1559 => {
-                alloy::consensus::ReceiptEnvelope::Eip1559(receipt.into())
-            }
-            reth::primitives::TxType::Eip4844 => {
-                alloy::consensus::ReceiptEnvelope::Eip4844(receipt.into())
-            }
-            reth::primitives::TxType::Eip7702 => {
-                alloy::consensus::ReceiptEnvelope::Eip7702(receipt.into())
-            }
-        }
-    }
-}
-
-impl DataCompat<signet_types::primitives::SealedHeader> for reth::primitives::SealedHeader {
-    fn convert(self) -> signet_types::primitives::SealedHeader {
-        signet_types::primitives::SealedHeader::new(self.into_header())
-    }
-}
-
-impl DataCompat<reth::primitives::SealedHeader> for signet_types::primitives::SealedHeader {
-    fn convert(self) -> reth::primitives::SealedHeader {
-        reth::primitives::SealedHeader::new_unhashed(self.header().to_owned())
-    }
-}
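The deleted `DataCompat` trait is a compact pattern: a directed conversion trait plus a blanket impl that lifts it over containers. Reduced to a standalone sketch with illustrative types (not the crate's own):

```rust
/// A one-way "already compatible" conversion, as `DataCompat` was.
trait Compat<Other>: Sized {
    fn convert(self) -> Other;
}

/// Blanket impl: any elementwise conversion lifts over `Vec`.
impl<T, U> Compat<Vec<U>> for Vec<T>
where
    T: Compat<U>,
{
    fn convert(self) -> Vec<U> {
        self.into_iter().map(Compat::convert).collect()
    }
}

/// Two hypothetical types carrying the same information.
struct Ours(u64);
struct Theirs(u64);

impl Compat<Theirs> for Ours {
    fn convert(self) -> Theirs {
        Theirs(self.0)
    }
}

fn demo() -> Vec<Theirs> {
    vec![Ours(1), Ours(2)].convert()
}
```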
diff --git a/crates/db/src/journal/ingestor.rs b/crates/db/src/journal/ingestor.rs
deleted file mode 100644
index 481e5a3..0000000
--- a/crates/db/src/journal/ingestor.rs
+++ /dev/null
@@ -1,72 +0,0 @@
-use crate::{SignetDbRw, journal::JournalDb};
-use futures_util::StreamExt;
-use reth::providers::ProviderResult;
-use signet_journal::{Journal, JournalStream};
-use signet_node_types::NodeTypesDbTrait;
-use std::sync::Arc;
-use tokio::task::JoinHandle;
-
-/// A task that ingests journals into a reth database.
-#[derive(Debug)]
-pub struct JournalIngestor<Db: NodeTypesDbTrait> {
-    db: Arc<SignetDbRw<Db>>,
-}
-
-impl<Db: NodeTypesDbTrait> From<SignetDbRw<Db>> for JournalIngestor<Db> {
-    fn from(value: SignetDbRw<Db>) -> Self {
-        Self::new(value.into())
-    }
-}
-
-impl<Db: NodeTypesDbTrait> From<Arc<SignetDbRw<Db>>> for JournalIngestor<Db> {
-    fn from(value: Arc<SignetDbRw<Db>>) -> Self {
-        Self::new(value)
-    }
-}
-
-impl<Db: NodeTypesDbTrait> JournalIngestor<Db> {
-    /// Create a new `JournalIngestor` with the given database provider.
-    pub const fn new(db: Arc<SignetDbRw<Db>>) -> Self {
-        Self { db }
-    }
-
-    async fn task_future<S>(self, mut stream: S) -> ProviderResult<()>
-    where
-        S: JournalStream<'static> + Send + Unpin + 'static,
-    {
-        while let Some(Journal::V1(journal)) = stream.next().await {
-            // FUTURE: Sanity check that the header height matches the update
-            // height. Sanity check that both heights are 1 greater than the
-            // last height in the database.
-
-            let db = self.db.clone();
-
-            // DB interaction is sync, so we spawn a blocking task for it. We
-            // immediately await that task. This prevents blocking the worker
-            // thread
-            tokio::task::spawn_blocking(move || db.ingest(journal))
-                .await
-                .expect("ingestion should not panic")?;
-        }
-        // Stream has ended, return Ok
-        Ok(())
-    }
-
-    /// Spawn a task to ingest journals from the provided stream.
-    pub fn spawn<S>(self, stream: S) -> JoinHandle<ProviderResult<()>>
-    where
-        S: JournalStream<'static> + Send + Unpin + 'static,
-    {
-        tokio::spawn(self.task_future(stream))
-    }
-}
-
-/// Ingest journals from a stream into a reth database.
-pub async fn ingest_journals<Db, S>(db: Arc<SignetDbRw<Db>>, stream: S) -> ProviderResult<()>
-where
-    Db: NodeTypesDbTrait,
-    S: JournalStream<'static> + Send + Unpin + 'static,
-{
-    let ingestor = JournalIngestor::new(db);
-    ingestor.task_future(stream).await
-}
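Both journal tasks in this module share one concurrency shape: the synchronous MDBX write is pushed onto tokio's blocking pool and awaited immediately, so ingestion stays sequential but the async worker thread never stalls. In isolation, with a hypothetical `Store` standing in for `SignetDbRw`:

```rust
use std::sync::Arc;

/// Hypothetical synchronous store standing in for `SignetDbRw`.
struct Store;

impl Store {
    fn ingest(&self, journal: Vec<u8>) -> Result<(), String> {
        // ... synchronous DB write happens here ...
        let _ = journal;
        Ok(())
    }
}

/// Hop to the blocking pool for the sync write, then await it right
/// away: the worker never blocks, and journals are applied in order.
async fn ingest_one(db: Arc<Store>, journal: Vec<u8>) -> Result<(), String> {
    tokio::task::spawn_blocking(move || db.ingest(journal))
        .await
        .expect("ingestion should not panic")
}
```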
diff --git a/crates/db/src/journal/mod.rs b/crates/db/src/journal/mod.rs
deleted file mode 100644
index 2e898db..0000000
--- a/crates/db/src/journal/mod.rs
+++ /dev/null
@@ -1,10 +0,0 @@
-//! Utilities for working with Signet journals in a reth database.
-
-mod r#trait;
-pub use r#trait::JournalDb;
-
-mod provider;
-pub use provider::JournalProviderTask;
-
-mod ingestor;
-pub use ingestor::{JournalIngestor, ingest_journals};
diff --git a/crates/db/src/journal/provider.rs b/crates/db/src/journal/provider.rs
deleted file mode 100644
index 31cad05..0000000
--- a/crates/db/src/journal/provider.rs
+++ /dev/null
@@ -1,80 +0,0 @@
-use crate::journal::JournalDb;
-use futures_util::StreamExt;
-use reth::{
-    primitives::SealedHeader,
-    providers::{
-        CanonChainTracker, DatabaseProviderFactory, DatabaseProviderRW, ProviderResult,
-        providers::BlockchainProvider,
-    },
-    rpc::types::engine::ForkchoiceState,
-};
-use signet_journal::{Journal, JournalStream};
-use signet_node_types::{NodeTypesDbTrait, SignetNodeTypes};
-use tokio::task::JoinHandle;
-
-/// A task that processes journal updates for a specific database, and calls
-/// the appropriate methods on a [`BlockchainProvider`] to update the in-memory
-/// chain view.
-#[derive(Debug, Clone)]
-pub struct JournalProviderTask<Db: NodeTypesDbTrait> {
-    provider: BlockchainProvider<SignetNodeTypes<Db>>,
-}
-
-impl<Db: NodeTypesDbTrait> JournalProviderTask<Db> {
-    /// Instantiate a new task.
-    pub const fn new(provider: BlockchainProvider<SignetNodeTypes<Db>>) -> Self {
-        Self { provider }
-    }
-
-    /// Get a reference to the provider.
-    pub const fn provider(&self) -> &BlockchainProvider<SignetNodeTypes<Db>> {
-        &self.provider
-    }
-
-    /// Deconstruct the task into its provider.
-    pub fn into_inner(self) -> BlockchainProvider<SignetNodeTypes<Db>> {
-        self.provider
-    }
-
-    /// Create a future for the task, suitable for [`tokio::spawn`] or another
-    /// task-spawning system.
-    pub async fn task_future<S>(self, mut journals: S) -> ProviderResult<()>
-    where
-        S: JournalStream<'static> + Send + Unpin + 'static,
-    {
-        loop {
-            let Some(Journal::V1(journal)) = journals.next().await else { break };
-
-            let rw = self.provider.database_provider_rw().map(DatabaseProviderRW);
-
-            let r_header = SealedHeader::new_unhashed(journal.header().clone());
-            let block_hash = r_header.hash();
-
-            // DB interaction is sync, so we spawn a blocking task for it. We
-            // immediately await that task. This prevents blocking the worker
-            // thread
-            tokio::task::spawn_blocking(move || rw?.ingest(journal))
-                .await
-                .expect("ingestion should not panic")?;
-
-            self.provider.set_canonical_head(r_header.clone());
-            self.provider.set_safe(r_header.clone());
-            self.provider.set_finalized(r_header);
-            self.provider.on_forkchoice_update_received(&ForkchoiceState {
-                head_block_hash: block_hash,
-                safe_block_hash: block_hash,
-                finalized_block_hash: block_hash,
-            });
-        }
-
-        Ok(())
-    }
-
-    /// Spawn the journal provider task.
-    pub fn spawn<S>(self, journals: S) -> JoinHandle<ProviderResult<()>>
-    where
-        S: JournalStream<'static> + Send + Unpin + 'static,
-    {
-        tokio::spawn(self.task_future(journals))
-    }
-}
diff --git a/crates/db/src/journal/trait.rs b/crates/db/src/journal/trait.rs
deleted file mode 100644
index 014714f..0000000
--- a/crates/db/src/journal/trait.rs
+++ /dev/null
@@ -1,55 +0,0 @@
-use crate::RuWriter;
-use alloy::consensus::{BlockHeader, Header};
-use reth::{providers::ProviderResult, revm::db::BundleState};
-use signet_evm::{BlockResult, ExecutionOutcome};
-use signet_journal::HostJournal;
-use signet_types::primitives::{RecoveredBlock, SealedBlock, SealedHeader, TransactionSigned};
-
-/// A database that can be updated with journals.
-pub trait JournalDb: RuWriter {
-    /// Ingest a journal into the database.
-    ///
-    /// This will create a [`BlockResult`] from the provided header and update,
-    /// and append it to the database using [`RuWriter::append_host_block`].
-    ///
-    /// This DOES NOT update tables containing historical transactions,
-    /// receipts, events, etc. It only updates tables related to headers,
-    /// and state.
-    ///
-    /// This is intended to be used for tx simulation, and other purposes that
-    /// need fast state access WITHOUT needing to retrieve historical data.
-    fn ingest(&self, journal: HostJournal<'static>) -> ProviderResult<()> {
-        let journal_hash = journal.journal_hash();
-
-        let (meta, bsi) = journal.into_parts();
-        let (host_height, _, header) = meta.into_parts();
-
-        // TODO: remove the clone in future versions. This can be achieved by
-        // _NOT_ making a `BlockResult` and instead manually updating relevant
-        // tables. However, this means diverging more from the underlying reth
-        // logic that we are currently re-using.
-        let bundle_state: BundleState = bsi.into();
-        let execution_outcome = ExecutionOutcome::new(bundle_state, vec![], header.number());
-
-        let block: SealedBlock =
-            SealedBlock { header: SealedHeader::new(header), body: Default::default() };
-        let block_result = BlockResult {
-            sealed_block: RecoveredBlock::new(block, vec![]),
-            execution_outcome,
-            host_height,
-        };
-
-        self.append_host_block(
-            None,
-            std::iter::empty(),
-            std::iter::empty(),
-            std::iter::empty(),
-            &block_result,
-            journal_hash,
-        )?;
-
-        Ok(())
-    }
-}
-
-impl<T> JournalDb for T where T: RuWriter {}
diff --git a/crates/db/src/lib.rs b/crates/db/src/lib.rs
deleted file mode 100644
index 7c0458c..0000000
--- a/crates/db/src/lib.rs
+++ /dev/null
@@ -1,37 +0,0 @@
-#![doc = include_str!("../README.md")]
-#![warn(
-    missing_copy_implementations,
-    missing_debug_implementations,
-    missing_docs,
-    unreachable_pub,
-    clippy::missing_const_for_fn,
-    rustdoc::all
-)]
-#![cfg_attr(not(test), warn(unused_crate_dependencies))]
-#![deny(unused_must_use, rust_2018_idioms)]
-#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
-
-mod aliases;
-pub use aliases::{RuRevmState, SignetDbRw};
-
-mod chain;
-pub use chain::{DbExtractionResults, RuChain};
-
-mod consistency;
-pub use consistency::ProviderConsistencyExt;
-
-mod convert;
-pub use convert::DataCompat;
-
-pub mod journal;
-
-mod provider;
-
-mod tables;
-pub use tables::{
-    DbEnter, DbEnterToken, DbSignetEvent, DbTransact, DbZenithHeader, JournalHashes, SignetEvents,
-    ZenithHeaders,
-};
-
-mod traits;
-pub use traits::{DbProviderExt, RuWriter};
diff --git a/crates/db/src/provider.rs b/crates/db/src/provider.rs
deleted file mode 100644
index f2cd7da..0000000
--- a/crates/db/src/provider.rs
+++ /dev/null
@@ -1,668 +0,0 @@
-use crate::{
-    DataCompat, DbZenithHeader, RuChain, SignetDbRw, ZenithHeaders,
-    tables::{DbSignetEvent, JournalHashes, SignetEvents},
-    traits::RuWriter,
-};
-use alloy::{
-    consensus::BlockHeader,
-    primitives::{Address, B256, BlockNumber},
-};
-use reth::{
-    primitives::StaticFileSegment,
-    providers::{
-        BlockBodyIndicesProvider, BlockNumReader, BlockReader, BlockWriter, Chain, DBProvider,
-        HistoryWriter, OriginalValuesKnown, ProviderError, ProviderResult, StageCheckpointWriter,
-        StateWriteConfig, StateWriter, StaticFileProviderFactory, StaticFileWriter,
-    },
-};
-use reth_db::{
-    cursor::{DbCursorRO, DbCursorRW},
-    models::{BlockNumberAddress, StoredBlockBodyIndices},
-    tables,
-    transaction::{DbTx, DbTxMut},
-};
-use reth_prune_types::{MINIMUM_PRUNING_DISTANCE, PruneMode};
-use signet_evm::BlockResult;
-use signet_node_types::NodeTypesDbTrait;
-use signet_types::primitives::RecoveredBlock;
-use signet_zenith::{
-    Passage::{self, Enter, EnterToken},
-    Transactor::Transact,
-    Zenith,
-};
-use std::ops::{Not, RangeInclusive};
-use tracing::{debug, instrument, trace, warn};
-
-impl<Db> RuWriter for SignetDbRw<Db>
-where
-    Db: NodeTypesDbTrait,
-{
-    fn last_block_number(&self) -> ProviderResult<BlockNumber> {
-        BlockNumReader::last_block_number(&self.0)
-    }
-
-    fn insert_journal_hash(&self, ru_height: u64, hash: B256) -> ProviderResult<()> {
-        self.tx_ref().put::<JournalHashes>(ru_height, hash)?;
-        Ok(())
-    }
-
-    fn remove_journal_hash(&self, ru_height: u64) -> ProviderResult<()> {
-        self.tx_ref().delete::<JournalHashes>(ru_height, None)?;
-        Ok(())
-    }
-
-    fn get_journal_hash(&self, ru_height: u64) -> ProviderResult<Option<B256>> {
-        self.tx_ref().get::<JournalHashes>(ru_height).map_err(Into::into)
-    }
-
-    #[track_caller]
-    fn latest_journal_hash(&self) -> ProviderResult<B256> {
-        let latest_height = self.last_block_number()?;
-        Ok(self
-            .get_journal_hash(latest_height)?
-            .expect("DB in corrupt state. Missing Journal Hash for latest height"))
-    }
-
-    /// Insert an enter into the DB
-    /// This is a signet-specific function that inserts an enter event into the
-    /// [`SignetEvents`] table.
-    fn insert_enter(&self, ru_height: u64, index: u64, enter: Enter) -> ProviderResult<()> {
-        self.tx_ref()
-            .put::<SignetEvents>(ru_height, DbSignetEvent::Enter(index, enter))
-            .map_err(Into::into)
-    }
-
-    /// Insert an enter token event into the DB
-    /// This is a signet-specific function that inserts an enter token event
-    /// into the [`SignetEvents`] table.
-    fn insert_enter_token(
-        &self,
-        ru_height: u64,
-        index: u64,
-        enter_token: EnterToken,
-    ) -> ProviderResult<()> {
-        self.tx_ref()
-            .put::<SignetEvents>(ru_height, DbSignetEvent::EnterToken(index, enter_token))?;
-        Ok(())
-    }
-
-    /// Insert a Transact into the DB
-    /// This is a signet-specific function that inserts a transact event into the
-    /// [`SignetEvents`] table.
-    fn insert_transact(
-        &self,
-        ru_height: u64,
-        index: u64,
-        transact: &Transact,
-    ) -> ProviderResult<()> {
-        // this is unfortunate, but probably fine because the large part is the
-        // shared Bytes object.
-        let t = transact.clone();
-        self.tx_ref()
-            .put::<SignetEvents>(ru_height, DbSignetEvent::Transact(index, t))
-            .map_err(Into::into)
-    }
-
-    fn insert_signet_header(
-        &self,
-        header: Zenith::BlockHeader,
-        ru_height: u64,
-    ) -> ProviderResult<()> {
-        self.tx_ref().put::<ZenithHeaders>(ru_height, header.into())?;
-
-        Ok(())
-    }
-
-    fn get_signet_header(&self, ru_height: u64) -> ProviderResult<Option<Zenith::BlockHeader>> {
-        self.tx_ref().get::<ZenithHeaders>(ru_height).map(|h| h.map(Into::into)).map_err(Into::into)
-    }
-
-    /// Inserts the zenith block into the database, always modifying the following tables:
-    /// * [`JournalHashes`]
-    /// * [`CanonicalHeaders`](tables::CanonicalHeaders)
-    /// * [`Headers`](tables::Headers)
-    /// * [`HeaderTerminalDifficulties`](tables::HeaderTerminalDifficulties)
-    /// * [`HeaderNumbers`](tables::HeaderNumbers)
-    /// * [`BlockBodyIndices`](tables::BlockBodyIndices) (through
-    ///   [`RuWriter::append_signet_block_body`])
-    ///
-    /// If there are transactions in the block, the following tables will be
-    /// modified:
-    /// * [`Transactions`](tables::Transactions) (through
-    ///   [`RuWriter::append_signet_block_body`])
-    /// * [`TransactionBlocks`](tables::TransactionBlocks) (through
-    ///   [`RuWriter::append_signet_block_body`])
-    ///
-    /// If the provider has __not__ configured full sender pruning, this will
-    /// modify [`TransactionSenders`](tables::TransactionSenders).
-    ///
-    /// If the provider has __not__ configured full transaction lookup pruning,
-    /// this will modify [`TransactionHashNumbers`](tables::TransactionHashNumbers).
-    ///
-    /// Ommers and withdrawals are not inserted, as Signet does not use them.
-    fn insert_signet_block(
-        &self,
-        header: Option<Zenith::BlockHeader>,
-        block: &RecoveredBlock,
-        journal_hash: B256,
-    ) -> ProviderResult<StoredBlockBodyIndices> {
-        // Implementation largely copied from
-        // `BlockWriter::insert_block`
-        // in `reth/crates/storage/provider/src/providers/database/provider.rs`
-        // duration metrics have been removed
-        //
-        // Last reviewed at tag v1.9.0
-        let block_number = block.number();
-
-        // SIGNET-SPECIFIC
-        // Put zenith header into the DB
-        if let Some(header) = header {
-            self.insert_signet_header(header, block_number)?;
-        }
-        // SIGNET-SPECIFIC
-        // Put journal hash into the DB
-        self.tx_ref().put::<JournalHashes>(block_number, journal_hash)?;
-
-        let block_hash = block.block.header.hash();
-        let block_header = block.block.header.header();
-
-        self.static_file_provider()
-            .get_writer(block_number, StaticFileSegment::Headers)?
-            .append_header(block_header, &block_hash)?;
-
-        self.tx_ref().put::<tables::HeaderNumbers>(block_hash, block_number)?;
-
-        let mut next_tx_num = self
-            .tx_ref()
-            .cursor_read::<tables::TransactionBlocks>()?
-            .last()?
-            .map(|(n, _)| n + 1)
-            .unwrap_or_default();
-        let first_tx_num = next_tx_num;
-        let tx_count = block.block.body.transactions.len() as u64;
-
-        for (sender, transaction) in block.senders.iter().zip(block.block.body.transactions()) {
-            let hash = *transaction.hash();
-            debug_assert_ne!(hash, B256::ZERO, "transaction hash is zero");
-
-            if self.prune_modes_ref().sender_recovery.as_ref().is_none_or(|m| !m.is_full()) {
-                self.tx_ref().put::<tables::TransactionSenders>(next_tx_num, *sender)?;
-            }
-
-            if self.prune_modes_ref().transaction_lookup.is_none_or(|m| !m.is_full()) {
-                self.tx_ref().put::<tables::TransactionHashNumbers>(hash, next_tx_num)?;
-            }
-
-            next_tx_num += 1;
-        }
-
-        self.append_signet_block_body((block_number, block))?;
-
-        debug!(?block_number, "Inserted block");
-
-        Ok(StoredBlockBodyIndices { first_tx_num, tx_count })
-    }
-
-    /// Appends the body of a signet block to the database.
-    fn append_signet_block_body(&self, body: (BlockNumber, &RecoveredBlock)) -> ProviderResult<()> {
-        // Implementation largely copied from
-        // `DatabaseProvider::append_block_bodies`
-        // in `reth/crates/storage/provider/src/providers/database/provider.rs`
-        // duration metrics have been removed, and the implementation has been
-        // modified to work with a single signet block.
-        //
-        // last reviewed at tag v1.10.1
-
-        let from_block = body.0;
-        let sf = self.static_file_provider();
-
-        // Initialize writer if we will be writing transactions to staticfiles
-        let mut tx_writer = sf.get_writer(from_block, StaticFileSegment::Transactions)?;
-
-        let mut block_indices_cursor = self.tx_ref().cursor_write::<tables::BlockBodyIndices>()?;
-        let mut tx_block_cursor = self.tx_ref().cursor_write::<tables::TransactionBlocks>()?;
-
-        let block_number = body.0;
-        let block = body.1;
-
-        // Get id for the next tx_num or zero if there are no transactions.
-        let mut next_tx_num = tx_block_cursor.last()?.map(|(n, _)| n + 1).unwrap_or_default();
-
-        // Increment block on static file header
-        tx_writer.increment_block(block_number)?;
-
-        let tx_count = block.block.body.transactions.len() as u64;
-        let block_indices = StoredBlockBodyIndices { first_tx_num: next_tx_num, tx_count };
-
-        // insert block meta
-        block_indices_cursor.append(block_number, &block_indices)?;
-
-        // write transaction block index
-        if tx_count != 0 {
-            tx_block_cursor.append(block_indices.last_tx_num(), &block_number)?;
-        }
-
-        // Write transactions
-        for transaction in block.block.body.transactions() {
-            tx_writer.append_transaction(next_tx_num, transaction)?;
-
-            // Increment transaction id for each transaction
-            next_tx_num += 1;
-        }
-
-        debug!(
-            target: "signet_db_lifecycle",
-            ?block_number,
-            "Inserted block body"
-        );
-
-        // NB: Here we'd usually write ommers and withdrawals, via
-        // `write_block_bodies` (which does not write txns, as you might
-        // expect). Signet doesn't have ommers or withdrawals. Therefore we're
-        // able to just return.
-        Ok(())
-    }
-
-    fn get_signet_headers(
-        &self,
-        range: RangeInclusive<BlockNumber>,
-    ) -> ProviderResult<Vec<(u64, Zenith::BlockHeader)>> {
-        // Implementation largely copied from
-        // `DatabaseProvider::get_or_take`
-        // in `reth/crates/storage/provider/src/providers/database/provider.rs`
-        // which after 1.1.3, has been removed and its functionality inlined.
-        //
-        // We have to customize the impl to unwrap the DbZenithHeader
-        let mut items = Vec::new();
-
-        trace!(target: "signet_db_lifecycle", "getting zenith headers");
-        let mut cursor = self.tx_ref().cursor_read::<ZenithHeaders>()?;
-        let mut walker = cursor.walk_range(range)?;
-
-        while let Some((k, DbZenithHeader(e))) = walker.next().transpose()? {
-            items.push((k, e))
-        }
-
-        Ok(items)
-    }
-
-    /// Take zenith headers from the DB.
-    fn take_signet_headers_above(
-        &self,
-        target: BlockNumber,
-    ) -> ProviderResult<Vec<(u64, Zenith::BlockHeader)>> {
-        // Implementation largely copied from
-        // `DatabaseProvider::get_or_take`
-        // in `reth/crates/storage/provider/src/providers/database/provider.rs`
-        // which after 1.1.3, has been removed and the functionality inlined.
-
-        // We have to customize the impl to unwrap the DB enters
-        let mut items = Vec::new();
-        trace!(target: "signet_db_lifecycle", "taking zenith headers");
-        let mut cursor_write = self.tx_ref().cursor_write::<ZenithHeaders>()?;
-        let mut walker = cursor_write.walk_range(target + 1..)?;
-        while let Some((k, DbZenithHeader(e))) = walker.next().transpose()? {
-            walker.delete_current()?;
-            items.push((k, e))
-        }
-
-        Ok(items)
-    }
-
-    /// Remove [`Zenith::BlockHeader`] objects above the specified height from the DB.
-    fn remove_signet_headers_above(&self, target: BlockNumber) -> ProviderResult<()> {
-        self.remove::<ZenithHeaders>(target + 1..)?;
-        Ok(())
-    }
-
-    /// Get [`Passage::EnterToken`], [`Passage::Enter`] and
-    /// [`Transactor::Transact`] events.
-    ///
-    /// [`Transactor::Transact`]: signet_zenith::Transactor::Transact
-    fn get_signet_events(
-        &self,
-        range: RangeInclusive<BlockNumber>,
-    ) -> ProviderResult<Vec<(u64, DbSignetEvent)>> {
-        let mut cursor = self.tx_ref().cursor_read::<SignetEvents>()?;
-        let walker = cursor.walk_range(range)?;
-        walker.collect::<Result<Vec<_>, _>>().map_err(Into::into)
-    }
-
-    /// Take [`Passage::EnterToken`]s from the DB.
- fn take_signet_events_above( - &self, - target: BlockNumber, - ) -> ProviderResult> { - let range = target + 1..=self.last_block_number()?; - let items = self.get_signet_events(range)?; - self.remove_signet_events_above(target)?; - Ok(items) - } - - /// Remove [`Passage::EnterToken`], [`Passage::Enter`] and - /// [`Transactor::Transact`] events above the specified height from the DB. - /// - /// [`Transactor::Transact`]: signet_zenith::Transactor::Transact - fn remove_signet_events_above(&self, target: BlockNumber) -> ProviderResult<()> { - self.remove::(target + 1..)?; - Ok(()) - } - - /// Appends the signet-related contents of a host block to the DB: - /// (RU block, state, enters, enter tokens, transactions) - /// The contents MUST be appended in the following order: - /// - The Signet Block (through `RuWriter::insert_signet_block`) - /// - The state modified by the block (through `RuWriter::ru_write_state`) - /// - The enters, if any (through `RuWriter::insert_enter`) - /// - The enter tokens, if any (through `RuWriter::insert_enter_token`) - /// - The force-included transactions, if any (through `RuWriter::insert_transact`) - /// - /// Several DB tables are affected throughout this process. For a detailed breakdown, - /// see the documentation for each function. - fn append_host_block( - &self, - header: Option, - transacts: impl IntoIterator, - enters: impl IntoIterator, - enter_tokens: impl IntoIterator, - block_result: &BlockResult, - journal_hash: B256, - ) -> ProviderResult<()> { - // Implementation largely copied from - // `BlockWriter::append_blocks_with_state` - // in `reth/crates/storage/provider/src/providers/database/provider.rs` - // duration metrics have been removed - // - // last reviewed at tag v1.9.0 - - let BlockResult { sealed_block: block, execution_outcome, .. } = block_result; - - let ru_height = block.number(); - self.insert_signet_block(header, block, journal_hash)?; - - // Write the state and match the storage location that Reth uses. - self.ru_write_state(execution_outcome, OriginalValuesKnown::No)?; - - // NB: At this point, reth writes hashed state and trie updates. Signet - // skips this. We re-use these tables to write the enters, enter tokens, - // and transact events. - - let mut index: u64 = 0; - for enter in enters.into_iter() { - self.insert_enter(ru_height, index, enter)?; - debug!(ru_height, index, "inserted enter"); - index += 1; - } - - for enter_token in enter_tokens.into_iter() { - self.insert_enter_token(ru_height, index, enter_token)?; - debug!(ru_height, index, "inserted enter token"); - index += 1; - } - - for transact in transacts.into_iter() { - self.insert_transact(ru_height, index, &transact)?; - debug!(ru_height, index, "inserted transact"); - index += 1; - } - - self.update_history_indices(ru_height..=ru_height)?; - - self.update_pipeline_stages(ru_height, false)?; - - debug!(target: "signet_db_lifecycle", ru_height, "Appended blocks"); - - Ok(()) - } - - #[instrument(skip(self))] - fn ru_take_blocks_and_execution_above(&self, target: BlockNumber) -> ProviderResult { - // Implementation largely copied from - // `BlockExecutionWriter::take_block_and_execution_above` - // in `reth/crates/storage/provider/src/providers/database/provider.rs` - // - // last reviewed at tag v1.9.0 - - let range = target + 1..=self.last_block_number()?; - - // This block is copied from `unwind_trie_state_range` - // - // last reviewed at tag v1.9.0 - { - let changed_accounts = self - .tx_ref() - .cursor_read::()? - .walk_range(range.clone())? 
-            .collect::<Result<Vec<_>, _>>()?;
-            // There's no need to also unwind account hashes, since that is
-            // only useful for filling intermediate tables that deal with state
-            // root calculation, which we don't use.
-            self.unwind_account_history_indices(changed_accounts.iter())?;
-
-            let storage_start = BlockNumberAddress((target, Address::ZERO));
-
-            // Unwind storage history indices. Similarly, we don't need to
-            // unwind storage hashes, since we don't use them.
-            let changed_storages = self
-                .tx_ref()
-                .cursor_read::<tables::StorageChangeSets>()?
-                .walk_range(storage_start..)?
-                .collect::<Result<Vec<_>, _>>()?;
-
-            self.unwind_storage_history_indices(changed_storages.iter().copied())?;
-
-            // We also skip calculating the reverted root here.
-        }
-
-        trace!("trie state unwound");
-
-        let execution_state = self.take_state_above(target)?;
-
-        trace!("state taken");
-
-        // get blocks
-        let blocks = self.recovered_block_range(range.clone())?;
-
-        trace!(count = blocks.len(), "blocks loaded");
-
-        // Remove block bodies. They are needed both for getting the block
-        // range and for getting the block execution results, which is why
-        // they are deleted only afterwards.
-        self.remove_blocks_above(target)?;
-
-        trace!("blocks removed");
-
-        // SIGNET-SPECIFIC
-        // This is a Signet-specific addition that removes the enters,
-        // enter tokens, zenith headers, and transact events.
-        let ru_info = self.take_extraction_results_above(target)?.into_iter().collect();
-
-        trace!("extraction results taken");
-
-        // Update pipeline stages
-        self.update_pipeline_stages(target, true)?;
-
-        let chain = Chain::new(blocks, execution_state, Default::default());
-
-        debug!("Successfully reverted blocks and updated pipeline stages");
-
-        Ok(RuChain { inner: chain, ru_info })
-    }
-
-    #[instrument(skip(self))]
-    fn ru_remove_blocks_and_execution_above(&self, target: BlockNumber) -> ProviderResult<()> {
-        // Implementation largely copied from
-        // `BlockExecutionWriter::remove_block_and_execution_above`
-        // in `reth/crates/storage/provider/src/providers/database/provider.rs`
-        // duration metrics have been removed
-        //
-        // last reviewed at tag v1.9.0
-
-        let range = target + 1..=self.last_block_number()?;
-
-        // This block is copied from `unwind_trie_state_range`
-        //
-        // last reviewed at tag v1.9.0
-        {
-            let changed_accounts = self
-                .tx_ref()
-                .cursor_read::<tables::AccountChangeSets>()?
-                .walk_range(range.clone())?
-                .collect::<Result<Vec<_>, _>>()?;
-            // There's no need to also unwind account hashes, since that is
-            // only useful for filling intermediate tables that deal with state
-            // root calculation, which we don't use.
-            self.unwind_account_history_indices(changed_accounts.iter())?;
-
-            let storage_start = BlockNumberAddress((target, Address::ZERO));
-
-            // Unwind storage history indices. Similarly, we don't need to
-            // unwind storage hashes, since we don't use them.
-            let changed_storages = self
-                .tx_ref()
-                .cursor_read::<tables::StorageChangeSets>()?
-                .walk_range(storage_start..)?
-                .collect::<Result<Vec<_>, _>>()?;
-
-            self.unwind_storage_history_indices(changed_storages.iter().copied())?;
-
-            // We also skip calculating the reverted root here.
-        }
-
-        self.remove_state_above(target)?;
-        self.remove_blocks_above(target)?;
-
-        // Signet specific:
-        self.remove_extraction_results_above(target)?;
-
-        // Update pipeline stages
-        self.update_pipeline_stages(target, true)?;
-
-        Ok(())
-    }
-
-    fn ru_write_state(
-        &self,
-        execution_outcome: &signet_evm::ExecutionOutcome,
-        is_value_known: OriginalValuesKnown,
-    ) -> ProviderResult<()> {
-        // Implementation largely copied from
-        // `StateWriter::write_state` for `DatabaseProvider`
-        // in `reth/crates/storage/provider/src/providers/database/provider.rs`
-        //
-        // Last reviewed at tag v1.9.0
-        let first_block = execution_outcome.first_block();
-        let block_count = execution_outcome.len() as u64;
-        let last_block = execution_outcome.last_block();
-        let block_range = first_block..=last_block;
-
-        let tip = self.last_block_number()?.max(last_block);
-
-        let (plain_state, reverts) =
-            execution_outcome.bundle().to_plain_state_and_reverts(is_value_known);
-
-        self.write_state_reverts(reverts, first_block, StateWriteConfig::default())?;
-        self.write_state_changes(plain_state)?;
-
-        // Fetch the first transaction number for each block in the range
-        let block_indices: Vec<_> = self
-            .block_body_indices_range(block_range)?
-            .into_iter()
-            .map(|b| b.first_tx_num)
-            .collect();
-
-        // Ensure all expected blocks are present.
-        if block_indices.len() < block_count as usize {
-            let missing_blocks = block_count - block_indices.len() as u64;
-            return Err(ProviderError::BlockBodyIndicesNotFound(
-                last_block.saturating_sub(missing_blocks - 1),
-            ));
-        }
-
-        let has_receipts_pruning = self.prune_modes_ref().has_receipts_pruning();
-
-        // Prepare a receipts cursor if we are going to write receipts to the
-        // database.
-        //
-        // We write to the database if requested, or if any kind of receipt
-        // pruning is configured.
-        let mut receipts_cursor =
-            self.tx_ref().cursor_write::<tables::Receipts<reth::primitives::Receipt>>()?;
-
-        // SIGNET: This is a departure from Reth's implementation. Because
-        // their impl is on `DatabaseProvider`, it has direct access to the
-        // static file provider, which is one of its private fields. We are
-        // implementing this on `DatabaseProviderRW`, and are not able to
-        // borrow from the inner provider, only to clone it. So we break the
-        // static file provider out into a separate variable, and then use it
-        // to create the static file writer.
-        let sfp = self.0.static_file_provider();
-
-        let mut receipts_static_writer = has_receipts_pruning
-            .not()
-            .then(|| sfp.get_writer(first_block, StaticFileSegment::Receipts))
-            .transpose()?;
-
-        // All receipts from the last 128 blocks are required for blockchain tree, even with
-        // [`PruneSegment::ContractLogs`].
-        let prunable_receipts =
-            PruneMode::Distance(MINIMUM_PRUNING_DISTANCE).should_prune(first_block, tip);
-
-        for (idx, (receipts, first_tx_index)) in
-            execution_outcome.receipts().iter().zip(block_indices).enumerate()
-        {
-            let block_number = first_block + idx as u64;
-
-            // Increment block number for receipts static file writer
-            if let Some(writer) = receipts_static_writer.as_mut() {
-                writer.increment_block(block_number)?;
-            }
-
-            // Skip writing receipts if pruning configuration requires us to.
-            if prunable_receipts
-                && self
-                    .prune_modes_ref()
-                    .receipts
-                    .is_some_and(|mode| mode.should_prune(block_number, tip))
-            {
-                continue;
-            }
-
-            for (idx, receipt) in receipts.iter().map(DataCompat::clone_convert).enumerate() {
-                let receipt_idx = first_tx_index + idx as u64;
-
-                if let Some(writer) = &mut receipts_static_writer {
-                    writer.append_receipt(receipt_idx, &receipt)?;
-                } else {
-                    receipts_cursor.append(receipt_idx, &receipt)?;
-                }
-            }
-        }
-
-        Ok(())
-    }
-}
-
-// Some code in this file has been copied and modified from reth
-//
-// The original license is included below:
-//
-// The MIT License (MIT)
-//
-// Copyright (c) 2022-2024 Reth Contributors
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
diff --git a/crates/db/src/tables.rs b/crates/db/src/tables.rs
deleted file mode 100644
index 3f5359d..0000000
--- a/crates/db/src/tables.rs
+++ /dev/null
@@ -1,424 +0,0 @@
-use alloy::{
-    primitives::{Address, B256, BlockNumber, Bytes, U256, bytes::BufMut},
-    rlp::Buf,
-};
-use reth_db::{
-    DatabaseError,
-    table::{Compress, Decompress, DupSort, Table},
-    tables,
-};
-use signet_zenith::{
-    Passage::{Enter, EnterToken},
-    Transactor::Transact,
-    Zenith::{self, BlockHeader},
-};
-
-const FLAG_TRANSACT: u8 = 0;
-const FLAG_ENTER: u8 = 1;
-const FLAG_ENTER_TOKEN: u8 = 2;
-
-/// Table that maps block heights to zenith headers. We reuse a table from
-/// reth's existing schema for this.
-#[derive(Debug, Clone, Copy)]
-pub struct ZenithHeaders {
-    _private: (),
-}
-
-impl Table for ZenithHeaders {
-    const NAME: &'static str = ::NAME;
-
-    const DUPSORT: bool = ::DUPSORT;
-
-    type Key = u64;
-
-    type Value = DbZenithHeader;
-}
-
-/// Newtype for [`BlockHeader`] that implements [`Compress`] and [`Decompress`].
-///
-/// This is an implementation detail of the [`ZenithHeaders`] table, and should
-/// not be used outside the DB module.
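-///
-/// The header is stored as a fixed-width, 100-byte encoding: a 32-byte
-/// little-endian chain id, an 8-byte host block number, an 8-byte gas limit,
-/// the 20-byte reward address, and the 32-byte block data hash, as written
-/// and read by the [`Compress`] and [`Decompress`] impls below.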
-#[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize)] -pub struct DbZenithHeader(pub BlockHeader); - -impl From for DbZenithHeader { - fn from(header: BlockHeader) -> Self { - Self(header) - } -} - -impl From for BlockHeader { - fn from(header: DbZenithHeader) -> Self { - header.0 - } -} - -impl Compress for DbZenithHeader { - type Compressed = as Compress>::Compressed; - - fn compress_to_buf>(&self, buf: &mut B) { - let DbZenithHeader(Zenith::BlockHeader { - rollupChainId, - hostBlockNumber, - gasLimit, - rewardAddress, - blockDataHash, - }) = self; - buf.put_slice(&rollupChainId.to_le_bytes::<32>()); - buf.put_slice(&hostBlockNumber.to::().to_le_bytes()); - buf.put_slice(&gasLimit.to::().to_le_bytes()); - buf.put_slice(rewardAddress.as_ref()); - buf.put_slice(blockDataHash.as_ref()); - } -} - -impl Decompress for DbZenithHeader { - fn decompress(value: &[u8]) -> Result { - if value.len() < 32 + 8 + 8 + 20 + 32 { - tracing::error!(target: "signet", "decoding error"); - return Err(DatabaseError::Decode); - } - - Ok(Self(Zenith::BlockHeader { - rollupChainId: U256::from_le_slice(&value[0..32]), - hostBlockNumber: U256::from(u64::from_le_bytes(value[32..40].try_into().unwrap())), - gasLimit: U256::from(u64::from_le_bytes(value[40..48].try_into().unwrap())), - rewardAddress: Address::from_slice(&value[48..68]), - blockDataHash: B256::from_slice(&value[68..100]), - })) - } -} - -/// Newtype for extracted Signet events that implements [`Compress`] and -/// [`Decompress`]. -/// -/// This is an implementation detail of the [`SignetEvents`] table, and should -/// not be used outside the DB module. -/// -/// Each event is stored as a separate entry in the same table. -/// The first element of each event tuple is the event's order within all -/// events in the block. -/// -/// The second element is the event itself. -/// -/// We reuse a table from reth's existing schema for this table. -#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, PartialEq)] -pub enum DbSignetEvent { - /// Each Transact event is stored as a separate entry in the same table. - Transact(u64, Transact), - /// Each Enter event is stored as a separate entry in the same table. - Enter(u64, Enter), - /// Each EnterToken event is stored as a separate entry in the same table. - EnterToken(u64, EnterToken), -} - -/// Table that maps block number and index number to signet events. We reuse a -/// table from reth's existing schema for this. The key is the rollup block -/// number, and the subkey is the index of the event within the block. -#[derive(Debug, Clone, Copy)] -pub struct SignetEvents { - _private: (), -} - -impl Table for SignetEvents { - const NAME: &'static str = ::NAME; - - const DUPSORT: bool = ::DUPSORT; - - type Key = BlockNumber; - - type Value = DbSignetEvent; -} - -impl DupSort for SignetEvents { - type SubKey = u64; -} - -/// Newtype for [`Transactor::Transact`] that implements [`Compress`] and -/// [`Decompress`]. -/// -/// This is an implementation detail of the [`SignetEvents`] table, and -/// should not be used outside the DB module. -/// -/// The two fields are the index of the transact within the set of all -/// transacts within the block, and the transact itself. The index is used as a -/// subkey in the table. I.e. if this is the first transact in the block, the -/// index would be 0. 
-///
-/// [`Transactor::Transact`]: signet_zenith::Transactor::Transact
-#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
-pub struct DbTransact(pub u64, pub Transact);
-
-impl From<DbTransact> for Transact {
-    fn from(transact: DbTransact) -> Self {
-        transact.1
-    }
-}
-
-// TODO: ENG-484 - Consider using CompactU256
-// https://linear.app/initiates/issue/ENG-484/consider-using-compactu256-in-compress-impls
-impl Compress for DbTransact {
-    type Compressed = <Vec<u8> as Compress>::Compressed;
-
-    fn compress_to_buf<B: BufMut + AsMut<[u8]>>(&self, buf: &mut B) {
-        let Transact { rollupChainId, sender, to, data, value, gas, maxFeePerGas } = &self.1;
-
-        buf.put_slice(&self.0.to_be_bytes());
-        buf.put_slice(&rollupChainId.to_le_bytes::<32>());
-        buf.put_slice(sender.as_ref());
-        buf.put_slice(to.as_ref());
-        buf.put_slice(&value.to_le_bytes::<32>());
-        buf.put_slice(&gas.to_le_bytes::<32>());
-        buf.put_slice(&maxFeePerGas.to_le_bytes::<32>());
-        // variable element last
-        buf.put_slice(data.as_ref());
-    }
-}
-
-impl Decompress for DbTransact {
-    fn decompress(value: &[u8]) -> Result<Self, DatabaseError> {
-        if value.len() < 176 {
-            return Err(DatabaseError::Decode);
-        }
-
-        let data = Bytes::copy_from_slice(&value[176..]);
-
-        Ok(Self(
-            u64::from_be_bytes(value[0..8].try_into().unwrap()),
-            Transact {
-                rollupChainId: U256::from_le_slice(&value[8..40]),
-                sender: Address::from_slice(&value[40..60]),
-                to: Address::from_slice(&value[60..80]),
-                data,
-                value: U256::from_le_slice(&value[80..112]),
-                gas: U256::from_le_slice(&value[112..144]),
-                maxFeePerGas: U256::from_le_slice(&value[144..176]),
-            },
-        ))
-    }
-}
-
-/// Newtype for [`Passage::Enter`] that implements [`Compress`] and
-/// [`Decompress`].
-///
-/// This is an implementation detail of the [`SignetEvents`] table, and should
-/// not be used outside the DB module.
-///
-/// The two fields are the index of the enter within the set of all
-/// events within the block, and the enter itself. The index is used as a
-/// subkey in the table. I.e. if this is the first enter in the block, the
-/// index would be 0.
-///
-/// [`Passage::Enter`]: signet_zenith::Passage::Enter
-#[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize)]
-pub struct DbEnter(pub u64, pub Enter);
-
-impl From<DbEnter> for Enter {
-    fn from(enter: DbEnter) -> Self {
-        enter.1
-    }
-}
-
-// TODO: ENG-484 - Consider using CompactU256
-impl Compress for DbEnter {
-    type Compressed = <Vec<u8> as Compress>::Compressed;
-
-    fn compress_to_buf<B: BufMut + AsMut<[u8]>>(&self, buf: &mut B) {
-        // BE here is important for the subkey
-        buf.put_slice(&self.0.to_be_bytes());
-        buf.put_slice(&self.1.rollupChainId.to_le_bytes::<32>());
-        buf.put_slice(self.1.rollupRecipient.as_ref());
-        buf.put_slice(&self.1.amount.to_le_bytes::<32>());
-    }
-}
-
-impl Decompress for DbEnter {
-    fn decompress(value: &[u8]) -> Result<Self, DatabaseError> {
-        if value.len() < 8 + 32 + 20 + 32 {
-            return Err(DatabaseError::Decode);
-        }
-
-        Ok(Self(
-            u64::from_be_bytes(value[0..8].try_into().unwrap()),
-            Enter {
-                rollupChainId: U256::from_le_slice(&value[8..40]),
-                rollupRecipient: Address::from_slice(&value[40..60]),
-                amount: U256::from_le_slice(&value[60..92]),
-            },
-        ))
-    }
-}
-
-/// Newtype for [`Passage::EnterToken`] that implements [`Compress`] and
-/// [`Decompress`].
-///
-/// This is an implementation detail of the [`SignetEvents`] table, and should
-/// not be used outside the DB module.
-/// -/// [`Passage::EnterToken`]: signet_zenith::Passage::EnterToken -#[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize)] -pub struct DbEnterToken(pub u64, pub EnterToken); - -impl From for EnterToken { - fn from(enter_token: DbEnterToken) -> Self { - enter_token.1 - } -} - -impl Compress for DbEnterToken { - type Compressed = as Compress>::Compressed; - - fn compress_to_buf>(&self, buf: &mut B) { - buf.put_slice(&self.0.to_be_bytes()); // 8 bytes // BE here is important for the subkey - buf.put_slice(&self.1.rollupChainId.to_le_bytes::<32>()); // 32 bytes - buf.put_slice(self.1.rollupRecipient.as_slice()); // 20 bytes - buf.put_slice(&self.1.amount.to_le_bytes::<32>()); // 32 bytes - buf.put_slice(self.1.token.as_slice()); // 20 bytes - } -} - -impl Decompress for DbEnterToken { - fn decompress(value: &[u8]) -> Result { - if value.len() < 8 + 32 + 20 + 32 + 20 { - return Err(DatabaseError::Decode); - } - - Ok(Self( - u64::from_be_bytes(value[0..8].try_into().unwrap()), - EnterToken { - rollupChainId: U256::from_le_slice(&value[8..40]), - rollupRecipient: Address::from_slice(&value[40..60]), - amount: U256::from_le_slice(&value[60..92]), - token: Address::from_slice(&value[92..112]), - }, - )) - } -} - -impl Compress for DbSignetEvent { - type Compressed = as Compress>::Compressed; - - fn compress_to_buf>(&self, buf: &mut B) { - match self { - Self::Transact(idx, transact) => { - buf.put_u8(FLAG_TRANSACT); - DbTransact(*idx, transact.clone()).compress_to_buf(buf); - } - Self::Enter(idx, enter) => { - buf.put_u8(FLAG_ENTER); - DbEnter(*idx, *enter).compress_to_buf(buf); - } - Self::EnterToken(idx, enter_token) => { - buf.put_u8(FLAG_ENTER_TOKEN); - DbEnterToken(*idx, *enter_token).compress_to_buf(buf); - } - } - } -} - -impl Decompress for DbSignetEvent { - fn decompress(value: &[u8]) -> Result { - let value = &mut &*value; - - if value.is_empty() { - return Err(DatabaseError::Decode); - } - - match value.get_u8() { - FLAG_TRANSACT => { - let transact = DbTransact::decompress(value)?; - Ok(Self::Transact(transact.0, transact.1)) - } - FLAG_ENTER => { - let enter = DbEnter::decompress(value)?; - Ok(Self::Enter(enter.0, enter.1)) - } - FLAG_ENTER_TOKEN => { - let enter_token = DbEnterToken::decompress(value)?; - Ok(Self::EnterToken(enter_token.0, enter_token.1)) - } - _ => Err(DatabaseError::Decode), - } - } -} - -/// Table that maps rollup block heights to post-block journal hashes. 
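-///
-/// As with [`ZenithHeaders`] and [`SignetEvents`], this reuses a table slot
-/// from reth's existing schema rather than declaring a new table.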
-#[derive(Debug, Clone, Copy)] -pub struct JournalHashes { - _private: (), -} - -impl Table for JournalHashes { - const NAME: &'static str = ::NAME; - - const DUPSORT: bool = ::DUPSORT; - - type Key = u64; - - type Value = B256; -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn db_event_transact_roundtrip() { - let event = DbSignetEvent::Transact( - 32, - Transact { - rollupChainId: U256::from(1), - sender: Address::repeat_byte(1), - to: Address::repeat_byte(2), - data: Bytes::from(vec![1, 2, 3]), - value: U256::from(100), - gas: U256::from(200), - maxFeePerGas: U256::from(300), - }, - ); - - let buf = event.clone().compress(); - - let decompressed = DbSignetEvent::decompress(buf.as_slice()).unwrap(); - assert_eq!(event, decompressed); - } - - #[test] - fn db_event_enter_roundtrip() { - let event = DbSignetEvent::Enter( - 32, - Enter { - rollupChainId: U256::from(1), - rollupRecipient: Address::repeat_byte(1), - amount: U256::from(100), - }, - ); - - let buf = event.clone().compress(); - - let decompressed = DbSignetEvent::decompress(buf.as_slice()).unwrap(); - assert_eq!(event, decompressed); - } - - #[test] - fn db_event_enter_token_roundtrip() { - let event = DbSignetEvent::EnterToken( - 32, - EnterToken { - rollupChainId: U256::from(1), - rollupRecipient: Address::repeat_byte(1), - amount: U256::from(100), - token: Address::repeat_byte(2), - }, - ); - - let buf = event.clone().compress(); - - let decompressed = DbSignetEvent::decompress(buf.as_slice()).unwrap(); - assert_eq!(event, decompressed); - } -} diff --git a/crates/db/src/traits.rs b/crates/db/src/traits.rs deleted file mode 100644 index 53499d6..0000000 --- a/crates/db/src/traits.rs +++ /dev/null @@ -1,286 +0,0 @@ -use crate::{DbExtractionResults, DbSignetEvent, RuChain, SignetDbRw}; -use alloy::primitives::{B256, BlockNumber}; -use itertools::Itertools; -#[cfg(doc)] -use reth::providers::DatabaseProviderRW; -use reth::providers::{OriginalValuesKnown, ProviderResult}; -use reth_db::models::StoredBlockBodyIndices; -use signet_evm::BlockResult; -use signet_node_types::NodeTypesDbTrait; -use signet_types::primitives::RecoveredBlock; -use signet_zenith::{Passage, Transactor, Zenith}; -use std::{collections::BTreeMap, ops::RangeInclusive}; -use tracing::trace; - -/// Writer for [`Passage::Enter`] events. -#[auto_impl::auto_impl(&, Arc, Box)] -pub trait RuWriter { - /// Get the last block number - fn last_block_number(&self) -> ProviderResult; - - /// Insert a journal hash into the DB. - fn insert_journal_hash(&self, rollup_height: u64, hash: B256) -> ProviderResult<()>; - - /// Remove a journal hash from the DB. - fn remove_journal_hash(&self, rollup_height: u64) -> ProviderResult<()>; - - /// Get a journal hash from the DB. - fn get_journal_hash(&self, rollup_height: u64) -> ProviderResult>; - - /// Get the latest journal hash from the DB. - fn latest_journal_hash(&self) -> ProviderResult; - - /// Store a zenith header in the DB - fn insert_signet_header( - &self, - header: Zenith::BlockHeader, - host_height: u64, - ) -> ProviderResult<()>; - - /// Get a Zenith header from the DB. - fn get_signet_header(&self, host_height: u64) -> ProviderResult>; - - /// Store a zenith block in the DB. - fn insert_signet_block( - &self, - header: Option, - block: &RecoveredBlock, - journal_hash: B256, - ) -> ProviderResult; - - /// Append a zenith block body to the DB. - fn append_signet_block_body(&self, body: (BlockNumber, &RecoveredBlock)) -> ProviderResult<()>; - - /// Get zenith headers from the DB. 
-    fn get_signet_headers(
-        &self,
-        range: RangeInclusive<BlockNumber>,
-    ) -> ProviderResult<Vec<(u64, Zenith::BlockHeader)>>;
-
-    /// Take zenith headers from the DB.
-    fn take_signet_headers_above(
-        &self,
-        target: BlockNumber,
-    ) -> ProviderResult<Vec<(u64, Zenith::BlockHeader)>>;
-
-    /// Remove [`Zenith::BlockHeader`] objects above the specified height from the DB.
-    fn remove_signet_headers_above(&self, target: BlockNumber) -> ProviderResult<()>;
-
-    /// Store an enter event in the DB.
-    fn insert_enter(&self, height: u64, index: u64, enter: Passage::Enter) -> ProviderResult<()>;
-
-    /// Get enters from the DB.
-    fn get_enters(
-        &self,
-        range: RangeInclusive<BlockNumber>,
-    ) -> ProviderResult<Vec<(BlockNumber, Passage::Enter)>> {
-        Ok(self
-            .get_signet_events(range)?
-            .into_iter()
-            .filter_map(|(height, event)| {
-                if let DbSignetEvent::Enter(_, enter) = event {
-                    Some((height, enter))
-                } else {
-                    None
-                }
-            })
-            .collect())
-    }
-
-    /// Store a transact event in the DB.
-    fn insert_transact(
-        &self,
-        height: u64,
-        index: u64,
-        transact: &Transactor::Transact,
-    ) -> ProviderResult<()>;
-
-    /// Get [`Transactor::Transact`] from the DB.
-    fn get_transacts(
-        &self,
-        range: RangeInclusive<BlockNumber>,
-    ) -> ProviderResult<Vec<(BlockNumber, Transactor::Transact)>> {
-        Ok(self
-            .get_signet_events(range)?
-            .into_iter()
-            .filter_map(|(height, event)| {
-                if let DbSignetEvent::Transact(_, transact) = event {
-                    Some((height, transact))
-                } else {
-                    None
-                }
-            })
-            .collect())
-    }
-
-    /// Insert [`Passage::EnterToken`] into the DB.
-    fn insert_enter_token(
-        &self,
-        height: u64,
-        index: u64,
-        enter_token: Passage::EnterToken,
-    ) -> ProviderResult<()>;
-
-    /// Get [`Passage::EnterToken`] from the DB.
-    fn get_enter_tokens(
-        &self,
-        range: RangeInclusive<BlockNumber>,
-    ) -> ProviderResult<Vec<(BlockNumber, Passage::EnterToken)>> {
-        Ok(self
-            .get_signet_events(range)?
-            .into_iter()
-            .filter_map(|(height, event)| {
-                if let DbSignetEvent::EnterToken(_, enter) = event {
-                    Some((height, enter))
-                } else {
-                    None
-                }
-            })
-            .collect())
-    }
-
-    /// Get [`Passage::EnterToken`], [`Passage::Enter`] and
-    /// [`Transactor::Transact`] events.
-    fn get_signet_events(
-        &self,
-        range: RangeInclusive<BlockNumber>,
-    ) -> ProviderResult<Vec<(BlockNumber, DbSignetEvent)>>;
-
-    /// Take [`Passage::EnterToken`], [`Passage::Enter`] and
-    /// [`Transactor::Transact`] events above the specified height from the DB.
-    fn take_signet_events_above(
-        &self,
-        target: BlockNumber,
-    ) -> ProviderResult<Vec<(BlockNumber, DbSignetEvent)>>;
-
-    /// Remove [`Passage::EnterToken`], [`Passage::Enter`] and
-    /// [`Transactor::Transact`] events above the specified height from the DB.
-    fn remove_signet_events_above(&self, target: BlockNumber) -> ProviderResult<()>;
-
-    /// Get extraction results from the DB.
-    fn get_extraction_results(
-        &self,
-        range: RangeInclusive<BlockNumber>,
-    ) -> ProviderResult<BTreeMap<BlockNumber, DbExtractionResults>> {
-        let mut signet_events = self.get_signet_events(range.clone())?.into_iter().peekable();
-        let mut headers = self.get_signet_headers(range.clone())?.into_iter().peekable();
-
-        // For each of these, it is permissible to have no entries. If there
-        // is no data, the `DbExtractionResults` struct will contain a `None`
-        // header, or an empty vector for the other fields.
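-        //
-        // Both iterators are ordered by block number, so a single forward
-        // pass suffices: `peeking_take_while` drains exactly the entries at
-        // `working_height` without consuming the first entry belonging to a
-        // later height.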
-        let mut items = BTreeMap::new();
-        for working_height in range.clone() {
-            let mut enters = vec![];
-            let mut transacts = vec![];
-            let mut enter_tokens = vec![];
-
-            for (_, event) in
-                signet_events.peeking_take_while(|(height, _)| *height == working_height)
-            {
-                match event {
-                    DbSignetEvent::Enter(_, enter) => enters.push(enter),
-                    DbSignetEvent::Transact(_, transact) => transacts.push(transact),
-                    DbSignetEvent::EnterToken(_, enter_token) => enter_tokens.push(enter_token),
-                }
-            }
-
-            let header = headers
-                .peeking_take_while(|(height, _)| *height == working_height)
-                .map(|(_, header)| header)
-                .next();
-
-            items.insert(
-                working_height,
-                DbExtractionResults { header, enters, transacts, enter_tokens },
-            );
-        }
-
-        Ok(items)
-    }
-
-    /// Take extraction results from the DB.
-    fn take_extraction_results_above(
-        &self,
-        target: BlockNumber,
-    ) -> ProviderResult<BTreeMap<BlockNumber, DbExtractionResults>> {
-        let range = target + 1..=self.last_block_number()?;
-
-        let items = self.get_extraction_results(range)?;
-        trace!(count = items.len(), "got extraction results");
-        self.remove_extraction_results_above(target)?;
-        trace!("removed extraction results");
-        Ok(items)
-    }
-
-    /// Remove extraction results from the DB.
-    ///
-    /// This will remove the following:
-    /// - [`Zenith::BlockHeader`] objects
-    /// - [`Passage::Enter`] events
-    /// - [`Transactor::Transact`] events
-    /// - [`Passage::EnterToken`] events
-    fn remove_extraction_results_above(&self, target: BlockNumber) -> ProviderResult<()> {
-        self.remove_signet_headers_above(target)?;
-        self.remove_signet_events_above(target)?;
-        Ok(())
-    }
-
-    /// Add the output of a host block to the DB.
-    #[allow(clippy::too_many_arguments)]
-    fn append_host_block(
-        &self,
-        header: Option<Zenith::BlockHeader>,
-        transacts: impl IntoIterator<Item = Transactor::Transact>,
-        enters: impl IntoIterator<Item = Passage::Enter>,
-        enter_tokens: impl IntoIterator<Item = Passage::EnterToken>,
-        block_result: &BlockResult,
-        journal_hash: B256,
-    ) -> ProviderResult<()>;
-
-    /// Take the block and execution range from the DB, reverting the blocks
-    /// and returning the removed information.
-    fn ru_take_blocks_and_execution_above(&self, target: BlockNumber) -> ProviderResult<RuChain>;
-
-    /// Remove the block and execution range from the DB.
-    fn ru_remove_blocks_and_execution_above(&self, target: BlockNumber) -> ProviderResult<()>;
-
-    /// Write the state of the rollup to the database.
-    ///
-    /// This should be identical to [`StateWriter::write_state`], but using a
-    /// [`signet_evm::ExecutionOutcome`].
-    ///
-    /// [`StateWriter::write_state`]: reth::providers::StateWriter::write_state
-    fn ru_write_state(
-        &self,
-        execution_outcome: &signet_evm::ExecutionOutcome,
-        is_value_known: OriginalValuesKnown,
-    ) -> ProviderResult<()>;
-}
-
-/// Extend the [`DatabaseProviderRW`] with a guarded commit function.
-pub trait DbProviderExt<Db>: Into<SignetDbRw<Db>>
-where
-    Db: NodeTypesDbTrait,
-{
-    /// Update the database. The function `f` is called with a mutable
-    /// reference to the database. If the function returns an error, the
-    /// transaction is rolled back.
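-    ///
-    /// If `f` succeeds, the transaction is committed; any error from the
-    /// commit itself is propagated to the caller.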
- fn update( - self, - f: impl FnOnce(&mut SignetDbRw) -> ProviderResult<()>, - ) -> ProviderResult<()>; -} - -impl DbProviderExt for T -where - Db: NodeTypesDbTrait, - T: Into>, -{ - fn update( - self, - f: impl FnOnce(&mut SignetDbRw) -> ProviderResult<()>, - ) -> ProviderResult<()> { - let mut this = self.into(); - f(&mut this)?; - this.commit().map(drop) - } -} diff --git a/crates/db/tests/common/mod.rs b/crates/db/tests/common/mod.rs deleted file mode 100644 index d230e90..0000000 --- a/crates/db/tests/common/mod.rs +++ /dev/null @@ -1,39 +0,0 @@ -use alloy::genesis::Genesis; -use reth::{ - chainspec::ChainSpec, - providers::{ - ProviderFactory, - providers::{RocksDBProvider, StaticFileProvider}, - }, -}; -use reth_db::test_utils::{ - create_test_rocksdb_dir, create_test_rw_db, create_test_static_files_dir, -}; -use reth_exex_test_utils::TmpDB as TmpDb; -use signet_node_types::SignetNodeTypes; -use std::sync::{Arc, OnceLock}; - -static GENESIS_JSON: &str = include_str!("../../../../tests/artifacts/local.genesis.json"); - -static SPEC: OnceLock> = OnceLock::new(); - -/// Returns a chain spec for tests. -pub fn chain_spec() -> Arc { - SPEC.get_or_init(|| { - let genesis: Genesis = serde_json::from_str(GENESIS_JSON).expect("valid genesis json"); - Arc::new(genesis.into()) - }) - .clone() -} - -/// Create a provider factory with a chain spec -pub fn create_test_provider_factory() -> ProviderFactory> { - let db = create_test_rw_db(); - let (static_dir, _) = create_test_static_files_dir(); - let (rocksdb_dir, _) = create_test_rocksdb_dir(); - - let sfp = StaticFileProvider::read_write(static_dir.keep()).expect("static file provider"); - let rocks_db = RocksDBProvider::builder(rocksdb_dir.keep()).build().unwrap(); - - ProviderFactory::new(db, chain_spec(), sfp, rocks_db).expect("provider factory") -} diff --git a/crates/db/tests/db.rs b/crates/db/tests/db.rs deleted file mode 100644 index 6d4f769..0000000 --- a/crates/db/tests/db.rs +++ /dev/null @@ -1,171 +0,0 @@ -#[path = "./common/mod.rs"] -mod test_common; - -use alloy::{ - consensus::{BlockBody, BlockHeader, Signed, TxEip1559, TxEnvelope}, - primitives::{Address, B256, U256}, - signers::Signature, -}; -use reth::providers::{BlockNumReader, BlockReader}; -use signet_constants::test_utils::{DEPLOY_HEIGHT, RU_CHAIN_ID}; -use signet_db::RuWriter; -use signet_types::primitives::{RecoveredBlock, SealedBlock, SealedHeader, TransactionSigned}; -use signet_zenith::Zenith; - -#[test] -fn test_ru_writer() { - let factory = test_common::create_test_provider_factory(); - - let writer = factory.provider_rw().unwrap(); - - dbg!(writer.last_block_number().unwrap()); -} - -#[test] -fn test_insert_signet_block() { - let factory = test_common::create_test_provider_factory(); - let writer = factory.provider_rw().unwrap(); - - let journal_hash = B256::repeat_byte(0x55); - let header = Some(Zenith::BlockHeader { - rollupChainId: U256::from(RU_CHAIN_ID), - hostBlockNumber: U256::from(DEPLOY_HEIGHT), - gasLimit: U256::from(30_000_000), - rewardAddress: Address::repeat_byte(0x11), - blockDataHash: B256::repeat_byte(0x22), - }); - - let block = RecoveredBlock { - block: SealedBlock { - header: SealedHeader::new(alloy::consensus::Header::default()), - body: BlockBody { - transactions: std::iter::repeat_n( - TxEnvelope::Eip1559(Signed::new_unhashed( - TxEip1559::default(), - Signature::test_signature(), - )) - .into(), - 10, - ) - .collect(), - ommers: vec![], - withdrawals: None, - }, - }, - senders: std::iter::repeat_n(Address::repeat_byte(0x33), 10).collect(), 
- }; - - writer.insert_signet_block(header, &block, journal_hash).unwrap(); - writer.commit().unwrap(); - - let reader = factory.provider_rw().unwrap(); - - // Check basic updates - assert_eq!(reader.last_block_number().unwrap(), block.number()); - assert_eq!(reader.latest_journal_hash().unwrap(), journal_hash); - assert_eq!(reader.get_journal_hash(block.number()).unwrap(), Some(journal_hash)); - // This tests resolving `BlockId::Latest` - assert_eq!(reader.best_block_number().unwrap(), block.number()); - - // Check that the block can be loaded back - let loaded_block = reader - .recovered_block_range(block.number()..=block.number()) - .unwrap() - .first() - .cloned() - .unwrap(); - assert_eq!(loaded_block.header(), block.block.header.header()); - assert_eq!(loaded_block.body().transactions.len(), block.block.body.transactions.len()); - - // Check that the ZenithHeader can be loaded back - let loaded_header = reader.get_signet_header(block.number()).unwrap(); - assert_eq!(loaded_header, header); -} - -#[test] -fn test_transaction_hash_indexing() { - use reth::providers::TransactionsProvider; - use reth_db::{cursor::DbCursorRO, tables, transaction::DbTx}; - - let factory = test_common::create_test_provider_factory(); - let writer = factory.provider_rw().unwrap(); - - let journal_hash = B256::repeat_byte(0x55); - let header = Some(Zenith::BlockHeader { - rollupChainId: U256::from(RU_CHAIN_ID), - hostBlockNumber: U256::from(DEPLOY_HEIGHT), - gasLimit: U256::from(30_000_000), - rewardAddress: Address::repeat_byte(0x11), - blockDataHash: B256::repeat_byte(0x22), - }); - - // Create transactions with distinct content so they have different hashes - let transactions: Vec = (0..5u64) - .map(|i| { - let tx = TxEip1559 { nonce: i, ..Default::default() }; - TxEnvelope::Eip1559(Signed::new_unhashed(tx, Signature::test_signature())).into() - }) - .collect(); - - // Collect the expected hashes BEFORE inserting - let expected_hashes: Vec = - transactions.iter().map(|tx: &TransactionSigned| *tx.hash()).collect(); - - let block = RecoveredBlock { - block: SealedBlock { - header: SealedHeader::new(alloy::consensus::Header::default()), - body: BlockBody { transactions, ommers: vec![], withdrawals: None }, - }, - senders: std::iter::repeat_n(Address::repeat_byte(0x33), 5).collect(), - }; - - writer.insert_signet_block(header, &block, journal_hash).unwrap(); - writer.commit().unwrap(); - - let reader = factory.provider_rw().unwrap(); - - // Verify each transaction hash is in the index - for (idx, expected_hash) in expected_hashes.iter().enumerate() { - // Method 1: Use provider's transaction_by_hash - let tx_result = reader.transaction_by_hash(*expected_hash).unwrap(); - assert!( - tx_result.is_some(), - "transaction_by_hash failed for tx {} with hash {}", - idx, - expected_hash - ); - - // Method 2: Query TransactionHashNumbers directly - let mut cursor = reader.tx_ref().cursor_read::().unwrap(); - let index_result = cursor.seek_exact(*expected_hash).unwrap(); - assert!( - index_result.is_some(), - "TransactionHashNumbers entry missing for tx {} with hash {}", - idx, - expected_hash - ); - - let (hash, tx_num) = index_result.unwrap(); - assert_eq!(hash, *expected_hash, "Hash mismatch in index for tx {}", idx); - assert_eq!(tx_num, idx as u64, "Unexpected tx_num for tx {}", idx); - } - - // Verify hashes match when loading block back from storage - let loaded_block = reader - .recovered_block_range(block.number()..=block.number()) - .unwrap() - .first() - .cloned() - .unwrap(); - - for (idx, (original_hash, 
loaded_tx)) in - expected_hashes.iter().zip(loaded_block.body().transactions.iter()).enumerate() - { - let loaded_hash = *loaded_tx.hash(); - assert_eq!( - *original_hash, loaded_hash, - "Hash mismatch after load for tx {}: original={}, loaded={}", - idx, original_hash, loaded_hash - ); - } -} diff --git a/crates/genesis/Cargo.toml b/crates/genesis/Cargo.toml index c0d5c3c..bad0837 100644 --- a/crates/genesis/Cargo.toml +++ b/crates/genesis/Cargo.toml @@ -14,4 +14,5 @@ init4-bin-base.workspace = true serde = { workspace = true, features = ["derive"] } serde_json.workspace = true signet-constants.workspace = true +signet-evm.workspace = true thiserror.workspace = true diff --git a/crates/genesis/src/lib.rs b/crates/genesis/src/lib.rs index ac3973b..3d783a5 100644 --- a/crates/genesis/src/lib.rs +++ b/crates/genesis/src/lib.rs @@ -16,6 +16,7 @@ use init4_bin_base::utils::from_env::{ EnvItemInfo, FromEnv, FromEnvErr, FromEnvVar, parse_env_if_present, }; use signet_constants::KnownChains; +use signet_evm::EthereumHardfork; use std::{borrow::Cow, path::PathBuf, str::FromStr, sync::LazyLock}; /// Signet mainnet genesis file. @@ -78,6 +79,62 @@ pub static TEST_HOST_GENESIS: LazyLock = LazyLock::new(|| { serde_json::from_str(TEST_HOST_GENESIS_JSON).expect("Failed to parse test host genesis") }); +/// Derive the [`EthereumHardfork`] flags active at the genesis block. +/// +/// Inspects the chain config in `genesis` and returns the set of hardforks +/// whose activation block or timestamp is at or before the genesis +/// block/timestamp. +pub fn genesis_hardforks(genesis: &Genesis) -> EthereumHardfork { + let block = genesis.number.unwrap_or(0); + let timestamp = genesis.timestamp; + let c = &genesis.config; + + let at_block = |b: Option| b.is_some_and(|b| b <= block); + let at_time = |t: Option| t.is_some_and(|t| t <= timestamp); + + [ + (true, EthereumHardfork::Frontier), + (at_block(c.homestead_block), EthereumHardfork::Homestead), + (at_block(c.dao_fork_block), EthereumHardfork::Dao), + (at_block(c.eip150_block), EthereumHardfork::Tangerine), + (at_block(c.eip155_block), EthereumHardfork::SpuriousDragon), + (at_block(c.byzantium_block), EthereumHardfork::Byzantium), + (at_block(c.constantinople_block), EthereumHardfork::Constantinople), + (at_block(c.petersburg_block), EthereumHardfork::Petersburg), + (at_block(c.istanbul_block), EthereumHardfork::Istanbul), + (at_block(c.muir_glacier_block), EthereumHardfork::MuirGlacier), + (at_block(c.berlin_block), EthereumHardfork::Berlin), + (at_block(c.london_block), EthereumHardfork::London), + (at_block(c.arrow_glacier_block), EthereumHardfork::ArrowGlacier), + (at_block(c.gray_glacier_block), EthereumHardfork::GrayGlacier), + (at_block(c.merge_netsplit_block), EthereumHardfork::Paris), + (at_time(c.shanghai_time), EthereumHardfork::Shanghai), + (at_time(c.cancun_time), EthereumHardfork::Cancun), + (at_time(c.prague_time), EthereumHardfork::Prague), + (at_time(c.osaka_time), EthereumHardfork::Osaka), + ] + .into_iter() + .filter(|(active, _)| *active) + .map(|(_, fork)| fork) + .fold(EthereumHardfork::empty(), |acc, fork| acc | fork) +} + +/// Mainnet genesis hardforks. +pub static MAINNET_GENESIS_HARDFORKS: LazyLock = + LazyLock::new(|| genesis_hardforks(&MAINNET_GENESIS)); + +/// Parmigiana testnet genesis hardforks. +pub static PARMIGIANA_GENESIS_HARDFORKS: LazyLock = + LazyLock::new(|| genesis_hardforks(&PARMIGIANA_GENESIS)); + +/// Pecorino testnet genesis hardforks. 
+pub static PECORINO_GENESIS_HARDFORKS: LazyLock = + LazyLock::new(|| genesis_hardforks(&PECORINO_GENESIS)); + +/// Test genesis hardforks. +pub static TEST_GENESIS_HARDFORKS: LazyLock = + LazyLock::new(|| genesis_hardforks(&TEST_GENESIS)); + /// Environment variable for specifying the rollup genesis JSON file path. const ROLLUP_GENESIS_JSON_PATH: &str = "ROLLUP_GENESIS_JSON_PATH"; @@ -140,6 +197,21 @@ where } impl GenesisSpec { + /// Get the [`EthereumHardfork`] flags active at the rollup genesis block. + pub fn genesis_hardforks(&self) -> EthereumHardfork { + match self { + Self::Known(KnownChains::Mainnet) => *MAINNET_GENESIS_HARDFORKS, + Self::Known(KnownChains::Parmigiana) => *PARMIGIANA_GENESIS_HARDFORKS, + #[allow(deprecated)] + Self::Known(KnownChains::Pecorino) => *PECORINO_GENESIS_HARDFORKS, + Self::Known(KnownChains::Test) => *TEST_GENESIS_HARDFORKS, + Self::Custom { .. } => { + let network = self.load_genesis().expect("failed to load custom genesis"); + genesis_hardforks(&network.rollup) + } + } + } + /// Load the raw genesis JSON strings from the specified source. /// /// Returns both rollup and host genesis JSON strings. diff --git a/crates/node-config/Cargo.toml b/crates/node-config/Cargo.toml index 5774f5e..1db70db 100644 --- a/crates/node-config/Cargo.toml +++ b/crates/node-config/Cargo.toml @@ -18,8 +18,6 @@ reth.workspace = true reth-chainspec.workspace = true reth-exex.workspace = true reth-node-api.workspace = true -reth-db = { workspace = true, optional = true} - alloy.workspace = true eyre.workspace = true @@ -30,5 +28,7 @@ trevm.workspace = true signet-genesis.workspace = true signet-block-processor.workspace = true +tempfile = { workspace = true, optional = true } + [features] -test_utils = ["dep:reth-db", "reth-db/test-utils"] +test_utils = ["dep:tempfile"] diff --git a/crates/node-config/src/test_utils.rs b/crates/node-config/src/test_utils.rs index b5d6b67..b5ca8b7 100644 --- a/crates/node-config/src/test_utils.rs +++ b/crates/node-config/src/test_utils.rs @@ -1,14 +1,14 @@ use crate::SignetNodeConfig; use init4_bin_base::utils::calc::SlotCalculator; -use reth_db::test_utils::tempdir_path; use signet_blobber::BlobFetcherConfig; use signet_genesis::GenesisSpec; use signet_types::constants::KnownChains; use std::borrow::Cow; +use tempfile::tempdir; /// Make a test config pub fn test_config() -> SignetNodeConfig { - let mut tempdir = tempdir_path(); + let mut tempdir = tempdir().unwrap().keep(); tempdir.push("signet.ipc"); // Make a new test config with the IPC endpoint set to the tempdir. 
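A minimal sketch of how the `genesis_hardforks` helper added above composes with `GenesisSpec` (hypothetical consumer code, not part of this patch; it assumes `EthereumHardfork` is a bitflags-style set, which the `empty()` and `|` usage above suggests, so `contains` is available):

```rust
use signet_constants::KnownChains;
use signet_evm::EthereumHardfork;
use signet_genesis::{GenesisSpec, MAINNET_GENESIS, genesis_hardforks};

fn mainnet_forks() {
    // Derive the active-at-genesis hardfork set straight from a genesis file.
    let forks = genesis_hardforks(&MAINNET_GENESIS);

    // Or go through a `GenesisSpec`, which returns the cached
    // `*_GENESIS_HARDFORKS` static for known chains.
    let spec = GenesisSpec::Known(KnownChains::Mainnet);
    assert_eq!(spec.genesis_hardforks(), forks);

    // Individual forks can then be tested as flags (assumed bitflags API).
    assert!(forks.contains(EthereumHardfork::London));
}
```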
diff --git a/crates/node-tests/Cargo.toml b/crates/node-tests/Cargo.toml index 0577309..a6060f0 100644 --- a/crates/node-tests/Cargo.toml +++ b/crates/node-tests/Cargo.toml @@ -11,11 +11,14 @@ repository.workspace = true [dependencies] signet-node.workspace = true signet-node-config = { workspace = true, features = ["test_utils"] } -signet-node-types.workspace = true -signet-db.workspace = true +signet-cold = { workspace = true, features = ["in-memory"] } signet-constants.workspace = true signet-evm.workspace = true +signet-genesis.workspace = true +signet-hot = { workspace = true, features = ["in-memory"] } +signet-storage.workspace = true +signet-storage-types.workspace = true signet-test-utils.workspace = true signet-types.workspace = true signet-zenith.workspace = true @@ -23,18 +26,15 @@ signet-zenith.workspace = true alloy.workspace = true reth.workspace = true -reth-db.workspace = true reth-exex.workspace = true reth-exex-test-utils.workspace = true reth-node-api.workspace = true eyre.workspace = true +tokio.workspace = true tracing.workspace = true tracing-subscriber.workspace = true -tokio.workspace = true [dev-dependencies] -signet-genesis.workspace = true - serde_json.workspace = true serial_test = "3.2.0" diff --git a/crates/node-tests/src/context.rs b/crates/node-tests/src/context.rs index cded0bd..935fa47 100644 --- a/crates/node-tests/src/context.rs +++ b/crates/node-tests/src/context.rs @@ -2,31 +2,30 @@ use crate::{ HostBlockSpec, NotificationSpec, NotificationWithSidecars, RuBlockSpec, convert::ToRethPrimitive, types::{CtxProvider, Log, TestCounterInstance, TestErc20Instance, TestLogInstance}, - utils::create_test_provider_factory_with_chain_spec, }; use alloy::{ consensus::{BlockHeader, TxEnvelope, constants::ETH_TO_WEI}, genesis::{Genesis, GenesisAccount}, network::{Ethereum, EthereumWallet, TransactionBuilder as _}, - primitives::{Address, I256, Sign, U256, keccak256, map::HashSet}, + primitives::{Address, B256, I256, Sign, U256, keccak256, map::HashSet}, providers::{ Provider as _, ProviderBuilder, SendableTx, fillers::{BlobGasFiller, SimpleNonceManager}, }, rpc::types::eth::{TransactionReceipt, TransactionRequest}, }; -use reth::{ - primitives::Account, - providers::{AccountReader, BlockNumReader, ProviderFactory}, - transaction_pool::{TransactionOrigin, TransactionPool, test_utils::MockTransaction}, -}; -use reth_db::{PlainAccountState, transaction::DbTxMut}; -use reth_exex_test_utils::{Adapter, TestExExHandle, TmpDB as TmpDb}; +use reth::transaction_pool::{TransactionOrigin, TransactionPool, test_utils::MockTransaction}; +use reth_exex_test_utils::{Adapter, TestExExHandle}; use reth_node_api::FullNodeComponents; -use signet_db::DbProviderExt; -use signet_node::SignetNodeBuilder; +use signet_cold::{BlockData, ColdStorageReadHandle, mem::MemColdBackend}; +use signet_hot::{ + db::{HotDbRead, UnsafeDbWrite}, + mem::MemKv, +}; +use signet_node::{NodeStatus, SignetNodeBuilder}; use signet_node_config::test_utils::test_config; -use signet_node_types::{NodeStatus, SignetNodeTypes}; +use signet_storage::{CancellationToken, HistoryRead, HistoryWrite, HotKv, UnifiedStorage}; +use signet_storage_types::{Account, BlockNumberList, DbSignetEvent, RecoveredTx, SealedHeader}; use signet_test_utils::contracts::counter::COUNTER_DEPLOY_CODE; use signet_types::constants::{HostPermitted, RollupPermitted, SignetSystemConstants}; use signet_zenith::{HostOrders::OrdersInstance, RollupPassage::RollupPassageInstance}; @@ -46,7 +45,7 @@ use tracing::instrument; /// instance. 
/// - The components for the Signet Node instance /// - A receiver for the node status (latest block processed) -/// - A DB provider factory +/// - Unified storage backed by in-memory hot and cold storage /// - An alloy provider connected to the Signet Node RPC, /// - Configured with standard fillers /// - A height, used to fill in block numbers for host block notifications @@ -62,8 +61,8 @@ pub struct SignetTestContext { /// The Signet Node status receiver pub node_status: watch::Receiver, - /// The provider factory for the Signet Node instance - pub factory: ProviderFactory>, + /// Unified hot + cold storage for the rollup. + pub storage: Arc>, /// An alloy provider connected to the Signet Node RPC. pub alloy_provider: CtxProvider, @@ -80,6 +79,9 @@ pub struct SignetTestContext { /// Test addresses, copied from [`signet_test_utils::users::TEST_USERS`] for /// convenience pub addresses: [Address; 10], + + /// Cancellation token for the cold storage task. + cancel_token: CancellationToken, } impl core::fmt::Debug for SignetTestContext { @@ -88,6 +90,12 @@ impl core::fmt::Debug for SignetTestContext { } } +impl Drop for SignetTestContext { + fn drop(&mut self) { + self.cancel_token.cancel(); + } +} + impl SignetTestContext { /// Make a new test env #[instrument] @@ -96,17 +104,62 @@ impl SignetTestContext { let (ctx, handle) = reth_exex_test_utils::test_exex_context().await.unwrap(); let components = ctx.components.clone(); - // set up Signet Node db + // set up Signet Node storage let constants = cfg.constants().unwrap(); - let chain_spec: Arc<_> = cfg.chain_spec().clone(); - let factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); + let cancel_token = CancellationToken::new(); + let hot = MemKv::new(); + + // Load genesis into hot storage + { + let hardforks = signet_genesis::genesis_hardforks(cfg.genesis()); + let writer = hot.writer().unwrap(); + writer.load_genesis(cfg.genesis(), &hardforks).unwrap(); + writer.commit().unwrap(); + } + + // set up some keys and addresses + let keys = &signet_test_utils::users::TEST_SIGNERS; + let addresses = *signet_test_utils::users::TEST_USERS; + + // Mint balances for test addresses + let mint_amnt = U256::from(1_000) * U256::from(ETH_TO_WEI); + { + // Read existing accounts before acquiring write lock + let existing_accounts: Vec<_> = { + let reader = hot.reader().unwrap(); + addresses + .iter() + .map(|addr| reader.get_account(addr).unwrap().unwrap_or_default()) + .collect() + }; + let writer = hot.writer().unwrap(); + for (address, existing) in addresses.iter().zip(existing_accounts) { + let updated = + Account { balance: existing.balance.saturating_add(mint_amnt), ..existing }; + writer.put_account(address, &updated).unwrap(); + } + writer.commit().unwrap(); + } + + // Create UnifiedStorage + let storage = + Arc::new(UnifiedStorage::spawn(hot, MemColdBackend::new(), cancel_token.clone())); + + // Write genesis block to cold storage so fee_history and other + // cold-backed RPC endpoints can find block 0. 
+ { + let reader = storage.reader().unwrap(); + let genesis_header = reader.get_header(0).unwrap().unwrap(); + let genesis_block = BlockData::new(genesis_header, vec![], vec![], vec![], None); + storage.cold().append_block(genesis_block).await.unwrap(); + } let alias_oracle: Arc>> = Arc::new(Mutex::new(HashSet::default())); let (node, mut node_status) = SignetNodeBuilder::new(cfg.clone()) .with_ctx(ctx) - .with_factory(factory.clone()) + .with_storage(Arc::clone(&storage)) .with_alias_oracle(Arc::clone(&alias_oracle)) .build() .unwrap(); @@ -115,30 +168,12 @@ impl SignetTestContext { let node = tokio::spawn(node.start()); node_status.changed().await.unwrap(); - // set up some keys and addresses - let keys = &signet_test_utils::users::TEST_SIGNERS; - let addresses = *signet_test_utils::users::TEST_USERS; - - // register the signers on the alloy proider + // register the signers on the alloy provider let mut wallet = EthereumWallet::new(keys[0].clone()); for key in keys.iter().skip(1) { wallet.register_signer(key.clone()); } - let mint_amnt = U256::from(1_000) * U256::from(ETH_TO_WEI); - factory - .provider_rw() - .unwrap() - .update(|rw| { - for address in addresses.into_iter() { - let mut account = rw.basic_account(&address)?.unwrap_or_default(); - account.balance = account.balance.saturating_add(mint_amnt); - rw.tx_ref().put::(address, account)?; - } - Ok(()) - }) - .unwrap(); - // after RPC booted, we can create the alloy provider let alloy_provider = ProviderBuilder::new_with_network() .disable_recommended_fillers() @@ -155,13 +190,13 @@ impl SignetTestContext { handle, components, node_status, - factory, + storage, alloy_provider, constants, height: AtomicU64::new(cfg.constants().unwrap().host_deploy_height()), - alias_oracle, addresses, + cancel_token, }; (this, node) @@ -193,6 +228,56 @@ impl SignetTestContext { self.constants.clone() } + /// Get a cold storage read handle. + pub fn cold(&self) -> ColdStorageReadHandle { + self.storage.cold_reader() + } + + /// Get a header by block number from hot storage. + pub fn header_by_number(&self, number: u64) -> Option { + let reader = self.storage.reader().unwrap(); + reader.get_header(number).unwrap() + } + + /// Get the last block number from hot storage. + pub fn last_block_number(&self) -> u64 { + let reader = self.storage.reader().unwrap(); + reader.last_block_number().unwrap().unwrap_or(0) + } + + /// Get all transactions in a block from cold storage. + pub async fn transactions_in_block( + &self, + block: u64, + ) -> Vec { + self.cold().get_transactions_in_block(block).await.unwrap() + } + + /// Get signet events in a single block from cold storage. + pub async fn signet_events_in_block(&self, block: u64) -> Vec { + self.cold().get_signet_events_in_block(block).await.unwrap() + } + + /// Look up a transaction by hash from cold storage. + pub async fn transaction_by_hash(&self, hash: B256) -> Option { + self.cold().get_tx_by_hash(hash).await.unwrap().map(|c| c.into_inner()) + } + + /// Get the account history (block number list) for an address. + pub fn account_history(&self, address: Address) -> Option { + let reader = self.storage.reader().unwrap(); + reader.last_account_history(address).unwrap().map(|(_, list)| list) + } + + /// Get an account's state at a specific block height. + /// + /// Returns `None` if the account did not exist at the given height + /// (i.e. balance, nonce, and bytecode are all zero/empty). 
+ pub fn account_at_height(&self, address: Address, height: u64) -> Option { + let reader = self.storage.reader().unwrap(); + reader.get_account_at_height(&address, Some(height)).unwrap().filter(|a| !a.is_empty()) + } + /// Send a notification to the Signet Node instance pub async fn send_notification(&self, notification: NotificationWithSidecars) { let pool = self.components.pool(); @@ -236,6 +321,21 @@ impl SignetTestContext { self.send_notification(notification).await; recv.changed().await?; + // Wait for cold storage to finish processing dispatched blocks. + // `append_blocks()` dispatches to cold asynchronously, so we + // poll until cold storage has the expected block. + let cold = self.storage.cold_reader(); + tokio::time::timeout(std::time::Duration::from_secs(30), async { + loop { + match cold.get_latest_block().await { + Ok(Some(latest)) if latest >= expected_height => break, + _ => tokio::task::yield_now().await, + } + } + }) + .await + .expect("cold storage did not reach expected height within 30s"); + // cheeky little check that the RPC is correct :) assert_eq!(self.alloy_provider.get_block_number().await.unwrap(), expected_height); @@ -286,7 +386,8 @@ impl SignetTestContext { /// Get the account for an address. pub fn account(&self, address: Address) -> Option { - self.factory.provider().unwrap().basic_account(&address).unwrap() + let reader = self.storage.reader().unwrap(); + reader.get_account(&address).unwrap() } /// Get the nonce off an addresss. @@ -381,7 +482,7 @@ impl SignetTestContext { /// assertions if run on the genesis block. If any other blocks have been /// processed it will do nothing. pub fn verify_allocs(&self, genesis: &Genesis) { - if self.factory.provider().unwrap().last_block_number().unwrap() != 0 { + if self.last_block_number() != 0 { return; } @@ -403,10 +504,12 @@ impl SignetTestContext { } if let Some(ref storage) = alloc.storage { + let reader = self.storage.reader().unwrap(); for (key, value) in storage { + let slot = U256::from_be_bytes(key.0); assert_eq!( - self.factory.latest().unwrap().storage(address, *key).unwrap(), - Some((*value).into()) + reader.get_storage(&address, &slot).unwrap(), + Some(U256::from_be_bytes(value.0)) ); } } diff --git a/crates/node-tests/src/lib.rs b/crates/node-tests/src/lib.rs index 80ecf4d..10a13f8 100644 --- a/crates/node-tests/src/lib.rs +++ b/crates/node-tests/src/lib.rs @@ -32,7 +32,7 @@ pub mod types; pub mod utils; pub use utils::run_test; -pub use reth_exex_test_utils::{Adapter, TestExExContext, TmpDB as TmpDb}; +pub use reth_exex_test_utils::{Adapter, TestExExContext}; pub use signet_test_utils::specs::{ HostBlockSpec, NotificationSpec, NotificationWithSidecars, RuBlockSpec, }; diff --git a/crates/node-tests/src/utils.rs b/crates/node-tests/src/utils.rs index 6d40508..f4e68e5 100644 --- a/crates/node-tests/src/utils.rs +++ b/crates/node-tests/src/utils.rs @@ -5,19 +5,7 @@ use alloy::{ signers::{SignerSync, local::PrivateKeySigner}, uint, }; -use reth::{ - chainspec::ChainSpec, - primitives::{Block, BlockBody, Header, RecoveredBlock, Transaction, TransactionSigned}, - providers::{ - ProviderFactory, - providers::{RocksDBProvider, StaticFileProvider}, - }, -}; -use reth_db::test_utils::{ - create_test_rocksdb_dir, create_test_rw_db, create_test_static_files_dir, -}; -use reth_exex_test_utils::TmpDB; -use signet_node_types::SignetNodeTypes; +use reth::primitives::{Block, BlockBody, Header, RecoveredBlock, Transaction, TransactionSigned}; use signet_zenith::Zenith; use std::{panic, sync::Once}; use 
tracing_subscriber::EnvFilter; @@ -127,18 +115,3 @@ pub fn adjust_usd_decimals_u256(amount: U256, decimals: u8) -> U256 { pub fn adjust_usd_decimals(amount: usize, decimals: u8) -> U256 { adjust_usd_decimals_u256(U256::from(amount), decimals) } - -/// Create a provider factory with a chain spec -pub fn create_test_provider_factory_with_chain_spec( - chain_spec: std::sync::Arc, -) -> ProviderFactory> { - let (static_dir, _) = create_test_static_files_dir(); - let (rocks, _) = create_test_rocksdb_dir(); - - let db = create_test_rw_db(); - let sfp = StaticFileProvider::read_write(static_dir.keep()).expect("static file provider"); - - let rocks = RocksDBProvider::builder(rocks.keep()).build().expect("rocksdb provider"); - - ProviderFactory::new(db, chain_spec, sfp, rocks).unwrap() -} diff --git a/crates/node-tests/tests/db.rs b/crates/node-tests/tests/db.rs index 84a527b..ce4cd92 100644 --- a/crates/node-tests/tests/db.rs +++ b/crates/node-tests/tests/db.rs @@ -1,9 +1,12 @@ -use alloy::primitives::hex; -use reth::providers::BlockReader; use serial_test::serial; +use signet_cold::mem::MemColdBackend; +use signet_hot::{ + db::{HotDbRead, UnsafeDbWrite}, + mem::MemKv, +}; use signet_node::SignetNodeBuilder; use signet_node_config::test_utils::test_config; -use signet_node_tests::utils::create_test_provider_factory_with_chain_spec; +use signet_storage::{CancellationToken, HistoryRead, HistoryWrite, HotKv, UnifiedStorage}; use std::sync::Arc; #[serial] @@ -16,15 +19,30 @@ async fn test_genesis() { let chain_spec: Arc<_> = cfg.chain_spec().clone(); assert_eq!(chain_spec.genesis().config.chain_id, consts.unwrap().ru_chain_id()); - let factory = create_test_provider_factory_with_chain_spec(chain_spec.clone()); + let cancel_token = CancellationToken::new(); + let hot = MemKv::new(); + { + let hardforks = signet_genesis::genesis_hardforks(cfg.genesis()); + let writer = hot.writer().unwrap(); + writer.load_genesis(cfg.genesis(), &hardforks).unwrap(); + writer.commit().unwrap(); + } + + let storage = Arc::new(UnifiedStorage::spawn(hot, MemColdBackend::new(), cancel_token.clone())); + let (_, _) = SignetNodeBuilder::new(cfg.clone()) .with_ctx(ctx) - .with_factory(factory.clone()) + .with_storage(Arc::clone(&storage)) .build() .unwrap(); - let genesis_block = factory.provider().unwrap().block_by_number(0).unwrap().unwrap(); + let reader = storage.reader().unwrap(); + assert!(reader.has_block(0).unwrap()); + + let header = reader.get_header(0).unwrap().expect("missing genesis header"); + let zero_hash = alloy::primitives::B256::ZERO; + assert_eq!(header.parent_hash, zero_hash); + assert_eq!(header.base_fee_per_gas, Some(0x3b9aca00)); - let want_hash = hex!("0x0000000000000000000000000000000000000000000000000000000000000000"); - assert_eq!(genesis_block.parent_hash, want_hash); + cancel_token.cancel(); } diff --git a/crates/node-tests/tests/host_events.rs b/crates/node-tests/tests/host_events.rs index c44b4d9..e58e3f2 100644 --- a/crates/node-tests/tests/host_events.rs +++ b/crates/node-tests/tests/host_events.rs @@ -9,9 +9,7 @@ use alloy::{ providers::Provider, sol_types::SolCall, }; -use reth::providers::{BlockNumReader, DBProvider, HeaderProvider}; use serial_test::serial; -use signet_db::{DbSignetEvent, SignetEvents}; use signet_node_tests::{ HostBlockSpec, SignetTestContext, constants::{DEFAULT_REWARD_ADDRESS, TEST_CONSTANTS}, @@ -19,6 +17,7 @@ use signet_node_tests::{ types::{Counter, TestCounterInstance}, utils::{adjust_usd_decimals, adjust_usd_decimals_u256}, }; +use 
signet_storage_types::DbSignetEvent; use signet_test_utils::{chain::USDC_RECORD, contracts::counter::COUNTER_BYTECODE}; use signet_types::{ constants::{HostPermitted, RollupPermitted}, @@ -337,8 +336,8 @@ async fn test_transact_underfunded_gas() { assert_eq!(contract.count().call().await.unwrap(), U256::ZERO); // check signet events for the recorded transact and that the gas equals tiny_gas - let last = ctx.factory.provider().unwrap().last_block_number().unwrap(); - let events = ctx.factory.provider().unwrap().get::(last..last + 1).unwrap(); + let last = ctx.last_block_number(); + let events = ctx.signet_events_in_block(last).await; // Check that the block has no transactions, i.e. that the transact was // discarded @@ -350,7 +349,7 @@ async fn test_transact_underfunded_gas() { .unwrap(); assert!(last_block.transactions.is_empty()); - let found = events.iter().find(|(_, ev)| match ev { + let found = events.iter().find(|ev| match ev { DbSignetEvent::Transact(_, Transactor::Transact { sender, to, gas, .. }) => { *sender == user && *to == contract_addr && *gas == U256::from(tiny_gas) } @@ -402,14 +401,8 @@ async fn test_signet_events() { ctx.process_block(block.clone()).await.unwrap(); // Check the base fee - let base_fee = ctx - .factory - .header_by_number(1) - .unwrap() - .unwrap() - .base_fee_per_gas - .map(U256::from) - .unwrap(); + let header_1 = ctx.header_by_number(1).unwrap(); + let base_fee = U256::from(header_1.base_fee_per_gas.unwrap()); // NB: // user_a should have received 1 USD, @@ -427,51 +420,21 @@ async fn test_signet_events() { ctx.process_block(block).await.unwrap(); // Check the base fee - let base_fee = ctx - .factory - .header_by_number(2) - .unwrap() - .unwrap() - .base_fee_per_gas - .map(U256::from) - .unwrap(); + let header_2 = ctx.header_by_number(2).unwrap(); + let base_fee = U256::from(header_2.base_fee_per_gas.unwrap()); // This time works exactly the same as above. user_a_bal.assert_decrease_exact(base_fee * U256::from(100_000)); user_b_bal.assert_increase_exact(U256::from(expected_usd_minted)); - let events = ctx.factory.provider().unwrap().get::(1..3).unwrap(); - assert_eq!(events.len(), 8); - - // The tuple is (block_number, event). 
- // We expect 4 events per block - assert_eq!(events[0].0, 1); - assert_eq!(events[1].0, 1); - assert_eq!(events[2].0, 1); - assert_eq!(events[3].0, 1); - assert_eq!(events[4].0, 2); - assert_eq!(events[5].0, 2); - assert_eq!(events[6].0, 2); - assert_eq!(events[7].0, 2); + let events_1 = ctx.signet_events_in_block(1).await; + let events_2 = ctx.signet_events_in_block(2).await; + assert_eq!(events_1.len(), 4); + assert_eq!(events_2.len(), 4); + // Events are in log_index order assert_eq!( - events[0].1, - DbSignetEvent::Transact( - 3, - Transactor::Transact { - rollupChainId, - sender: user_a, - to: user_b, - data: vec![0xab, 0xcd].into(), - value: U256::from(expected_usd_minted), - gas: U256::from(100_000), - maxFeePerGas: U256::from(GWEI_TO_WEI), - } - ) - ); - - assert_eq!( - events[1].1, + events_1[0], DbSignetEvent::Enter( 0, Passage::Enter { @@ -483,7 +446,7 @@ async fn test_signet_events() { ); assert_eq!( - events[2].1, + events_1[1], DbSignetEvent::EnterToken( 1, Passage::EnterToken { @@ -496,7 +459,7 @@ async fn test_signet_events() { ); assert_eq!( - events[3].1, + events_1[2], DbSignetEvent::EnterToken( 2, Passage::EnterToken { @@ -509,7 +472,7 @@ async fn test_signet_events() { ); assert_eq!( - events[4].1, + events_1[3], DbSignetEvent::Transact( 3, Transactor::Transact { @@ -524,8 +487,9 @@ async fn test_signet_events() { ) ); + // Events are in log_index order assert_eq!( - events[5].1, + events_2[0], DbSignetEvent::Enter( 0, Passage::Enter { @@ -537,7 +501,7 @@ async fn test_signet_events() { ); assert_eq!( - events[6].1, + events_2[1], DbSignetEvent::EnterToken( 1, Passage::EnterToken { @@ -550,7 +514,7 @@ async fn test_signet_events() { ); assert_eq!( - events[7].1, + events_2[2], DbSignetEvent::EnterToken( 2, Passage::EnterToken { @@ -561,6 +525,22 @@ async fn test_signet_events() { } ) ); + + assert_eq!( + events_2[3], + DbSignetEvent::Transact( + 3, + Transactor::Transact { + rollupChainId, + sender: user_a, + to: user_b, + data: vec![0xab, 0xcd].into(), + value: U256::from(expected_usd_minted), + gas: U256::from(100_000), + maxFeePerGas: U256::from(GWEI_TO_WEI), + } + ) + ); }) .await; } diff --git a/crates/node-tests/tests/multiple-blocks.rs b/crates/node-tests/tests/multiple-blocks.rs index d012220..f711a83 100644 --- a/crates/node-tests/tests/multiple-blocks.rs +++ b/crates/node-tests/tests/multiple-blocks.rs @@ -2,10 +2,6 @@ use alloy::{ consensus::constants::ETH_TO_WEI, primitives::{Address, U256}, }; -use reth::providers::AccountExtReader; -use reth_db::{ - AccountChangeSets, AccountsHistory, cursor::DbCursorRO, models::ShardedKey, transaction::DbTx, -}; use serial_test::serial; use signet_constants::test_utils::HOST_USDC; use signet_node_tests::{HostBlockSpec, SignetTestContext, run_test, utils::adjust_usd_decimals}; @@ -80,7 +76,7 @@ async fn test_three_enters() { // Processes 5 blocks, setting up accounts with initial balances and histories. // // After this, A will have 500, B will have 1000, C will have 1500. They will -// also have accounthistories entries for blocks 1 to 5. +// also have account histories entries for blocks 1 to 5. 
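+//
+// As a reference, the balances this is expected to produce, inferred from
+// the assertions in the tests below (one fixed-size enter_token per user per
+// block; blocks 1 and 2 are extrapolated; values are multiples of
+// ONE_RU_USDC):
+//
+//   block:   1   2   3   4   5
+//   USER_A:  1   2   3   4   5
+//   USER_B:  2   4   6   8  10
+//   USER_C:  3   6   9  12  15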
async fn setup_accounts_history(ctx: SignetTestContext) -> SignetTestContext { let block = HostBlockSpec::new(ctx.constants()) .enter_token(USER_A, ONE_HOST_USDC, HOST_USDC) @@ -104,48 +100,30 @@ async fn test_write_account_histories() { .enter_token(USER_F, 30 * ONE_HOST_USDC, HOST_USDC); ctx.process_blocks(vec![another_block]).await.unwrap(); - let provider = ctx.factory.provider().unwrap(); - - let a_key = ShardedKey::new(USER_A, u64::MAX); - let b_key = ShardedKey::new(USER_B, u64::MAX); - let c_key = ShardedKey::new(USER_C, u64::MAX); - let d_key = ShardedKey::new(USER_D, u64::MAX); - let e_key = ShardedKey::new(USER_E, u64::MAX); - let f_key = ShardedKey::new(USER_F, u64::MAX); - - let v = provider - .tx_ref() - .cursor_read::() - .unwrap() - .walk_range(a_key.clone()..=f_key.clone()) - .unwrap() - .collect::, _>>() - .unwrap(); - - assert_eq!(v.len(), 6); - assert_eq!(v[0].0, a_key); - assert_eq!(v[1].0, b_key); - assert_eq!(v[2].0, c_key); - for i in 1..5 { - assert!(v[0].1.contains(i)); - assert!(v[1].1.contains(i)); - assert!(v[2].1.contains(i)); + let a_hist = ctx.account_history(USER_A).unwrap(); + let b_hist = ctx.account_history(USER_B).unwrap(); + let c_hist = ctx.account_history(USER_C).unwrap(); + let d_hist = ctx.account_history(USER_D).unwrap(); + let e_hist = ctx.account_history(USER_E).unwrap(); + let f_hist = ctx.account_history(USER_F).unwrap(); + + for i in 1..=5 { + assert!(a_hist.contains(i)); + assert!(b_hist.contains(i)); + assert!(c_hist.contains(i)); } - assert!(!v[0].1.contains(6)); - assert!(!v[1].1.contains(6)); - assert!(!v[2].1.contains(6)); - - assert_eq!(v[3].0, d_key); - assert_eq!(v[4].0, e_key); - assert_eq!(v[5].0, f_key); - for i in 1..5 { - assert!(!v[3].1.contains(i)); - assert!(!v[4].1.contains(i)); - assert!(!v[5].1.contains(i)); + assert!(!a_hist.contains(6)); + assert!(!b_hist.contains(6)); + assert!(!c_hist.contains(6)); + + for i in 1..=5 { + assert!(!d_hist.contains(i)); + assert!(!e_hist.contains(i)); + assert!(!f_hist.contains(i)); } - assert!(v[3].1.contains(6)); - assert!(v[4].1.contains(6)); - assert!(v[5].1.contains(6)); + assert!(d_hist.contains(6)); + assert!(e_hist.contains(6)); + assert!(f_hist.contains(6)); }) .await; } @@ -162,86 +140,59 @@ async fn test_write_account_histories_with_empty_block() { .enter_token(USER_F, 30 * ONE_HOST_USDC, HOST_USDC); ctx.process_blocks(vec![another_block]).await.unwrap(); - let provider = ctx.factory.provider().unwrap(); - - let a_key = ShardedKey::new(USER_A, u64::MAX); - let b_key = ShardedKey::new(USER_B, u64::MAX); - let c_key = ShardedKey::new(USER_C, u64::MAX); - let d_key = ShardedKey::new(USER_D, u64::MAX); - let e_key = ShardedKey::new(USER_E, u64::MAX); - let f_key = ShardedKey::new(USER_F, u64::MAX); - - let v = provider - .tx_ref() - .cursor_read::() - .unwrap() - .walk_range(a_key.clone()..=f_key.clone()) - .unwrap() - .collect::, _>>() - .unwrap(); - - assert_eq!(v.len(), 6); - assert_eq!(v[0].0, a_key); - assert_eq!(v[1].0, b_key); - assert_eq!(v[2].0, c_key); - for i in 1..5 { - assert!(v[0].1.contains(i)); - assert!(v[1].1.contains(i)); - assert!(v[2].1.contains(i)); + let a_hist = ctx.account_history(USER_A).unwrap(); + let b_hist = ctx.account_history(USER_B).unwrap(); + let c_hist = ctx.account_history(USER_C).unwrap(); + let d_hist = ctx.account_history(USER_D).unwrap(); + let e_hist = ctx.account_history(USER_E).unwrap(); + let f_hist = ctx.account_history(USER_F).unwrap(); + + for i in 1..=5 { + assert!(a_hist.contains(i)); + assert!(b_hist.contains(i)); + 
assert!(c_hist.contains(i)); } - assert!(!v[0].1.contains(6)); - assert!(!v[1].1.contains(6)); - assert!(!v[2].1.contains(6)); - - assert_eq!(v[3].0, d_key); - assert_eq!(v[4].0, e_key); - assert_eq!(v[5].0, f_key); - for i in 1..5 { - assert!(!v[3].1.contains(i)); - assert!(!v[4].1.contains(i)); - assert!(!v[5].1.contains(i)); + assert!(!a_hist.contains(6)); + assert!(!b_hist.contains(6)); + assert!(!c_hist.contains(6)); + + for i in 1..=5 { + assert!(!d_hist.contains(i)); + assert!(!e_hist.contains(i)); + assert!(!f_hist.contains(i)); } - assert!(v[3].1.contains(6)); - assert!(v[4].1.contains(6)); - assert!(v[5].1.contains(6)); + assert!(d_hist.contains(6)); + assert!(e_hist.contains(6)); + assert!(f_hist.contains(6)); let empty_block = HostBlockSpec::new(ctx.constants()); ctx.process_block(empty_block).await.unwrap(); // As we did not process a new RU block, the history should not change. - let v = provider - .tx_ref() - .cursor_read::() - .unwrap() - .walk_range(a_key.clone()..=f_key.clone()) - .unwrap() - .collect::, _>>() - .unwrap(); - - assert_eq!(v.len(), 6); - assert_eq!(v[0].0, a_key); - assert_eq!(v[1].0, b_key); - assert_eq!(v[2].0, c_key); - for i in 1..5 { - assert!(v[0].1.contains(i)); - assert!(v[1].1.contains(i)); - assert!(v[2].1.contains(i)); + let a_hist = ctx.account_history(USER_A).unwrap(); + let b_hist = ctx.account_history(USER_B).unwrap(); + let c_hist = ctx.account_history(USER_C).unwrap(); + let d_hist = ctx.account_history(USER_D).unwrap(); + let e_hist = ctx.account_history(USER_E).unwrap(); + let f_hist = ctx.account_history(USER_F).unwrap(); + + for i in 1..=5 { + assert!(a_hist.contains(i)); + assert!(b_hist.contains(i)); + assert!(c_hist.contains(i)); } - assert!(!v[0].1.contains(6)); - assert!(!v[1].1.contains(6)); - assert!(!v[2].1.contains(6)); - - assert_eq!(v[3].0, d_key); - assert_eq!(v[4].0, e_key); - assert_eq!(v[5].0, f_key); - for i in 1..5 { - assert!(!v[3].1.contains(i)); - assert!(!v[4].1.contains(i)); - assert!(!v[5].1.contains(i)); + assert!(!a_hist.contains(6)); + assert!(!b_hist.contains(6)); + assert!(!c_hist.contains(6)); + + for i in 1..=5 { + assert!(!d_hist.contains(i)); + assert!(!e_hist.contains(i)); + assert!(!f_hist.contains(i)); } - assert!(v[3].1.contains(6)); - assert!(v[4].1.contains(6)); - assert!(v[5].1.contains(6)); + assert!(d_hist.contains(6)); + assert!(e_hist.contains(6)); + assert!(f_hist.contains(6)); }) .await; } @@ -252,118 +203,81 @@ async fn test_write_account_histories_with_reorg_and_empty_blocks() { run_test(|ctx| async move { let ctx = setup_accounts_history(ctx).await; - let provider = ctx.factory.provider().unwrap(); - - let a_key = ShardedKey::new(USER_A, u64::MAX); - let b_key = ShardedKey::new(USER_B, u64::MAX); - let c_key = ShardedKey::new(USER_C, u64::MAX); - let another_block = HostBlockSpec::new(ctx.constants()) .enter_token(USER_A, 10 * ONE_HOST_USDC, HOST_USDC) .enter_token(USER_B, 2 * ONE_HOST_USDC, HOST_USDC) .enter_token(USER_C, 3 * ONE_HOST_USDC, HOST_USDC); ctx.process_blocks(vec![another_block.clone()]).await.unwrap(); - let v = provider - .tx_ref() - .cursor_read::() - .unwrap() - .walk_range(a_key.clone()..=c_key.clone()) - .unwrap() - .collect::, _>>() - .unwrap(); - - assert_eq!(v.len(), 3); - assert_eq!(v[0].0, a_key); - assert_eq!(v[1].0, b_key); - assert_eq!(v[2].0, c_key); - - for i in 1..6 { - assert!(v[0].1.contains(i)); - assert!(v[1].1.contains(i)); - assert!(v[2].1.contains(i)); + let a_hist = ctx.account_history(USER_A).unwrap(); + let b_hist = 
ctx.account_history(USER_B).unwrap(); + let c_hist = ctx.account_history(USER_C).unwrap(); + + for i in 1..=6 { + assert!(a_hist.contains(i)); + assert!(b_hist.contains(i)); + assert!(c_hist.contains(i)); } // After reorg, the history should not contain the latest entries ctx.revert_block(another_block).await.unwrap(); - let v = provider - .tx_ref() - .cursor_read::() - .unwrap() - .walk_range(a_key.clone()..=c_key.clone()) - .unwrap() - .collect::, _>>() - .unwrap(); - - assert_eq!(v.len(), 3); - assert_eq!(v[0].0, a_key); - assert_eq!(v[1].0, b_key); - assert_eq!(v[2].0, c_key); - - for i in 1..5 { - assert!(v[0].1.contains(i)); - assert!(v[1].1.contains(i)); - assert!(v[2].1.contains(i)); + let a_hist = ctx.account_history(USER_A).unwrap(); + let b_hist = ctx.account_history(USER_B).unwrap(); + let c_hist = ctx.account_history(USER_C).unwrap(); + + for i in 1..=5 { + assert!(a_hist.contains(i)); + assert!(b_hist.contains(i)); + assert!(c_hist.contains(i)); } - assert!(!v[0].1.contains(6)); - assert!(!v[1].1.contains(6)); - assert!(!v[2].1.contains(6)); + assert!(!a_hist.contains(6)); + assert!(!b_hist.contains(6)); + assert!(!c_hist.contains(6)); // Now process an empty block. let empty_block = HostBlockSpec::new(ctx.constants()); ctx.process_block(empty_block).await.unwrap(); // As we did not process a new RU block, the history should not change. - let v = provider - .tx_ref() - .cursor_read::() - .unwrap() - .walk_range(a_key.clone()..=c_key.clone()) - .unwrap() - .collect::, _>>() - .unwrap(); - - assert_eq!(v.len(), 3); - assert_eq!(v[0].0, a_key); - assert_eq!(v[1].0, b_key); - assert_eq!(v[2].0, c_key); - - for i in 1..5 { - assert!(v[0].1.contains(i)); - assert!(v[1].1.contains(i)); - assert!(v[2].1.contains(i)); + let a_hist = ctx.account_history(USER_A).unwrap(); + let b_hist = ctx.account_history(USER_B).unwrap(); + let c_hist = ctx.account_history(USER_C).unwrap(); + + for i in 1..=5 { + assert!(a_hist.contains(i)); + assert!(b_hist.contains(i)); + assert!(c_hist.contains(i)); } - assert!(!v[0].1.contains(6)); - assert!(!v[1].1.contains(6)); - assert!(!v[2].1.contains(6)); + assert!(!a_hist.contains(6)); + assert!(!b_hist.contains(6)); + assert!(!c_hist.contains(6)); - // re-process the reorged block. + // re-process the reorged block. The empty block above consumed + // RU height 6, so this block lands at RU height 7. let another_block = HostBlockSpec::new(ctx.constants()) .enter_token(USER_A, 10 * ONE_HOST_USDC, HOST_USDC) .enter_token(USER_B, 2 * ONE_HOST_USDC, HOST_USDC) .enter_token(USER_C, 3 * ONE_HOST_USDC, HOST_USDC); ctx.process_blocks(vec![another_block.clone()]).await.unwrap(); - let v = provider - .tx_ref() - .cursor_read::() - .unwrap() - .walk_range(a_key.clone()..=c_key.clone()) - .unwrap() - .collect::, _>>() - .unwrap(); - - assert_eq!(v.len(), 3); - assert_eq!(v[0].0, a_key); - assert_eq!(v[1].0, b_key); - assert_eq!(v[2].0, c_key); - - for i in 1..6 { - assert!(v[0].1.contains(i)); - assert!(v[1].1.contains(i)); - assert!(v[2].1.contains(i)); + let a_hist = ctx.account_history(USER_A).unwrap(); + let b_hist = ctx.account_history(USER_B).unwrap(); + let c_hist = ctx.account_history(USER_C).unwrap(); + + for i in 1..=5 { + assert!(a_hist.contains(i)); + assert!(b_hist.contains(i)); + assert!(c_hist.contains(i)); } + // Block 6 was the empty block (no state changes, no history entry). + // Block 7 is the re-processed block with enter_tokens. 
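+        // Height timeline for this test, for quick reference:
+        //   1..=5 -> setup blocks (history entries for A, B, C)
+        //   6     -> reorged block, replaced by the empty block (no entry)
+        //   7     -> re-processed enter_token block (new history entry)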
+ assert!(!a_hist.contains(6)); + assert!(!b_hist.contains(6)); + assert!(!c_hist.contains(6)); + assert!(a_hist.contains(7)); + assert!(b_hist.contains(7)); + assert!(c_hist.contains(7)); }) .await; } @@ -374,70 +288,44 @@ async fn test_write_account_histories_with_reorg() { run_test(|ctx| async move { let ctx = setup_accounts_history(ctx).await; - let provider = ctx.factory.provider().unwrap(); - - let a_key = ShardedKey::new(USER_A, u64::MAX); - let b_key = ShardedKey::new(USER_B, u64::MAX); - let c_key = ShardedKey::new(USER_C, u64::MAX); - let another_block = HostBlockSpec::new(ctx.constants()) .enter_token(USER_A, 10 * ONE_HOST_USDC, HOST_USDC) .enter_token(USER_B, 2 * ONE_HOST_USDC, HOST_USDC) .enter_token(USER_C, 3 * ONE_HOST_USDC, HOST_USDC); ctx.process_blocks(vec![another_block.clone()]).await.unwrap(); - let v = provider - .tx_ref() - .cursor_read::() - .unwrap() - .walk_range(a_key.clone()..=c_key.clone()) - .unwrap() - .collect::, _>>() - .unwrap(); - - assert_eq!(v.len(), 3); - assert_eq!(v[0].0, a_key); - assert_eq!(v[1].0, b_key); - assert_eq!(v[2].0, c_key); - - for i in 1..6 { - assert!(v[0].1.contains(i)); - assert!(v[1].1.contains(i)); - assert!(v[2].1.contains(i)); + let a_hist = ctx.account_history(USER_A).unwrap(); + let b_hist = ctx.account_history(USER_B).unwrap(); + let c_hist = ctx.account_history(USER_C).unwrap(); + + for i in 1..=6 { + assert!(a_hist.contains(i)); + assert!(b_hist.contains(i)); + assert!(c_hist.contains(i)); } // After reorg, the history should not contain the latest entries ctx.revert_block(another_block).await.unwrap(); - let v = provider - .tx_ref() - .cursor_read::() - .unwrap() - .walk_range(a_key.clone()..=c_key.clone()) - .unwrap() - .collect::, _>>() - .unwrap(); - - assert_eq!(v.len(), 3); - assert_eq!(v[0].0, a_key); - assert_eq!(v[1].0, b_key); - assert_eq!(v[2].0, c_key); - - for i in 1..5 { - assert!(v[0].1.contains(i)); - assert!(v[1].1.contains(i)); - assert!(v[2].1.contains(i)); + let a_hist = ctx.account_history(USER_A).unwrap(); + let b_hist = ctx.account_history(USER_B).unwrap(); + let c_hist = ctx.account_history(USER_C).unwrap(); + + for i in 1..=5 { + assert!(a_hist.contains(i)); + assert!(b_hist.contains(i)); + assert!(c_hist.contains(i)); } - assert!(!v[0].1.contains(6)); - assert!(!v[1].1.contains(6)); - assert!(!v[2].1.contains(6)); + assert!(!a_hist.contains(6)); + assert!(!b_hist.contains(6)); + assert!(!c_hist.contains(6)); }) .await; } #[serial] #[tokio::test] -async fn test_historical_state_provider(ctx: SignetTestContext) { +async fn test_historical_state_provider() { run_test(|ctx| async move { let ctx = setup_accounts_history(ctx).await; @@ -450,44 +338,28 @@ async fn test_historical_state_provider(ctx: SignetTestContext) { .enter_token(USER_F, 60 * ONE_HOST_USDC, HOST_USDC); ctx.process_blocks(vec![another_block]).await.unwrap(); - let provider = ctx.factory.provider().unwrap(); - - // NB: It is bizarre that reth has completely different APIs for - // historical state and current state. basic_accounts is only on - // current while account_balance is only on historical. 
- let accounts = - provider.basic_accounts([USER_A, USER_B, USER_C, USER_D, USER_E, USER_F]).unwrap(); - - assert_eq!(accounts[0].1.as_ref().unwrap().balance, U256::from(15 * ONE_RU_USDC)); - assert_eq!(accounts[1].1.as_ref().unwrap().balance, U256::from(30 * ONE_RU_USDC)); - assert_eq!(accounts[2].1.as_ref().unwrap().balance, U256::from(45 * ONE_RU_USDC)); - assert_eq!(accounts[3].1.as_ref().unwrap().balance, U256::from(40 * ONE_RU_USDC)); - assert_eq!(accounts[4].1.as_ref().unwrap().balance, U256::from(50 * ONE_RU_USDC)); - assert_eq!(accounts[5].1.as_ref().unwrap().balance, U256::from(60 * ONE_RU_USDC)); - - let historical = ctx.factory.history_by_block_number(5).unwrap(); - assert_eq!( - historical.account_balance(&USER_A).unwrap().unwrap(), - U256::from(5 * ONE_RU_USDC) - ); - assert_eq!( - historical.account_balance(&USER_B).unwrap().unwrap(), - U256::from(10 * ONE_RU_USDC) - ); - assert_eq!( - historical.account_balance(&USER_C).unwrap().unwrap(), - U256::from(15 * ONE_RU_USDC) - ); - assert!(historical.account_balance(&USER_D).unwrap().is_none()); - assert!(historical.account_balance(&USER_E).unwrap().is_none()); - assert!(historical.account_balance(&USER_F).unwrap().is_none()); + // Current state + assert_eq!(ctx.account(USER_A).unwrap().balance, U256::from(15 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_B).unwrap().balance, U256::from(30 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_C).unwrap().balance, U256::from(45 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_D).unwrap().balance, U256::from(40 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_E).unwrap().balance, U256::from(50 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_F).unwrap().balance, U256::from(60 * ONE_RU_USDC)); + + // Historical state at block 5 + assert_eq!(ctx.account_at_height(USER_A, 5).unwrap().balance, U256::from(5 * ONE_RU_USDC)); + assert_eq!(ctx.account_at_height(USER_B, 5).unwrap().balance, U256::from(10 * ONE_RU_USDC)); + assert_eq!(ctx.account_at_height(USER_C, 5).unwrap().balance, U256::from(15 * ONE_RU_USDC)); + assert!(ctx.account_at_height(USER_D, 5).is_none()); + assert!(ctx.account_at_height(USER_E, 5).is_none()); + assert!(ctx.account_at_height(USER_F, 5).is_none()); }) .await; } #[serial] #[tokio::test] -async fn test_historical_state_provider_with_empty_blocks(ctx: SignetTestContext) { +async fn test_historical_state_provider_with_empty_blocks() { run_test(|ctx| async move { let ctx = setup_accounts_history(ctx).await; @@ -500,68 +372,40 @@ async fn test_historical_state_provider_with_empty_blocks(ctx: SignetTestContext .enter_token(USER_F, 60 * ONE_HOST_USDC, HOST_USDC); ctx.process_blocks(vec![another_block]).await.unwrap(); - let provider = ctx.factory.provider().unwrap(); - - // NB: It is bizarre that reth has completely different APIs for - // historical state and current state. basic_accounts is only on - // current while account_balance is only on historical. 
- let accounts = - provider.basic_accounts([USER_A, USER_B, USER_C, USER_D, USER_E, USER_F]).unwrap(); - - assert_eq!(accounts[0].1.as_ref().unwrap().balance, U256::from(15 * ONE_RU_USDC)); - assert_eq!(accounts[1].1.as_ref().unwrap().balance, U256::from(30 * ONE_RU_USDC)); - assert_eq!(accounts[2].1.as_ref().unwrap().balance, U256::from(45 * ONE_RU_USDC)); - assert_eq!(accounts[3].1.as_ref().unwrap().balance, U256::from(40 * ONE_RU_USDC)); - assert_eq!(accounts[4].1.as_ref().unwrap().balance, U256::from(50 * ONE_RU_USDC)); - assert_eq!(accounts[5].1.as_ref().unwrap().balance, U256::from(60 * ONE_RU_USDC)); - - let historical = ctx.factory.history_by_block_number(5).unwrap(); - assert_eq!( - historical.account_balance(&USER_A).unwrap().unwrap(), - U256::from(5 * ONE_RU_USDC) - ); - assert_eq!( - historical.account_balance(&USER_B).unwrap().unwrap(), - U256::from(10 * ONE_RU_USDC) - ); - assert_eq!( - historical.account_balance(&USER_C).unwrap().unwrap(), - U256::from(15 * ONE_RU_USDC) - ); - assert!(historical.account_balance(&USER_D).unwrap().is_none()); - assert!(historical.account_balance(&USER_E).unwrap().is_none()); - assert!(historical.account_balance(&USER_F).unwrap().is_none()); + // Current state + assert_eq!(ctx.account(USER_A).unwrap().balance, U256::from(15 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_B).unwrap().balance, U256::from(30 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_C).unwrap().balance, U256::from(45 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_D).unwrap().balance, U256::from(40 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_E).unwrap().balance, U256::from(50 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_F).unwrap().balance, U256::from(60 * ONE_RU_USDC)); + + // Historical state at block 5 + assert_eq!(ctx.account_at_height(USER_A, 5).unwrap().balance, U256::from(5 * ONE_RU_USDC)); + assert_eq!(ctx.account_at_height(USER_B, 5).unwrap().balance, U256::from(10 * ONE_RU_USDC)); + assert_eq!(ctx.account_at_height(USER_C, 5).unwrap().balance, U256::from(15 * ONE_RU_USDC)); + assert!(ctx.account_at_height(USER_D, 5).is_none()); + assert!(ctx.account_at_height(USER_E, 5).is_none()); + assert!(ctx.account_at_height(USER_F, 5).is_none()); let empty_block = HostBlockSpec::new(ctx.constants()); ctx.process_blocks(vec![empty_block; 2]).await.unwrap(); - // the historical state that we previously checked should not change, even after processing empty blocks. 
- let accounts = - provider.basic_accounts([USER_A, USER_B, USER_C, USER_D, USER_E, USER_F]).unwrap(); - - assert_eq!(accounts[0].1.as_ref().unwrap().balance, U256::from(15 * ONE_RU_USDC)); - assert_eq!(accounts[1].1.as_ref().unwrap().balance, U256::from(30 * ONE_RU_USDC)); - assert_eq!(accounts[2].1.as_ref().unwrap().balance, U256::from(45 * ONE_RU_USDC)); - assert_eq!(accounts[3].1.as_ref().unwrap().balance, U256::from(40 * ONE_RU_USDC)); - assert_eq!(accounts[4].1.as_ref().unwrap().balance, U256::from(50 * ONE_RU_USDC)); - assert_eq!(accounts[5].1.as_ref().unwrap().balance, U256::from(60 * ONE_RU_USDC)); - - let historical = ctx.factory.history_by_block_number(5).unwrap(); - assert_eq!( - historical.account_balance(&USER_A).unwrap().unwrap(), - U256::from(5 * ONE_RU_USDC) - ); - assert_eq!( - historical.account_balance(&USER_B).unwrap().unwrap(), - U256::from(10 * ONE_RU_USDC) - ); - assert_eq!( - historical.account_balance(&USER_C).unwrap().unwrap(), - U256::from(15 * ONE_RU_USDC) - ); - assert!(historical.account_balance(&USER_D).unwrap().is_none()); - assert!(historical.account_balance(&USER_E).unwrap().is_none()); - assert!(historical.account_balance(&USER_F).unwrap().is_none()); + // The historical state previously checked should not change, even after + // processing empty blocks. + assert_eq!(ctx.account(USER_A).unwrap().balance, U256::from(15 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_B).unwrap().balance, U256::from(30 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_C).unwrap().balance, U256::from(45 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_D).unwrap().balance, U256::from(40 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_E).unwrap().balance, U256::from(50 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_F).unwrap().balance, U256::from(60 * ONE_RU_USDC)); + + assert_eq!(ctx.account_at_height(USER_A, 5).unwrap().balance, U256::from(5 * ONE_RU_USDC)); + assert_eq!(ctx.account_at_height(USER_B, 5).unwrap().balance, U256::from(10 * ONE_RU_USDC)); + assert_eq!(ctx.account_at_height(USER_C, 5).unwrap().balance, U256::from(15 * ONE_RU_USDC)); + assert!(ctx.account_at_height(USER_D, 5).is_none()); + assert!(ctx.account_at_height(USER_E, 5).is_none()); + assert!(ctx.account_at_height(USER_F, 5).is_none()); }) .await; } @@ -581,55 +425,30 @@ async fn test_historical_state_provider_with_reorg() { .enter_token(USER_F, 60 * ONE_HOST_USDC, HOST_USDC); ctx.process_blocks(vec![another_block.clone()]).await.unwrap(); - let provider = ctx.factory.provider().unwrap(); - - // NB: It is bizarre that reth has completely different APIs for - // historical state and current state. basic_accounts is only on - // current while account_balance is only on historical. 
- let accounts = - provider.basic_accounts([USER_A, USER_B, USER_C, USER_D, USER_E, USER_F]).unwrap(); - - assert_eq!(accounts[0].1.as_ref().unwrap().balance, U256::from(15 * ONE_RU_USDC)); - assert_eq!(accounts[1].1.as_ref().unwrap().balance, U256::from(30 * ONE_RU_USDC)); - assert_eq!(accounts[2].1.as_ref().unwrap().balance, U256::from(45 * ONE_RU_USDC)); - assert_eq!(accounts[3].1.as_ref().unwrap().balance, U256::from(40 * ONE_RU_USDC)); - assert_eq!(accounts[4].1.as_ref().unwrap().balance, U256::from(50 * ONE_RU_USDC)); - assert_eq!(accounts[5].1.as_ref().unwrap().balance, U256::from(60 * ONE_RU_USDC)); - - let historical = ctx.factory.history_by_block_number(5).unwrap(); - assert_eq!( - historical.account_balance(&USER_A).unwrap().unwrap(), - U256::from(5 * ONE_RU_USDC) - ); - assert_eq!( - historical.account_balance(&USER_B).unwrap().unwrap(), - U256::from(10 * ONE_RU_USDC) - ); - assert_eq!( - historical.account_balance(&USER_C).unwrap().unwrap(), - U256::from(15 * ONE_RU_USDC) - ); - assert!(historical.account_balance(&USER_D).unwrap().is_none()); - assert!(historical.account_balance(&USER_E).unwrap().is_none()); - assert!(historical.account_balance(&USER_F).unwrap().is_none()); + // Current state + assert_eq!(ctx.account(USER_A).unwrap().balance, U256::from(15 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_B).unwrap().balance, U256::from(30 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_C).unwrap().balance, U256::from(45 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_D).unwrap().balance, U256::from(40 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_E).unwrap().balance, U256::from(50 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_F).unwrap().balance, U256::from(60 * ONE_RU_USDC)); + + // Historical state at block 5 + assert_eq!(ctx.account_at_height(USER_A, 5).unwrap().balance, U256::from(5 * ONE_RU_USDC)); + assert_eq!(ctx.account_at_height(USER_B, 5).unwrap().balance, U256::from(10 * ONE_RU_USDC)); + assert_eq!(ctx.account_at_height(USER_C, 5).unwrap().balance, U256::from(15 * ONE_RU_USDC)); + assert!(ctx.account_at_height(USER_D, 5).is_none()); + assert!(ctx.account_at_height(USER_E, 5).is_none()); + assert!(ctx.account_at_height(USER_F, 5).is_none()); ctx.revert_block(another_block).await.unwrap(); // Make the same assertions after reverting, the historical state should not change - assert_eq!( - historical.account_balance(&USER_A).unwrap().unwrap(), - U256::from(5 * ONE_RU_USDC) - ); - assert_eq!( - historical.account_balance(&USER_B).unwrap().unwrap(), - U256::from(10 * ONE_RU_USDC) - ); - assert_eq!( - historical.account_balance(&USER_C).unwrap().unwrap(), - U256::from(15 * ONE_RU_USDC) - ); - assert!(historical.account_balance(&USER_D).unwrap().is_none()); - assert!(historical.account_balance(&USER_E).unwrap().is_none()); - assert!(historical.account_balance(&USER_F).unwrap().is_none()); + assert_eq!(ctx.account_at_height(USER_A, 5).unwrap().balance, U256::from(5 * ONE_RU_USDC)); + assert_eq!(ctx.account_at_height(USER_B, 5).unwrap().balance, U256::from(10 * ONE_RU_USDC)); + assert_eq!(ctx.account_at_height(USER_C, 5).unwrap().balance, U256::from(15 * ONE_RU_USDC)); + assert!(ctx.account_at_height(USER_D, 5).is_none()); + assert!(ctx.account_at_height(USER_E, 5).is_none()); + assert!(ctx.account_at_height(USER_F, 5).is_none()); let new_block_6 = HostBlockSpec::new(ctx.constants()) .enter_token(USER_A, 10 * ONE_HOST_USDC, HOST_USDC) @@ -638,33 +457,21 @@ async fn test_historical_state_provider_with_reorg() { ctx.process_block(new_block_6).await.unwrap(); - // new current 
state assertions - let provider = ctx.factory.provider().unwrap(); - let accounts = - provider.basic_accounts([USER_A, USER_B, USER_C, USER_D, USER_E, USER_F]).unwrap(); - assert_eq!(accounts[0].1.as_ref().unwrap().balance, U256::from(15 * ONE_RU_USDC)); - assert_eq!(accounts[1].1.as_ref().unwrap().balance, U256::from(30 * ONE_RU_USDC)); - assert_eq!(accounts[2].1.as_ref().unwrap().balance, U256::from(45 * ONE_RU_USDC)); - assert!(accounts[3].1.is_none()); - assert!(accounts[4].1.is_none()); - assert!(accounts[5].1.is_none()); + // New current state assertions + assert_eq!(ctx.account(USER_A).unwrap().balance, U256::from(15 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_B).unwrap().balance, U256::from(30 * ONE_RU_USDC)); + assert_eq!(ctx.account(USER_C).unwrap().balance, U256::from(45 * ONE_RU_USDC)); + assert!(ctx.account(USER_D).is_none()); + assert!(ctx.account(USER_E).is_none()); + assert!(ctx.account(USER_F).is_none()); // Make the same assertions after the new block 6, the historical state should not change - assert_eq!( - historical.account_balance(&USER_A).unwrap().unwrap(), - U256::from(5 * ONE_RU_USDC) - ); - assert_eq!( - historical.account_balance(&USER_B).unwrap().unwrap(), - U256::from(10 * ONE_RU_USDC) - ); - assert_eq!( - historical.account_balance(&USER_C).unwrap().unwrap(), - U256::from(15 * ONE_RU_USDC) - ); - assert!(historical.account_balance(&USER_D).unwrap().is_none()); - assert!(historical.account_balance(&USER_E).unwrap().is_none()); - assert!(historical.account_balance(&USER_F).unwrap().is_none()); + assert_eq!(ctx.account_at_height(USER_A, 5).unwrap().balance, U256::from(5 * ONE_RU_USDC)); + assert_eq!(ctx.account_at_height(USER_B, 5).unwrap().balance, U256::from(10 * ONE_RU_USDC)); + assert_eq!(ctx.account_at_height(USER_C, 5).unwrap().balance, U256::from(15 * ONE_RU_USDC)); + assert!(ctx.account_at_height(USER_D, 5).is_none()); + assert!(ctx.account_at_height(USER_E, 5).is_none()); + assert!(ctx.account_at_height(USER_F, 5).is_none()); }) .await; } @@ -675,31 +482,24 @@ async fn test_write_changesets() { run_test(|ctx| async move { let ctx = setup_accounts_history(ctx).await; - let provider = ctx.factory.provider().unwrap(); - - let mut cursor = provider.tx_ref().cursor_dup_read::().unwrap(); + // The changeset at block N records the state before block N. + // get_account_at_height(addr, N-1) gives the state at end of block N-1, + // which is the "before" state for block N. 
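+        //
+        // Concretely, a sketch of the mapping this test relies on, using the
+        // balances asserted below:
+        //   changeset @ block 4 ("before" block 4) == account_at_height(addr, 3)
+        //   changeset @ block 5 ("before" block 5) == account_at_height(addr, 4)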
+ let acct_a_at_3 = ctx.account_at_height(USER_A, 3).unwrap(); + let acct_b_at_3 = ctx.account_at_height(USER_B, 3).unwrap(); + let acct_c_at_3 = ctx.account_at_height(USER_C, 3).unwrap(); - let entries_4 = - cursor.walk_range(4..5).unwrap().map(Result::unwrap).map(|t| t.1).collect::>(); + assert_eq!(acct_a_at_3.balance, U256::from(3 * ONE_RU_USDC)); + assert_eq!(acct_b_at_3.balance, U256::from(6 * ONE_RU_USDC)); + assert_eq!(acct_c_at_3.balance, U256::from(9 * ONE_RU_USDC)); - let entry_a = entries_4.iter().find(|e| e.address == USER_A).unwrap(); - let entry_b = entries_4.iter().find(|e| e.address == USER_B).unwrap(); - let entry_c = entries_4.iter().find(|e| e.address == USER_C).unwrap(); + let acct_a_at_4 = ctx.account_at_height(USER_A, 4).unwrap(); + let acct_b_at_4 = ctx.account_at_height(USER_B, 4).unwrap(); + let acct_c_at_4 = ctx.account_at_height(USER_C, 4).unwrap(); - assert_eq!(entry_a.info.as_ref().unwrap().balance, U256::from(3 * ONE_RU_USDC)); - assert_eq!(entry_b.info.as_ref().unwrap().balance, U256::from(6 * ONE_RU_USDC)); - assert_eq!(entry_c.info.as_ref().unwrap().balance, U256::from(9 * ONE_RU_USDC)); - - let entries_5 = - cursor.walk(Some(5)).unwrap().map(Result::unwrap).map(|t| t.1).collect::>(); - - let entry_a = entries_5.iter().find(|e| e.address == USER_A).unwrap(); - let entry_b = entries_5.iter().find(|e| e.address == USER_B).unwrap(); - let entry_c = entries_5.iter().find(|e| e.address == USER_C).unwrap(); - - assert_eq!(entry_a.info.as_ref().unwrap().balance, U256::from(4 * ONE_RU_USDC)); - assert_eq!(entry_b.info.as_ref().unwrap().balance, U256::from(8 * ONE_RU_USDC)); - assert_eq!(entry_c.info.as_ref().unwrap().balance, U256::from(12 * ONE_RU_USDC)); + assert_eq!(acct_a_at_4.balance, U256::from(4 * ONE_RU_USDC)); + assert_eq!(acct_b_at_4.balance, U256::from(8 * ONE_RU_USDC)); + assert_eq!(acct_c_at_4.balance, U256::from(12 * ONE_RU_USDC)); }) .await; } @@ -710,59 +510,41 @@ async fn test_write_changesets_with_empty_blocks() { run_test(|ctx| async move { let ctx = setup_accounts_history(ctx).await; - let provider = ctx.factory.provider().unwrap(); - - let mut cursor = provider.tx_ref().cursor_dup_read::().unwrap(); - - let entries_4 = - cursor.walk_range(4..5).unwrap().map(Result::unwrap).map(|t| t.1).collect::>(); + let acct_a_at_3 = ctx.account_at_height(USER_A, 3).unwrap(); + let acct_b_at_3 = ctx.account_at_height(USER_B, 3).unwrap(); + let acct_c_at_3 = ctx.account_at_height(USER_C, 3).unwrap(); - let entry_a = entries_4.iter().find(|e| e.address == USER_A).unwrap(); - let entry_b = entries_4.iter().find(|e| e.address == USER_B).unwrap(); - let entry_c = entries_4.iter().find(|e| e.address == USER_C).unwrap(); + assert_eq!(acct_a_at_3.balance, U256::from(3 * ONE_RU_USDC)); + assert_eq!(acct_b_at_3.balance, U256::from(6 * ONE_RU_USDC)); + assert_eq!(acct_c_at_3.balance, U256::from(9 * ONE_RU_USDC)); - assert_eq!(entry_a.info.as_ref().unwrap().balance, U256::from(3 * ONE_RU_USDC)); - assert_eq!(entry_b.info.as_ref().unwrap().balance, U256::from(6 * ONE_RU_USDC)); - assert_eq!(entry_c.info.as_ref().unwrap().balance, U256::from(9 * ONE_RU_USDC)); + let acct_a_at_4 = ctx.account_at_height(USER_A, 4).unwrap(); + let acct_b_at_4 = ctx.account_at_height(USER_B, 4).unwrap(); + let acct_c_at_4 = ctx.account_at_height(USER_C, 4).unwrap(); - let entries_5 = - cursor.walk(Some(5)).unwrap().map(Result::unwrap).map(|t| t.1).collect::>(); - - let entry_a = entries_5.iter().find(|e| e.address == USER_A).unwrap(); - let entry_b = entries_5.iter().find(|e| e.address == 
USER_B).unwrap(); - let entry_c = entries_5.iter().find(|e| e.address == USER_C).unwrap(); - - assert_eq!(entry_a.info.as_ref().unwrap().balance, U256::from(4 * ONE_RU_USDC)); - assert_eq!(entry_b.info.as_ref().unwrap().balance, U256::from(8 * ONE_RU_USDC)); - assert_eq!(entry_c.info.as_ref().unwrap().balance, U256::from(12 * ONE_RU_USDC)); + assert_eq!(acct_a_at_4.balance, U256::from(4 * ONE_RU_USDC)); + assert_eq!(acct_b_at_4.balance, U256::from(8 * ONE_RU_USDC)); + assert_eq!(acct_c_at_4.balance, U256::from(12 * ONE_RU_USDC)); let empty_block = HostBlockSpec::new(ctx.constants()); ctx.process_blocks(vec![empty_block; 2]).await.unwrap(); - // Even after processing empty blocks, the changesets should not change. - let mut cursor = provider.tx_ref().cursor_dup_read::().unwrap(); - - let entries_4 = - cursor.walk_range(4..5).unwrap().map(Result::unwrap).map(|t| t.1).collect::>(); - - let entry_a = entries_4.iter().find(|e| e.address == USER_A).unwrap(); - let entry_b = entries_4.iter().find(|e| e.address == USER_B).unwrap(); - let entry_c = entries_4.iter().find(|e| e.address == USER_C).unwrap(); - - assert_eq!(entry_a.info.as_ref().unwrap().balance, U256::from(3 * ONE_RU_USDC)); - assert_eq!(entry_b.info.as_ref().unwrap().balance, U256::from(6 * ONE_RU_USDC)); - assert_eq!(entry_c.info.as_ref().unwrap().balance, U256::from(9 * ONE_RU_USDC)); + // Even after processing empty blocks, the historical state should not change. + let acct_a_at_3 = ctx.account_at_height(USER_A, 3).unwrap(); + let acct_b_at_3 = ctx.account_at_height(USER_B, 3).unwrap(); + let acct_c_at_3 = ctx.account_at_height(USER_C, 3).unwrap(); - let entries_5 = - cursor.walk(Some(5)).unwrap().map(Result::unwrap).map(|t| t.1).collect::>(); + assert_eq!(acct_a_at_3.balance, U256::from(3 * ONE_RU_USDC)); + assert_eq!(acct_b_at_3.balance, U256::from(6 * ONE_RU_USDC)); + assert_eq!(acct_c_at_3.balance, U256::from(9 * ONE_RU_USDC)); - let entry_a = entries_5.iter().find(|e| e.address == USER_A).unwrap(); - let entry_b = entries_5.iter().find(|e| e.address == USER_B).unwrap(); - let entry_c = entries_5.iter().find(|e| e.address == USER_C).unwrap(); + let acct_a_at_4 = ctx.account_at_height(USER_A, 4).unwrap(); + let acct_b_at_4 = ctx.account_at_height(USER_B, 4).unwrap(); + let acct_c_at_4 = ctx.account_at_height(USER_C, 4).unwrap(); - assert_eq!(entry_a.info.as_ref().unwrap().balance, U256::from(4 * ONE_RU_USDC)); - assert_eq!(entry_b.info.as_ref().unwrap().balance, U256::from(8 * ONE_RU_USDC)); - assert_eq!(entry_c.info.as_ref().unwrap().balance, U256::from(12 * ONE_RU_USDC)); + assert_eq!(acct_a_at_4.balance, U256::from(4 * ONE_RU_USDC)); + assert_eq!(acct_b_at_4.balance, U256::from(8 * ONE_RU_USDC)); + assert_eq!(acct_c_at_4.balance, U256::from(12 * ONE_RU_USDC)); }) .await; } diff --git a/crates/node-tests/tests/rpc.rs b/crates/node-tests/tests/rpc.rs index 9c183e1..72581b6 100644 --- a/crates/node-tests/tests/rpc.rs +++ b/crates/node-tests/tests/rpc.rs @@ -13,7 +13,6 @@ use alloy::{ }, sol_types::{SolCall, SolEvent}, }; -use reth::providers::{BlockNumReader, BlockReader, TransactionsProvider}; use serial_test::serial; use signet_node_tests::{ HostBlockSpec, SignetTestContext, @@ -99,28 +98,29 @@ async fn test_eth_estimateGas(ctx: &SignetTestContext, contract: &TestCounterIns } async fn test_eth_getBlockByHash(ctx: &SignetTestContext, _contract: &TestCounterInstance) { - let genesis = ctx.factory.block(0.into()).unwrap().unwrap(); + let genesis = ctx.header_by_number(0).unwrap(); - let block = 
ctx.alloy_provider.get_block_by_hash(genesis.hash_slow()).await.unwrap().unwrap(); + let block = ctx.alloy_provider.get_block_by_hash(genesis.hash()).await.unwrap().unwrap(); assert_eq!(block.header.number, genesis.number); assert_eq!(block.header.timestamp, genesis.timestamp); } async fn test_eth_getBlockByNumber(ctx: &SignetTestContext, _contract: &TestCounterInstance) { - let db_block = ctx.factory.block(1.into()).unwrap().unwrap(); + let db_header = ctx.header_by_number(1).unwrap(); let rpc_block = - ctx.alloy_provider.get_block_by_number(db_block.number.into()).await.unwrap().unwrap(); - assert_eq!(rpc_block.header.number, db_block.number); - assert_eq!(rpc_block.header.timestamp, db_block.timestamp); - assert_eq!(rpc_block.header.hash, db_block.hash_slow()); + ctx.alloy_provider.get_block_by_number(db_header.number.into()).await.unwrap().unwrap(); + assert_eq!(rpc_block.header.number, db_header.number); + assert_eq!(rpc_block.header.timestamp, db_header.timestamp); + assert_eq!(rpc_block.header.hash, db_header.hash()); } async fn test_eth_getTransactionByHash(ctx: &SignetTestContext, _contract: &TestCounterInstance) { let deployer = ctx.addresses[0]; - let deploy_tx = &ctx.factory.transactions_by_block(1.into()).unwrap().unwrap()[0]; - let tx_hash = *deploy_tx.hash(); + let txs = ctx.transactions_in_block(1).await; + let deploy_tx = &txs[0]; + let tx_hash = *deploy_tx.tx_hash(); let rpc_tx = ctx.alloy_provider.get_transaction_by_hash(tx_hash).await.unwrap().unwrap(); assert_eq!(rpc_tx.tx_hash(), tx_hash); @@ -135,8 +135,9 @@ async fn test_eth_getTransactionByHash(ctx: &SignetTestContext, _contract: &Test async fn test_eth_getTransactionReceipt(ctx: &SignetTestContext, contract: &TestCounterInstance) { let deployer = ctx.addresses[0]; - let deploy_tx = &ctx.factory.transactions_by_block(1.into()).unwrap().unwrap()[0]; - let tx_hash = *deploy_tx.hash(); + let txs = ctx.transactions_in_block(1).await; + let deploy_tx = &txs[0]; + let tx_hash = *deploy_tx.tx_hash(); let receipt = ctx.alloy_provider.get_transaction_receipt(tx_hash).await.unwrap().unwrap(); @@ -210,8 +211,8 @@ async fn test_stateful_rpc_calls() { } async fn getLogs_post(ctx: &SignetTestContext, contract: &TestCounterInstance) { - let latest_block = ctx.factory.last_block_number().unwrap(); - let latest_hash = ctx.factory.block(latest_block.into()).unwrap().unwrap().hash_slow(); + let latest_block = ctx.last_block_number(); + let latest_hash = ctx.header_by_number(latest_block).unwrap().hash(); let logs = ctx .alloy_provider @@ -223,13 +224,13 @@ async fn getLogs_post(ctx: &SignetTestContext, contract: &TestCounterInstance) { .await .unwrap(); - // Two logs: one from the host transact, one from the alloy tx + // Two logs: one from the alloy tx, one from the host transact assert_eq!(logs.len(), 2); let log_inner = &logs[0].inner; assert_eq!(log_inner.address, *contract.address()); - // First increment is from the host transact (system tx runs first) + // First increment is from the alloy tx (regular txs execute before system txs) assert_eq!(log_inner.topics(), &[Counter::Count::SIGNATURE_HASH, B256::with_last_byte(1)]); - // Second increment is from the alloy tx + // Second increment is from the host transact (system tx) let log_inner = &logs[1].inner; assert_eq!(log_inner.address, *contract.address()); assert_eq!(log_inner.topics(), &[Counter::Count::SIGNATURE_HASH, B256::with_last_byte(2)]); @@ -303,8 +304,8 @@ async fn newBlockFilter_pre(ctx: &SignetTestContext) -> U256 { async fn newBlockFilter_post(ctx: 
&SignetTestContext, filter_id: U256) {
    let blocks: Vec<B256> = ctx.alloy_provider.get_filter_changes(filter_id).await.unwrap();
-    let latest_block = ctx.factory.last_block_number().unwrap();
-    let latest_hash = ctx.factory.block(latest_block.into()).unwrap().unwrap().hash_slow();
+    let latest_block = ctx.last_block_number();
+    let latest_hash = ctx.header_by_number(latest_block).unwrap().hash();
    assert_eq!(blocks.len(), 1);
    assert_eq!(blocks[0], latest_hash);
@@ -534,8 +535,8 @@ async fn subscribe_blocks_pre(ctx: &SignetTestContext) -> Subscription<Header> {
async fn subscribe_blocks_post(ctx: &SignetTestContext, mut sub: Subscription<Header>
) { let block = sub.recv().await.unwrap(); - let latest_block = ctx.factory.last_block_number().unwrap(); - let latest_hash = ctx.factory.block(latest_block.into()).unwrap().unwrap().hash_slow(); + let latest_block = ctx.last_block_number(); + let latest_hash = ctx.header_by_number(latest_block).unwrap().hash(); assert_eq!(block.number, latest_block); assert_eq!(block.hash, latest_hash); } @@ -665,19 +666,19 @@ async fn verify_all_txs_in_block(ctx: &SignetTestContext, block_number: u64) { let txs = block.transactions.as_transactions().unwrap(); - // Also get transactions directly from DB - let db_txs = ctx.factory.transactions_by_block(block_number.into()).unwrap().unwrap(); + // Also get transactions directly from storage + let db_txs = ctx.transactions_in_block(block_number).await; assert_eq!(txs.len(), db_txs.len(), "Transaction count mismatch in block {}", block_number); for (idx, (rpc_tx, db_tx)) in txs.iter().zip(db_txs.iter()).enumerate() { let rpc_hash = rpc_tx.tx_hash(); - let db_hash = *db_tx.hash(); + let db_hash = *db_tx.tx_hash(); - // Verify RPC and DB hashes match + // Verify RPC and storage hashes match assert_eq!( rpc_hash, db_hash, - "Hash mismatch between RPC and DB for block {} tx {}", + "Hash mismatch between RPC and storage for block {} tx {}", block_number, idx ); @@ -688,11 +689,11 @@ async fn verify_all_txs_in_block(ctx: &SignetTestContext, block_number: u64) { "RPC hash lookup failed: block={block_number}, idx={idx}, hash={rpc_hash}", ); - // Verify hash lookup works via DB provider - let provider_lookup = ctx.factory.provider().unwrap().transaction_by_hash(db_hash).unwrap(); + // Verify hash lookup works via cold storage + let storage_lookup = ctx.transaction_by_hash(db_hash).await; assert!( - provider_lookup.is_some(), - "DB provider hash lookup failed: block={}, idx={}, hash={}", + storage_lookup.is_some(), + "Cold storage hash lookup failed: block={}, idx={}, hash={}", block_number, idx, db_hash diff --git a/crates/node-tests/tests/rpc_debug.rs b/crates/node-tests/tests/rpc_debug.rs index 90d240a..8dd8824 100644 --- a/crates/node-tests/tests/rpc_debug.rs +++ b/crates/node-tests/tests/rpc_debug.rs @@ -1,8 +1,5 @@ use alloy::{primitives::Bytes, providers::ext::DebugApi, sol_types::SolCall}; -use reth::{ - providers::TransactionsProvider, - rpc::types::trace::geth::{CallConfig, GethDebugTracingOptions}, -}; +use reth::rpc::types::trace::geth::{CallConfig, GethDebugTracingOptions}; use serial_test::serial; use signet_node_tests::{rpc::rpc_test, types::Counter::incrementCall}; use signet_test_utils::specs::{HostBlockSpec, RuBlockSpec}; @@ -12,8 +9,9 @@ use signet_test_utils::specs::{HostBlockSpec, RuBlockSpec}; async fn test_debug_trace_transaction() { rpc_test(|ctx, counter| async move { let deployer = ctx.addresses[0]; - let deploy_tx = &ctx.factory.transactions_by_block(1.into()).unwrap().unwrap()[0]; - let tx_hash = *deploy_tx.hash(); + let txs = ctx.transactions_in_block(1).await; + let deploy_tx = &txs[0]; + let tx_hash = *deploy_tx.tx_hash(); let tracing_opts = GethDebugTracingOptions::call_tracer(CallConfig { only_top_call: Some(false), diff --git a/crates/node-types/Cargo.toml b/crates/node-types/Cargo.toml deleted file mode 100644 index 6db0b39..0000000 --- a/crates/node-types/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "signet-node-types" -version.workspace = true -edition.workspace = true -rust-version.workspace = true -authors.workspace = true -license.workspace = true -homepage.workspace = true -repository.workspace = true - 
-[dependencies] -alloy.workspace = true -reth.workspace = true -reth-chainspec.workspace = true -reth-db.workspace = true -reth-node-api.workspace = true -reth-node-ethereum.workspace = true -signet-zenith.workspace = true - -tokio.workspace = true -tracing.workspace = true diff --git a/crates/node-types/README.md b/crates/node-types/README.md deleted file mode 100644 index 61ddffc..0000000 --- a/crates/node-types/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# Signet Node Types - -This crate provides parameterizations and conveneniences for the Signet node's -use of reth's internal generics. E.g. [`NodePrimitives`] and [`NodeTypes`]. - -It also provides a [`NodeTypesDbTrait`] to aggregate several trait constraints -on the database type. This is then used in the node and in `signet-db`. - -This crate is mostly shims. It is not intended to be used outside of the -Signet node and `signet-db` crates. diff --git a/crates/node-types/src/block.rs b/crates/node-types/src/block.rs deleted file mode 100644 index 29d4463..0000000 --- a/crates/node-types/src/block.rs +++ /dev/null @@ -1,26 +0,0 @@ -use alloy::eips::eip2718::{Decodable2718, Encodable2718}; -use reth::primitives::TransactionSigned; -use signet_zenith::Coder; -use tracing::trace; - -/// [signet_zenith::ZenithBlock] parameterized for use with reth. -pub type ZenithBlock = signet_zenith::ZenithBlock; - -/// [Coder] implementation for reth's 2718 impl -#[derive(Debug, Clone, Copy)] -pub struct Reth2718Coder; - -impl Coder for Reth2718Coder { - type Tx = TransactionSigned; - - fn encode(t: &TransactionSigned) -> Vec { - t.encoded_2718() - } - - fn decode(buf: &mut &[u8]) -> Option { - TransactionSigned::decode_2718(buf) - .inspect_err(|e| trace!(%e, "Discarding transaction due to failed decoding")) - .ok() - .filter(|tx| !tx.is_eip4844()) - } -} diff --git a/crates/node-types/src/lib.rs b/crates/node-types/src/lib.rs deleted file mode 100644 index ba49628..0000000 --- a/crates/node-types/src/lib.rs +++ /dev/null @@ -1,174 +0,0 @@ -#![doc = include_str!("../README.md")] -#![warn( - missing_copy_implementations, - missing_debug_implementations, - missing_docs, - unreachable_pub, - clippy::missing_const_for_fn, - rustdoc::all -)] -#![cfg_attr(not(test), warn(unused_crate_dependencies))] -#![deny(unused_must_use, rust_2018_idioms)] -#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] - -mod block; -pub use block::{Reth2718Coder, ZenithBlock}; - -mod utils; -pub use utils::{NodeTypesDbTrait, Pnt}; - -use reth::{ - primitives::EthPrimitives, - providers::{ - CanonStateNotification, CanonStateNotificationSender, CanonStateNotifications, - CanonStateSubscriptions, EthStorage, NodePrimitivesProvider, - }, -}; -use reth_chainspec::ChainSpec; -use reth_node_api::{NodePrimitives, NodeTypes, NodeTypesWithDB}; -use reth_node_ethereum::EthEngineTypes; -use std::marker::PhantomData; -use tokio::sync::broadcast::error::SendError; - -/// Items that can be sent via the status channel. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum NodeStatus { - /// Node is booting. - Booting, - /// Node's current height. - AtHeight(u64), -} - -/// Signet node types for [`NodeTypes`] and [`NodeTypesWithDB`]. 
-#[derive(Copy, Debug)] -pub struct SignetNodeTypes { - _db: PhantomData Db>, -} - -impl Clone for SignetNodeTypes { - fn clone(&self) -> Self { - Self { _db: PhantomData } - } -} - -impl PartialEq for SignetNodeTypes { - fn eq(&self, _other: &Self) -> bool { - true - } -} - -impl Eq for SignetNodeTypes {} - -impl Default for SignetNodeTypes { - fn default() -> Self { - Self { _db: PhantomData } - } -} - -impl NodePrimitives for SignetNodeTypes -where - Db: NodeTypesDbTrait, -{ - type Block = ::Block; - type BlockHeader = ::BlockHeader; - /// Block body primitive. - type BlockBody = ::BlockBody; - /// Signed version of the transaction type. - type SignedTx = ::SignedTx; - /// A receipt. - type Receipt = ::Receipt; -} - -impl NodeTypes for SignetNodeTypes -where - Db: NodeTypesDbTrait, -{ - type Primitives = EthPrimitives; - - type ChainSpec = ChainSpec; - - type Storage = EthStorage; - - type Payload = EthEngineTypes; -} - -impl NodeTypesWithDB for SignetNodeTypes -where - Db: NodeTypesDbTrait, -{ - type DB = Db; -} - -/// Shim to impl [`CanonStateSubscriptions`] -#[derive(Debug, Clone)] -pub struct SharedCanonState { - sender: CanonStateNotificationSender, - _pd: PhantomData Db>, -} - -impl SharedCanonState -where - Db: NodeTypesDbTrait, -{ - /// Get the number of receivers, via [`CanonStateNotificationSender::receiver_count`]. - pub fn receiver_count(&self) -> usize { - self.sender.receiver_count() - } - - /// Send a notification to all subscribers. - pub fn send( - &self, - notification: CanonStateNotification, - ) -> Result> { - self.sender.send(notification) - } -} - -impl Default for SharedCanonState -where - Db: NodeTypesDbTrait, -{ - fn default() -> Self { - // magic constant matches reth behavior in blockchain_tree. - // Max reorg depth is default 64, blockchain tree doubles it to 128. - Self::new(128) - } -} - -impl NodePrimitivesProvider for SharedCanonState -where - Db: NodeTypesDbTrait, -{ - type Primitives = EthPrimitives; -} - -impl SharedCanonState -where - Db: NodeTypesDbTrait, -{ - /// Create a new shared canon state. - pub fn new(capacity: usize) -> Self { - Self { sender: tokio::sync::broadcast::channel(capacity).0, _pd: PhantomData } - } -} - -impl CanonStateSubscriptions for SharedCanonState -where - Db: NodeTypesDbTrait, -{ - fn subscribe_to_canonical_state(&self) -> CanonStateNotifications { - self.sender.subscribe() - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[allow(dead_code)] - fn compile_check() { - fn inner() {} - - inner::>>(); - } -} diff --git a/crates/node-types/src/utils.rs b/crates/node-types/src/utils.rs deleted file mode 100644 index 0d49922..0000000 --- a/crates/node-types/src/utils.rs +++ /dev/null @@ -1,24 +0,0 @@ -use reth::{primitives::EthPrimitives, providers::providers::ProviderNodeTypes}; -use reth_chainspec::ChainSpec; - -/// Convenience trait for specifying the [`ProviderNodeTypes`] implementation -/// required for Signet functionality. This is used to condense many trait -/// bounds. 
-pub trait Pnt: ProviderNodeTypes {} - -impl Pnt for T where T: ProviderNodeTypes {} - -/// Convenience trait to aggregate the DB requirements -pub trait NodeTypesDbTrait: - reth_db::database::Database + reth_db::database_metrics::DatabaseMetrics + Clone + Unpin + 'static -{ -} - -impl NodeTypesDbTrait for T where - T: reth_db::database::Database - + reth_db::database_metrics::DatabaseMetrics - + Clone - + Unpin - + 'static -{ -} diff --git a/crates/node/Cargo.toml b/crates/node/Cargo.toml index d1219d0..1d8f2da 100644 --- a/crates/node/Cargo.toml +++ b/crates/node/Cargo.toml @@ -10,27 +10,32 @@ repository.workspace = true [dependencies] signet-block-processor.workspace = true -signet-db.workspace = true +signet-extract.workspace = true +signet-genesis.workspace = true signet-node-config.workspace = true -signet-node-types.workspace = true signet-rpc.workspace = true +signet-hot.workspace = true +signet-storage.workspace = true signet-blobber.workspace = true signet-tx-cache.workspace = true signet-types.workspace = true +ajj.workspace = true alloy.workspace = true +axum = "0.8.1" +interprocess = { version = "2.2.2", features = ["tokio"] } reth.workspace = true -reth-chainspec.workspace = true -reth-db.workspace = true -reth-db-common.workspace = true reth-exex.workspace = true reth-node-api.workspace = true +trevm.workspace = true + eyre.workspace = true futures-util.workspace = true metrics.workspace = true reqwest.workspace = true tokio.workspace = true +tower-http = { version = "0.6.2", features = ["cors"] } tracing.workspace = true diff --git a/crates/node/src/builder.rs b/crates/node/src/builder.rs index 1cc4205..28251be 100644 --- a/crates/node/src/builder.rs +++ b/crates/node/src/builder.rs @@ -1,53 +1,45 @@ #![allow(clippy::type_complexity)] -use crate::{GENESIS_JOURNAL_HASH, SignetNode}; +use crate::{NodeStatus, SignetNode}; use eyre::OptionExt; -use reth::{ - primitives::EthPrimitives, - providers::{BlockHashReader, ProviderFactory, StateProviderFactory}, -}; -use reth_db::transaction::DbTxMut; -use reth_db_common::init; +use reth::{primitives::EthPrimitives, providers::StateProviderFactory}; use reth_exex::ExExContext; use reth_node_api::{FullNodeComponents, NodeTypes}; use signet_block_processor::AliasOracleFactory; -use signet_db::DbProviderExt; +use signet_hot::db::UnsafeDbWrite; use signet_node_config::SignetNodeConfig; -use signet_node_types::{NodeStatus, NodeTypesDbTrait, SignetNodeTypes}; +use signet_storage::{HistoryRead, HistoryWrite, HotKv, HotKvRead, UnifiedStorage}; use std::sync::Arc; +use tracing::info; +use trevm::revm::database::DBErrorMarker; /// A type that does not implement [`AliasOracleFactory`]. #[derive(Debug, Clone, Copy)] pub struct NotAnAof; -/// A type that does not implement [`NodeTypesDbTrait`]. +/// Sentinel indicating no storage has been provided. #[derive(Debug, Clone, Copy)] -pub struct NotADb; +pub struct NotAStorage; /// Builder for [`SignetNode`]. This is the main way to create a signet node. /// /// The builder requires the following components to be set before building: /// - An [`ExExContext`], via [`Self::with_ctx`]. -/// - A [`ProviderFactory`] for the signet node's database. -/// - This can be provided directly via [`Self::with_factory`]. -/// - Or created from a database implementing [`NodeTypesDbTrait`] via -/// [`Self::with_db`]. -/// - If not set directly, can be created from the config via -/// [`Self::with_config_db`]. +/// - An [`Arc>`], via [`Self::with_storage`]. /// - An [`AliasOracleFactory`], via [`Self::with_alias_oracle`]. 
/// - If not set, a default one will be created from the [`ExExContext`]'s /// provider. /// - A `reqwest::Client`, via [`Self::with_client`]. /// - If not set, a default client will be created. -pub struct SignetNodeBuilder { +pub struct SignetNodeBuilder { config: SignetNodeConfig, alias_oracle: Option, ctx: Option, - factory: Option, + storage: Option, client: Option, } -impl core::fmt::Debug for SignetNodeBuilder { +impl core::fmt::Debug for SignetNodeBuilder { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("SignetNodeBuilder").finish_non_exhaustive() } @@ -56,70 +48,21 @@ impl core::fmt::Debug for SignetNodeBuilder { impl SignetNodeBuilder { /// Create a new SignetNodeBuilder instance. pub const fn new(config: SignetNodeConfig) -> Self { - Self { config, alias_oracle: None, ctx: None, factory: None, client: None } + Self { config, alias_oracle: None, ctx: None, storage: None, client: None } } } -impl SignetNodeBuilder { - /// Set the DB for the signet node. - pub fn with_db( +impl SignetNodeBuilder { + /// Set the [`UnifiedStorage`] backend for the signet node. + pub fn with_storage( self, - db: NewDb, - ) -> eyre::Result>, Aof>> { - let factory = ProviderFactory::new( - db, - self.config.chain_spec().clone(), - self.config.static_file_rw()?, - self.config.open_rocks_db()?, - )?; - - Ok(SignetNodeBuilder { - config: self.config, - alias_oracle: self.alias_oracle, - ctx: self.ctx, - factory: Some(factory), - client: self.client, - }) - } - - /// Set the DB for the signet node from config, opening the mdbx database. - pub fn with_config_db( - self, - ) -> eyre::Result< - SignetNodeBuilder>>, Aof>, - > { - let factory = ProviderFactory::new_with_database_path( - self.config.database_path(), - self.config.chain_spec().clone(), - reth_db::mdbx::DatabaseArguments::default(), - self.config.static_file_rw()?, - self.config.open_rocks_db()?, - )?; - Ok(SignetNodeBuilder { - config: self.config, - alias_oracle: self.alias_oracle, - ctx: self.ctx, - factory: Some(factory), - client: self.client, - }) - } - - /// Set the provider factory for the signet node. - /// - /// This is an alternative to [`Self::with_db`] and - /// [`Self::with_config_db`]. - pub fn with_factory( - self, - factory: ProviderFactory>, - ) -> SignetNodeBuilder>, Aof> - where - NewDb: NodeTypesDbTrait, - { + storage: Arc>, + ) -> SignetNodeBuilder>, Aof> { SignetNodeBuilder { config: self.config, alias_oracle: self.alias_oracle, ctx: self.ctx, - factory: Some(factory), + storage: Some(storage), client: self.client, } } @@ -128,7 +71,7 @@ impl SignetNodeBuilder { pub fn with_ctx( self, ctx: ExExContext, - ) -> SignetNodeBuilder, Db, Aof> + ) -> SignetNodeBuilder, Storage, Aof> where NewHost: FullNodeComponents, NewHost::Types: NodeTypes, @@ -137,7 +80,7 @@ impl SignetNodeBuilder { config: self.config, alias_oracle: self.alias_oracle, ctx: Some(ctx), - factory: self.factory, + storage: self.storage, client: self.client, } } @@ -146,131 +89,65 @@ impl SignetNodeBuilder { pub fn with_alias_oracle( self, alias_oracle: NewAof, - ) -> SignetNodeBuilder { + ) -> SignetNodeBuilder { SignetNodeBuilder { config: self.config, alias_oracle: Some(alias_oracle), ctx: self.ctx, - factory: self.factory, + storage: self.storage, client: self.client, } } /// Set the reqwest client for the signet node. 
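+    ///
+    /// If not set, a default client is created during the prebuild checks.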
- pub fn with_client(mut self, client: reqwest::Client) -> SignetNodeBuilder { + pub fn with_client(mut self, client: reqwest::Client) -> Self { self.client = Some(client); self } } -impl SignetNodeBuilder, ProviderFactory>, Aof> +impl SignetNodeBuilder, Arc>, Aof> where Host: FullNodeComponents, Host::Types: NodeTypes, - Db: NodeTypesDbTrait, + H: HotKv, { /// Prebuild checks for the signet node builder. Shared by all build /// commands. fn prebuild(&mut self) -> eyre::Result<()> { self.client.get_or_insert_default(); self.ctx.as_ref().ok_or_eyre("Launch context must be set")?; - let factory = self.factory.as_ref().ok_or_eyre("Provider factory must be set")?; - - // This check appears redundant with the same check made in - // `init_genesis`, but is not. We init the genesis DB state but then we - // drop some of it, and reuse those tables for our own nefarious - // purposes. If we attempt to drop those tables AFTER we have reused - // them, we will get a key deser error (as the tables will contain keys - // the old schema does not permit). This check ensures we only attempt - // to drop the tables once. - if matches!( - factory.block_hash(0), - Ok(None) - | Err(reth::providers::ProviderError::MissingStaticFileBlock( - reth::primitives::StaticFileSegment::Headers, - 0 - )) - ) { - init::init_genesis(factory)?; - - factory.provider_rw()?.update( - |writer: &mut reth::providers::DatabaseProviderRW>| { - writer.tx_mut().clear::()?; - writer.tx_mut().clear::()?; - writer.tx_mut().clear::()?; - - writer.tx_ref().put::(0, GENESIS_JOURNAL_HASH)?; - // we do not need to pre-populate the `ZenithHeaders` or - // `SignetEvents` tables, as missing data is legal in those - // tables - - Ok(()) - }, - )?; + let storage = self.storage.as_ref().ok_or_eyre("Storage must be set")?; + + // Check if genesis is loaded + let reader = storage.reader()?; + let has_genesis = reader.has_block(0)?; + drop(reader); + + if !has_genesis { + let genesis = self.config.genesis(); + let hardforks = signet_genesis::genesis_hardforks(genesis); + let writer = storage.hot().writer()?; + writer.load_genesis(genesis, &hardforks)?; + writer.commit()?; + info!("loaded genesis into hot storage"); } Ok(()) } } -impl SignetNodeBuilder, NotADb, NotAnAof> -where - Host: FullNodeComponents, - Host::Types: NodeTypes, -{ - /// Build the node. This performs the following steps: - /// - /// - Runs prebuild checks. - /// - Inits the rollup DB from genesis if needed. - /// - Creates a default `AliasOracleFactory` from the host DB. - /// - /// # Panics - /// - /// If called outside a tokio runtime. - pub fn build( - self, - ) -> eyre::Result<( - SignetNode, Box>, - tokio::sync::watch::Receiver, - )> { - self.with_config_db()?.build() - } -} - -impl SignetNodeBuilder, NotADb, Aof> -where - Host: FullNodeComponents, - Host::Types: NodeTypes, - Aof: AliasOracleFactory, -{ - /// Build the node. This performs the following steps: - /// - /// - Runs prebuild checks. - /// - Inits the rollup DB from genesis if needed. - /// - /// # Panics - /// - /// If called outside a tokio runtime. - pub fn build( - self, - ) -> eyre::Result<( - SignetNode, Aof>, - tokio::sync::watch::Receiver, - )> { - self.with_config_db()?.build() - } -} - -impl SignetNodeBuilder, ProviderFactory>, NotAnAof> +impl SignetNodeBuilder, Arc>, NotAnAof> where Host: FullNodeComponents, Host::Types: NodeTypes, - Db: NodeTypesDbTrait, + H: HotKv + Clone + Send + Sync + 'static, + ::Error: DBErrorMarker, { /// Build the node. 
This performs the following steps: /// /// - Runs prebuild checks. - /// - Inits the rollup DB from genesis if needed. + /// - Inits storage from genesis if needed. /// - Creates a default `AliasOracleFactory` from the host DB. /// /// # Panics @@ -278,9 +155,8 @@ where /// If called outside a tokio runtime. pub fn build( mut self, - ) -> eyre::Result<(SignetNode, tokio::sync::watch::Receiver)> { + ) -> eyre::Result<(SignetNode, tokio::sync::watch::Receiver)> { self.prebuild()?; - // This allows the node to look up contract status. let ctx = self.ctx.unwrap(); let provider = ctx.provider().clone(); let alias_oracle: Box = Box::new(provider); @@ -288,36 +164,37 @@ where SignetNode::new_unsafe( ctx, self.config, - self.factory.unwrap(), + self.storage.unwrap(), alias_oracle, self.client.unwrap(), ) } } -impl SignetNodeBuilder, ProviderFactory>, Aof> +impl SignetNodeBuilder, Arc>, Aof> where Host: FullNodeComponents, Host::Types: NodeTypes, - Db: NodeTypesDbTrait, + H: HotKv + Clone + Send + Sync + 'static, + ::Error: DBErrorMarker, Aof: AliasOracleFactory, { /// Build the node. This performs the following steps: /// /// - Runs prebuild checks. - /// - Inits the rollup DB from genesis if needed. + /// - Inits storage from genesis if needed. /// /// # Panics /// /// If called outside a tokio runtime. pub fn build( mut self, - ) -> eyre::Result<(SignetNode, tokio::sync::watch::Receiver)> { + ) -> eyre::Result<(SignetNode, tokio::sync::watch::Receiver)> { self.prebuild()?; SignetNode::new_unsafe( self.ctx.unwrap(), self.config, - self.factory.unwrap(), + self.storage.unwrap(), self.alias_oracle.unwrap(), self.client.unwrap(), ) diff --git a/crates/node/src/lib.rs b/crates/node/src/lib.rs index 92b97f1..7b5aa9e 100644 --- a/crates/node/src/lib.rs +++ b/crates/node/src/lib.rs @@ -17,6 +17,11 @@ pub use builder::SignetNodeBuilder; mod metrics; mod node; -pub use node::{GENESIS_JOURNAL_HASH, SignetNode}; +pub use node::SignetNode; mod rpc; + +mod serve; + +mod status; +pub use status::NodeStatus; diff --git a/crates/node/src/metrics.rs b/crates/node/src/metrics.rs index 7623d73..d775464 100644 --- a/crates/node/src/metrics.rs +++ b/crates/node/src/metrics.rs @@ -7,6 +7,7 @@ //! 
- Number of reorgs processed

 use metrics::{Counter, counter, describe_counter};
+use reth::primitives::NodePrimitives;
 use reth_exex::ExExNotification;
 use std::sync::LazyLock;
@@ -29,13 +30,22 @@ static DESCRIBE: LazyLock<()> = LazyLock::new(|| {
     describe_counter!(REORGS_PROCESSED, REORGS_PROCESSED_HELP);
 });

-fn reorgs_processed() -> Counter {
+fn notifications_received() -> Counter {
     LazyLock::force(&DESCRIBE);
-    counter!(REORGS_PROCESSED)
+    counter!(NOTIFICATION_RECEIVED)
 }

-fn inc_reorgs_processed() {
-    reorgs_processed().increment(1);
+fn inc_notifications_received() {
+    notifications_received().increment(1);
+}
+
+fn reorgs_received() -> Counter {
+    LazyLock::force(&DESCRIBE);
+    counter!(REORGS_RECEIVED)
+}
+
+fn inc_reorgs_received() {
+    reorgs_received().increment(1);
 }

 fn notifications_processed() -> Counter {
@@ -47,14 +57,23 @@ fn inc_notifications_processed() {
     notifications_processed().increment(1);
 }

-pub(crate) fn record_notification_received(notification: &ExExNotification) {
-    inc_notifications_processed();
+fn reorgs_processed() -> Counter {
+    LazyLock::force(&DESCRIBE);
+    counter!(REORGS_PROCESSED)
+}
+
+fn inc_reorgs_processed() {
+    reorgs_processed().increment(1);
+}
+
+pub(crate) fn record_notification_received<N: NodePrimitives>(notification: &ExExNotification<N>) {
+    inc_notifications_received();
     if notification.reverted_chain().is_some() {
-        inc_reorgs_processed();
+        inc_reorgs_received();
     }
 }

-pub(crate) fn record_notification_processed(notification: &ExExNotification) {
+pub(crate) fn record_notification_processed<N: NodePrimitives>(notification: &ExExNotification<N>) {
     inc_notifications_processed();
     if notification.reverted_chain().is_some() {
         inc_reorgs_processed();
diff --git a/crates/node/src/node.rs b/crates/node/src/node.rs
index 1839aa4..172edc3 100644
--- a/crates/node/src/node.rs
+++ b/crates/node/src/node.rs
@@ -1,49 +1,37 @@
-use crate::metrics;
-use alloy::{
-    consensus::BlockHeader,
-    eips::NumHash,
-    primitives::{B256, BlockNumber, b256},
-};
+use crate::{NodeStatus, metrics, serve::RpcServerGuard};
+use alloy::consensus::BlockHeader;
 use eyre::Context;
 use futures_util::StreamExt;
 use reth::{
+    chainspec::EthChainSpec,
     primitives::EthPrimitives,
-    providers::{
-        BlockIdReader, BlockNumReader, BlockReader, CanonChainTracker, CanonStateNotification,
-        CanonStateNotifications, CanonStateSubscriptions, HeaderProvider, NodePrimitivesProvider,
-        ProviderFactory, StateProviderFactory, providers::BlockchainProvider,
-    },
-    rpc::types::engine::ForkchoiceState,
+    providers::{BlockIdReader, BlockReader, HeaderProvider, StateProviderFactory},
 };
-use reth_chainspec::EthChainSpec;
 use reth_exex::{ExExContext, ExExEvent, ExExHead, ExExNotificationsStream};
 use reth_node_api::{FullNodeComponents, FullNodeTypes, NodeTypes};
-use signet_blobber::BlobFetcher;
+use signet_blobber::ExtractableChainShim;
 use signet_block_processor::{AliasOracleFactory, SignetBlockProcessorV1};
-use signet_db::{DbProviderExt, ProviderConsistencyExt, RuChain, RuWriter};
+use signet_extract::Extractor;
 use signet_node_config::SignetNodeConfig;
-use signet_node_types::{NodeStatus, NodeTypesDbTrait, SignetNodeTypes};
-use signet_rpc::RpcServerGuard;
+use signet_rpc::{ChainNotifier, NewBlockNotification};
+use signet_storage::{HistoryRead, HotKv, HotKvRead, UnifiedStorage};
 use signet_types::{PairedHeights, constants::SignetSystemConstants};
-use std::{fmt, mem::MaybeUninit, sync::Arc};
+use std::{fmt, sync::Arc};
 use tokio::sync::watch;
 use tracing::{debug, info, instrument};
+use trevm::revm::database::DBErrorMarker;

-/// The genesis journal hash for the signet chain.
-pub const GENESIS_JOURNAL_HASH: B256 =
-    b256!("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef");
-
-/// Make it easier to write some args
+/// Type alias for the host primitives.
 type PrimitivesOf<Host> = <<Host as FullNodeComponents>::Types as NodeTypes>::Primitives;

 type ExExNotification<Host> = reth_exex::ExExNotification<PrimitivesOf<Host>>;

 type Chain<Host> = reth::providers::Chain<PrimitivesOf<Host>>;

 /// Signet context and configuration.
-pub struct SignetNode<Host, Db, AliasOracle = Box<dyn AliasOracleFactory>>
+pub struct SignetNode<Host, H, AliasOracle = Box<dyn AliasOracleFactory>>
 where
     Host: FullNodeComponents,
     Host::Types: NodeTypes<Primitives = EthPrimitives>,
-    Db: NodeTypesDbTrait,
+    H: HotKv,
 {
     /// The host context, which manages provider access and notifications.
     pub(crate) host: ExExContext<Host>,
@@ -51,12 +39,12 @@ where
     /// Signet node configuration.
     pub(crate) config: Arc<SignetNodeConfig>,

-    /// A [`ProviderFactory`] instance to allow RU database access.
-    pub(crate) ru_provider: ProviderFactory<SignetNodeTypes<Db>>,
+    /// Unified hot + cold storage backend.
+    pub(crate) storage: Arc<UnifiedStorage<H>>,

-    /// A [`BlockchainProvider`] instance. Used to notify the RPC server of
-    /// changes to the canonical/safe/finalized head.
-    pub(crate) bp: BlockchainProvider<SignetNodeTypes<Db>>,
+    /// Shared chain state (block tags + notification sender).
+    /// Cloned to the RPC context on startup.
+    pub(crate) chain: ChainNotifier,

     /// The join handle for the RPC server. None if the RPC server is not
     /// yet running.
@@ -65,61 +53,41 @@ where
     /// Chain configuration constants.
     pub(crate) constants: SignetSystemConstants,

-    /// Status channel, currently used only for testing
+    /// Status channel, currently used only for testing.
     pub(crate) status: watch::Sender<NodeStatus>,

-    /// The block processor
-    pub(crate) processor: SignetBlockProcessorV1<Db, AliasOracle>,
+    /// The block processor.
+    pub(crate) processor: SignetBlockProcessorV1<H, AliasOracle>,

     /// A reqwest client, used by the blob fetch and the tx cache forwarder.
     pub(crate) client: reqwest::Client,
 }

-impl<Host, Db, AliasOracle> fmt::Debug for SignetNode<Host, Db, AliasOracle>
+impl<Host, H, AliasOracle> fmt::Debug for SignetNode<Host, H, AliasOracle>
 where
     Host: FullNodeComponents,
     Host::Types: NodeTypes<Primitives = EthPrimitives>,
-    Db: NodeTypesDbTrait,
+    H: HotKv,
 {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.debug_struct("SignetNode").field("config", &self.config).finish_non_exhaustive()
     }
 }

-impl<Host, Db, AliasOracle> NodePrimitivesProvider for SignetNode<Host, Db, AliasOracle>
-where
-    Host: FullNodeComponents,
-    Host::Types: NodeTypes<Primitives = EthPrimitives>,
-    Db: NodeTypesDbTrait,
-{
-    type Primitives = EthPrimitives;
-}
-
-impl<Host, Db, AliasOracle> CanonStateSubscriptions for SignetNode<Host, Db, AliasOracle>
-where
-    Host: FullNodeComponents,
-    Host::Types: NodeTypes<Primitives = EthPrimitives>,
-    Db: NodeTypesDbTrait,
-    AliasOracle: AliasOracleFactory,
-{
-    fn subscribe_to_canonical_state(&self) -> CanonStateNotifications {
-        self.bp.subscribe_to_canonical_state()
-    }
-}
-
-impl<Host, Db, AliasOracle> SignetNode<Host, Db, AliasOracle>
+impl<Host, H, AliasOracle> SignetNode<Host, H, AliasOracle>
 where
     Host: FullNodeComponents,
     Host::Types: NodeTypes<Primitives = EthPrimitives>,
-    Db: NodeTypesDbTrait,
+    H: HotKv + Clone + Send + Sync + 'static,
+    <H as HotKv>::Error: DBErrorMarker,
     AliasOracle: AliasOracleFactory,
 {
     /// Create a new Signet instance. It is strongly recommended that you use
     /// the [`SignetNodeBuilder`] instead of this function.
     ///
     /// This function does NOT initialize the genesis state. As such it is NOT
-    /// safe to use directly. The genesis state in the `factory` MUST be
-    /// initialized BEFORE calling this function.
+    /// safe to use directly. The genesis state in storage MUST be initialized
+    /// BEFORE calling this function.
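+    ///
+    /// The [`SignetNodeBuilder`] handles this in its prebuild checks,
+    /// loading the genesis into hot storage when block 0 is absent.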
/// /// # Panics /// @@ -130,18 +98,17 @@ where pub fn new_unsafe( ctx: ExExContext, config: SignetNodeConfig, - factory: ProviderFactory>, + storage: Arc>, alias_oracle: AliasOracle, client: reqwest::Client, - ) -> eyre::Result<(Self, tokio::sync::watch::Receiver)> { + ) -> eyre::Result<(Self, watch::Receiver)> { let constants = config.constants().wrap_err("failed to load signet constants from genesis")?; - let bp: BlockchainProvider> = BlockchainProvider::new(factory.clone())?; - - let (status, receiver) = tokio::sync::watch::channel(NodeStatus::Booting); + let (status, receiver) = watch::channel(NodeStatus::Booting); + let chain = ChainNotifier::new(128); - let blob_cacher = BlobFetcher::builder() + let blob_cacher = signet_blobber::BlobFetcher::builder() .with_config(config.block_extractor())? .with_pool(ctx.pool().clone()) .with_client(client.clone()) @@ -152,7 +119,7 @@ where let processor = SignetBlockProcessorV1::new( constants.clone(), config.chain_spec().clone(), - factory.clone(), + storage.hot().clone(), alias_oracle, config.slot_calculator(), blob_cacher, @@ -161,15 +128,12 @@ where let this = Self { config: config.into(), host: ctx, - ru_provider: factory.clone(), - bp, - + storage, + chain, rpc_handle: None, constants, status, - processor, - client, }; Ok((this, receiver)) @@ -179,8 +143,24 @@ where /// errors. #[instrument(skip(self), fields(host = ?self.host.config.chain.chain()))] pub async fn start(mut self) -> eyre::Result<()> { - if let Some(height) = self.ru_provider.ru_check_consistency()? { - self.unwind_to(height).wrap_err("failed to unwind RU database to consistent state")?; + // Ensure hot and cold storage are at the same height. If either + // is ahead, unwind to the minimum so the host re-delivers blocks. + { + let reader = self.storage.reader()?; + let hot_tip = reader.last_block_number()?.unwrap_or(0); + drop(reader); + let cold_tip = self.storage.cold_reader().get_latest_block().await?.unwrap_or(0); + + let target = hot_tip.min(cold_tip); + if target < hot_tip || target < cold_tip { + info!( + hot_tip, + cold_tip, + unwind_to = target, + "storage layers inconsistent, reconciling" + ); + self.storage.unwind_above(target)?; + } } // This exists only to bypass the `tracing::instrument(err)` macro to @@ -190,7 +170,8 @@ where // includes cause reporting. let err = format!("{err:#}"); - let last_block = self.ru_provider.last_block_number().ok(); + let last_block = + self.storage.reader().ok().and_then(|r| r.last_block_number().ok().flatten()); let exex_head = last_block.and_then(|h| self.set_exex_head(h).ok()); tracing::error!(err, last_block, ?exex_head, "Signet node crashed"); @@ -203,8 +184,10 @@ where self.start_rpc().await?; - // Determine the last block written to the database for backfill - let last_rollup_block: u64 = self.ru_provider.last_block_number()?; + // Determine the last block written to storage for backfill + let reader = self.storage.reader()?; + let last_rollup_block = reader.last_block_number()?.unwrap_or(0); + drop(reader); info!(last_rollup_block, "resuming execution from last rollup block found"); @@ -232,9 +215,11 @@ where Ok(()) } - /// Sets the head of the Exex chain from the last rollup block, handling genesis conditions if necessary. + /// Sets the head of the Exex chain from the last rollup block, handling + /// genesis conditions if necessary. fn set_exex_head(&mut self, last_rollup_block: u64) -> eyre::Result { - // If the last rollup block is 0, we can shortcut and just set the head to the host rollup deployment block. 
+ // If the last rollup block is 0, shortcut to the host rollup + // deployment block. if last_rollup_block == 0 { let host_deployment_block = self.host.provider().block_by_number(self.constants.host_deploy_height())?; @@ -250,22 +235,19 @@ where host_ru_deploy_block, "Host deploy height not found. Falling back to genesis block" ); - let genesis_block = self.host.provider().block_by_number(0)?; - match genesis_block { - Some(genesis_block) => { - let exex_head = ExExHead { block: genesis_block.num_hash_slow() }; - self.host.notifications.set_with_head(exex_head); - return Ok(exex_head); - } - None => panic!("failed to find genesis block"), - } + let genesis_block = self + .host + .provider() + .block_by_number(0)? + .expect("failed to find genesis block"); + let exex_head = ExExHead { block: genesis_block.num_hash_slow() }; + self.host.notifications.set_with_head(exex_head); + return Ok(exex_head); } } } - // If the last rollup block is not 0, we need to find the corresponding host block. - // We do this by looking up the host block number for the rollup block number, and then - // looking up the host block for that number. + // Find the corresponding host block for the rollup block number. let host_height = self.constants.pair_ru(last_rollup_block).host; match self.host.provider().block_by_number(host_height)? { @@ -277,15 +259,11 @@ where } None => { debug!(host_height, "no host block found for host height"); - let genesis_block = self.host.provider().block_by_number(0)?; - match genesis_block { - Some(genesis_block) => { - let exex_head = ExExHead { block: genesis_block.num_hash_slow() }; - self.host.notifications.set_with_head(exex_head); - Ok(exex_head) - } - None => panic!("failed to find genesis block"), - } + let genesis_block = + self.host.provider().block_by_number(0)?.expect("failed to find genesis block"); + let exex_head = ExExHead { block: genesis_block.num_hash_slow() }; + self.host.notifications.set_with_head(exex_head); + Ok(exex_head) } } } @@ -298,355 +276,188 @@ where pub async fn on_notification(&self, notification: ExExNotification) -> eyre::Result<()> { metrics::record_notification_received(¬ification); + let mut changed = false; + // NB: REVERTS MUST RUN FIRST - let mut reverted = None; if let Some(chain) = notification.reverted_chain() { - reverted = self.on_host_revert(&chain).wrap_err("error encountered during revert")?; + self.on_host_revert(&chain).wrap_err("error encountered during revert")?; + changed = true; } - let mut committed = None; if let Some(chain) = notification.committed_chain() { - committed = self - .processor - .on_host_commit::(&chain) + self.process_committed_chain(&chain) .await .wrap_err("error encountered during commit")?; + changed = true; } - if committed.is_some() || reverted.is_some() { - // Update the status channel and canon heights, etc. - self.update_status(committed, reverted)?; + if changed { + self.update_status()?; } metrics::record_notification_processed(¬ification); Ok(()) } - /// Update the status channel and the latest block info. This is necessary - /// to keep the RPC node in sync with the latest block information. - fn update_status( - &self, - committed: Option, - reverted: Option, - ) -> eyre::Result<()> { - let ru_height = self.ru_provider.last_block_number()?; + /// Process a committed chain by extracting and executing blocks. 
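+    ///
+    /// Extraction runs over an [`ExtractableChainShim`] of the committed
+    /// host chain; blocks at or below the last height already in storage
+    /// are skipped, and each remaining block is executed, broadcast to RPC
+    /// subscribers, and appended to storage.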
+ async fn process_committed_chain(&self, chain: &Arc>) -> eyre::Result<()> { + let shim = ExtractableChainShim::new(chain); + let extractor = Extractor::new(self.constants.clone()); + let extracts: Vec<_> = extractor.extract_signet(&shim).collect(); - // Update the RPC's block information - self.update_canon_heights(ru_height)?; + let reader = self.storage.reader()?; + let last_height = reader.last_block_number()?.unwrap_or(0); + drop(reader); - // We'll also emit the new chains as notifications on our canonstate - // notification channel, provided anyone is listening - self.update_canon_state(committed, reverted); + for block_extracts in extracts.iter().filter(|e| e.ru_height > last_height) { + let executed = self.processor.process_block(block_extracts).await?; + self.notify_new_block(&executed); + self.storage.append_blocks(vec![executed])?; + } + Ok(()) + } - // Update the status channel. This is used by the test-utils to watch - // notification processing, and may be removed in the future. - self.status.send_modify(|s| *s = NodeStatus::AtHeight(ru_height)); + /// Send a new block notification on the broadcast channel. + fn notify_new_block(&self, block: &signet_storage::ExecutedBlock) { + let notif = NewBlockNotification { + header: block.header.inner().clone(), + transactions: block.transactions.iter().map(|tx| tx.inner().clone()).collect(), + receipts: block.receipts.clone(), + }; + // Ignore send errors — no subscribers is fine. + let _ = self.chain.send_notification(notif); + } + + /// Update the status channel and block tags. This keeps the RPC node + /// in sync with the latest block information. + fn update_status(&self) -> eyre::Result<()> { + let reader = self.storage.reader()?; + let ru_height = reader.last_block_number()?.unwrap_or(0); + drop(reader); + self.update_block_tags(ru_height)?; + self.status.send_modify(|s| *s = NodeStatus::AtHeight(ru_height)); Ok(()) } - /// Update the canonical heights of the chain. This does two main things - /// - Update the RPC server's view of the forkchoice rule, setting the - /// tip and block labels - /// - Update the reth node that the ExEx has finished processing blocks up - /// to the finalized block. - /// - /// This is used by the RPC to resolve block tags including "latest", - /// "safe", and "finalized", as well as the number returned by - /// `eth_blockNumber`. - fn update_canon_heights(&self, ru_height: u64) -> eyre::Result<()> { - // Set the canonical head ("latest" label) - let latest_ru_block_header = self - .ru_provider - .sealed_header(ru_height)? - .expect("ru db inconsistent. no header for height"); - let latest_ru_block_hash = latest_ru_block_header.hash(); - self.bp.set_canonical_head(latest_ru_block_header); - - // This is our fallback safe and finalized, in case the host chain - // hasn't finalized more recent blocks - let genesis_ru_hash = self - .ru_provider - .sealed_header(0)? - .expect("ru db inconsistent. no header for height") - .hash(); - - // Load the safe block hash for both the host and the rollup. - // The safe block height of the rollup CANNOT be higher than the latest ru height, - // as we've already processed all the blocks up to the latest ru height. - let PairedHeights { host: _, rollup: safe_ru_height } = - self.load_safe_block_heights(ru_height)?; - let safe_ru_block_header = self - .ru_provider - .sealed_header(safe_ru_height)? - .expect("ru db inconsistent. 
no header for height"); - let safe_ru_block_hash = safe_ru_block_header.hash(); - + /// Update block tags (latest/safe/finalized) and notify reth of processed + /// height. + fn update_block_tags(&self, ru_height: u64) -> eyre::Result<()> { + // Safe height + let safe_heights = self.load_safe_block_heights(ru_height)?; + let safe_ru_height = safe_heights.rollup; debug!(safe_ru_height, "calculated safe ru height"); - // Update the safe rollup block hash iff it's not the genesis rollup block. - if safe_ru_block_hash != genesis_ru_hash { - self.bp.set_safe(safe_ru_block_header); - } - - // Load the finalized rollup block hash. - // The finalized rollup block height CANNOT be higher than the latest ru height, - // as we've already processed all the blocks up to the latest ru height. + // Finalized height let finalized_heights = self.load_finalized_block_heights(ru_height)?; - debug!( finalized_host_height = finalized_heights.host, finalized_ru_height = finalized_heights.rollup, "calculated finalized heights" ); - // Load the finalized RU block hash. It's the genesis hash if the host - // and rollup finalized heights are both 0. Otherwise, we load the finalized - // RU header and set the finalized block hash. - let finalized_ru_block_hash = - self.set_finalized_ru_block_hash(finalized_heights, genesis_ru_hash)?; - - // NB: - // We also need to notify the reth node that we are totally - // finished processing the host block before the finalized block now. - // We want to keep the finalized host block in case we reorg to the block - // immediately on top of it, and we need some state from the parent. - // - // If this errors, it means that the reth node has shut down and we - // should stop processing blocks. - // - // To do this, we grab the finalized host header to get its height and hash, - // so we can send the corresponding [`ExExEvent`]. - if finalized_ru_block_hash != genesis_ru_hash { + // Atomically update all three tags + self.chain.tags().update_all(ru_height, safe_ru_height, finalized_heights.rollup); + + // Notify reth that we've finished processing up to the finalized + // height. Skip if finalized rollup height is still at genesis. + if finalized_heights.rollup > 0 { self.update_highest_processed_height(finalized_heights.host)?; } - // Update the RPC's forkchoice timestamp. - self.bp.on_forkchoice_update_received(&ForkchoiceState { - head_block_hash: latest_ru_block_hash, - safe_block_hash: safe_ru_block_hash, - finalized_block_hash: finalized_ru_block_hash, - }); debug!( - %latest_ru_block_hash, %safe_ru_block_hash, %finalized_ru_block_hash, - "updated RPC block producer" + latest = ru_height, + safe = safe_ru_height, + finalized = finalized_heights.rollup, + "updated block tags" ); Ok(()) } - /// Update the ExEx head to the finalized host block. - /// - /// If this errors, it means that the reth node has shut down and we - /// should stop processing blocks. - fn update_exex_head( - &self, - finalized_host_height: u64, - finalized_host_hash: B256, - ) -> eyre::Result<()> { - debug!(finalized_host_height, "Sending FinishedHeight notification"); - self.host.events.send(ExExEvent::FinishedHeight(NumHash { - number: finalized_host_height, - hash: finalized_host_hash, - }))?; - Ok(()) - } - - /// Send a canon state notification via the channel. 
- fn update_canon_state(&self, committed: Option, reverted: Option) { - let commit_count = committed.as_ref().map(|c| c.len()).unwrap_or_default(); - let revert_count = reverted.as_ref().map(|r| r.len()).unwrap_or_default(); - - let notif = match (committed, reverted) { - (None, None) => None, - (None, Some(r)) => Some(CanonStateNotification::Reorg { - old: Arc::new(r.inner), - new: Arc::new(Default::default()), - }), - (Some(c), None) => Some(CanonStateNotification::Commit { new: Arc::new(c.inner) }), - (Some(c), Some(r)) => Some(CanonStateNotification::Reorg { - old: Arc::new(r.inner), - new: Arc::new(c.inner), - }), - }; - if let Some(notif) = notif { - tracing::debug!(commit_count, revert_count, "sending canon state notification"); - // we don't care if it fails, we just want to send it - self.bp.canonical_in_memory_state().notify_canon_state(notif); - } - } - /// Load the host chain "safe" block number and determine the rollup "safe" - /// block number. There are three cases: + /// block number. /// + /// There are three cases: /// 1. The host chain "safe" block number is below the rollup genesis. - /// In this case, we'll use the host genesis block number as the "safe" - /// block number. This can happen if the rollup starts syncing while the - /// host still hasn't seen the rollup genesis block. - /// 2. The host safe "block", when converted to the equivalent rollup block, - /// is beyond the current rollup block. In this case, we'll use the current - /// rollup block as safe block. This can happen if the host chain is - /// synced beyond the current rollup block, but the rollup is still syncing - /// and catching up with the host head and therefore hasn't seen the host - /// safe block. - /// 3. The host safe block number is below the current rollup block. In this - /// case, we can use the safe host block number, converted to its rollup - /// equivalent, as the safe rollup block number. This is the expected case - /// when the rollup and host are both caught up and in live sync. + /// 2. The safe rollup equivalent is beyond the current rollup height. + /// 3. The safe rollup equivalent is below the current rollup height (normal + /// case). fn load_safe_block_heights(&self, ru_height: u64) -> eyre::Result { - // Load the host safe block number - let safe_host_height = self.host.provider().safe_block_number()?; - - // Convert the host safe block number to the rollup safe block number. - // If the host safe block number is below the rollup genesis, - // this will return None. - let safe_heights = safe_host_height - .and_then(|safe_host_height| self.constants.pair_host(safe_host_height)); - - // If we successfully converted the host safe block number to the rollup safe block number, - // then we'll compare it to the current rollup block height and use the smaller of the two. - if let Some(safe_heights) = safe_heights { - // We compare the safe ru height to the current ru height. If the safe ru height is - // beyond the current ru height, we're in case 2. - if safe_heights.rollup > ru_height { - // We are in case 2. - Ok(PairedHeights { - host: self.constants.rollup_block_to_host_block_num(ru_height), - rollup: ru_height, - }) - } else { - // If the safe ru height is below the current ru height, we're in case 3. - Ok(safe_heights) - } + let Some(safe_heights) = + self.host.provider().safe_block_number()?.and_then(|h| self.constants.pair_host(h)) + else { + // Host safe block is below rollup genesis — use genesis. 
+ return Ok(PairedHeights { host: self.constants.host_deploy_height(), rollup: 0 }); + }; + + // Clamp to current rollup height if ahead. + if safe_heights.rollup > ru_height { + Ok(self.constants.pair_ru(ru_height)) } else { - // If the host safe block number is below the rollup genesis, - // we'll use the host genesis block number as the "safe" block number. - Ok(PairedHeights { host: 0, rollup: 0 }) + Ok(safe_heights) } } - /// Set the finalized RU block hash. + /// Load the host chain "finalized" block number and determine the rollup + /// "finalized" block number. /// - /// Depending on the current rollup sync status, there are two cases: - /// 1. If we're syncing from scratch, we'll set the finalized RU block hash to the genesis hash. - /// 2. If we're syncing, or following the tip, we'll set the finalized RU block hash to the current RU block hash. - fn set_finalized_ru_block_hash( - &self, - finalized_heights: PairedHeights, - genesis_hash: B256, - ) -> eyre::Result { - // If both heights are 0, return genesis hash - if finalized_heights.host == 0 && finalized_heights.rollup == 0 { - return Ok(genesis_hash); - } - - // Load and set finalized RU header - let finalized_ru_header = self - .ru_provider - .sealed_header(finalized_heights.rollup)? - .expect("ru db inconsistent. no header for height"); - let finalized_ru_block_hash = finalized_ru_header.hash(); - self.bp.set_finalized(finalized_ru_header); + /// There are three cases: + /// 1. The host chain "finalized" block is below the rollup genesis. + /// 2. The finalized rollup equivalent is beyond the current rollup height. + /// 3. The finalized rollup equivalent is below the current rollup height + /// (normal case). + fn load_finalized_block_heights(&self, ru_height: u64) -> eyre::Result { + let Some(finalized_ru) = self + .host + .provider() + .finalized_block_number()? + .and_then(|h| self.constants.host_block_to_rollup_block_num(h)) + else { + // Host finalized block is below rollup genesis — use genesis. + return Ok(PairedHeights { host: self.constants.host_deploy_height(), rollup: 0 }); + }; - Ok(finalized_ru_block_hash) + // Clamp to current rollup height if ahead. + let ru = finalized_ru.min(ru_height); + Ok(self.constants.pair_ru(ru)) } - /// Update the host node with the highest processed host height for the exex. + /// Update the host node with the highest processed host height for the + /// ExEx. fn update_highest_processed_height(&self, finalized_host_height: u64) -> eyre::Result<()> { - let finalized_host_header = self + let adjusted_height = finalized_host_height.saturating_sub(1); + let adjusted_header = self .host .provider() - .sealed_header(finalized_host_height)? - .expect("db inconsistent. no host header for finalized height"); + .sealed_header(adjusted_height)? + .expect("db inconsistent. no host header for adjusted height"); - let adjusted_height = finalized_host_header.number.saturating_sub(1); - let hash = finalized_host_header.hash(); + let hash = adjusted_header.hash(); debug!(finalized_host_height = adjusted_height, "Sending FinishedHeight notification"); - self.update_exex_head(adjusted_height, hash) - } - - /// Load the host chain "finalized" block number and determine the rollup - /// "finalized" block number. If the host chain "finalized" block number is below the - /// rollup genesis, we'll use the genesis hash as the "finalized" block. 
- /// If the host chain "finalized" block number is beyond the current rollup block, - /// we'll use the current rollup block and its host equivalent as the "finalized" blocks. - /// - /// This returns a tuple of the host and rollup "finalized" block numbers. - /// - /// There are three cases: - /// 1. The host chain "finalized" block number is below the rollup genesis (and therefore the current rollup block). - /// In this case, we'll use the host genesis block number as the "finalized" block number, with the rollup syncing from scratch. - /// This can happen if the rollup starts syncing while the host still hasn't seen the rollup genesis block. - /// 2. The host chain "finalized" block number is beyond the current rollup block. - /// In this case, we'll use the current rollup block number as the "finalized" block number. - /// This can happen if the host chain is synced beyond the current rollup block, but the rollup is still syncing - /// and catching up with the host head and therefore hasn't seen the host finalized block. - /// 3. The host chain "finalized" block number is below the current rollup block. - /// In this case, we'll use the host chain "finalized" block number, converted to its rollup equivalent, as the "finalized" block number. - /// This is the expected case when the rollup and host are both caught up and in live sync. - fn load_finalized_block_heights(&self, ru_height: u64) -> eyre::Result { - // Load the host chain "finalized" block number - let finalized_host_block_number = self.host.provider().finalized_block_number()?; - - // Convert the host chain "finalized" block number to the rollup "finalized" block number. - // If the host chain "finalized" block number is below the rollup genesis, - // this will return None. - let finalized_ru_block_number = - finalized_host_block_number.and_then(|finalized_host_block_number| { - self.constants.host_block_to_rollup_block_num(finalized_host_block_number) - }); - - // If we successfully converted the host chain "finalized" block number to the rollup "finalized" block number, - // then we'll figure out which case we're in and return the appropriate heights. - if let Some(finalized_ru_block_number) = finalized_ru_block_number { - // We compare the finalized ru height to the current ru height. If the finalized ru height is - // beyond the current ru height, we're in case 2 (rollup is behind host). - if finalized_ru_block_number > ru_height { - Ok(self.constants.pair_ru(ru_height)) - } else { - // If the finalized ru height is below the current ru height, we're in case 3 (rollup is near or in sync with the host head). - Ok(self.constants.pair_ru(finalized_ru_block_number)) - } - } else { - // If we failed to convert the host chain "finalized" block number to the rollup "finalized" block number, - // then this means the host chain "finalized" block number is below the rollup genesis (and therefore the current rollup block). - // We'll use the genesis block number as the "finalized" block number. - Ok(PairedHeights { host: 0, rollup: 0 }) - } - } - - /// Unwind the RU chain DB to the target block number. - fn unwind_to(&self, target: BlockNumber) -> eyre::Result { - let mut reverted = MaybeUninit::uninit(); - self.ru_provider - .provider_rw()? - .update(|writer| { - reverted.write(writer.ru_take_blocks_and_execution_above(target)?); - Ok(()) - }) - // SAFETY: if the closure above returns Ok, reverted is initialized. 
- .map(|_| unsafe { reverted.assume_init() }) - .map_err(Into::into) + self.host.events.send(ExExEvent::FinishedHeight(alloy::eips::NumHash { + number: adjusted_height, + hash, + }))?; + Ok(()) } /// Called when the host chain has reverted a block or set of blocks. #[instrument(skip_all, fields(first = chain.first().number(), tip = chain.tip().number()))] - pub fn on_host_revert(&self, chain: &Arc>) -> eyre::Result> { - // If the end is before the RU genesis, we don't need to do anything at - // all. + pub fn on_host_revert(&self, chain: &Arc>) -> eyre::Result<()> { + // If the end is before the RU genesis, nothing to do. if chain.tip().number() <= self.constants.host_deploy_height() { - return Ok(None); + return Ok(()); } - // The target is - // - the block BEFORE the first block in the chain - // - or block 0, if the first block is before the rollup deploy height + // Target is the block BEFORE the first block in the chain, or 0. let target = self .constants .host_block_to_rollup_block_num(chain.first().number()) - .unwrap_or_default() // 0 if the block is before the deploy height - .saturating_sub(1); // still 0 if 0, otherwise the block BEFORE. + .unwrap_or_default() + .saturating_sub(1); - self.unwind_to(target).map(Some) + self.storage.unwind_above(target)?; + Ok(()) } } diff --git a/crates/node/src/rpc.rs b/crates/node/src/rpc.rs index 80d79c2..a1a0317 100644 --- a/crates/node/src/rpc.rs +++ b/crates/node/src/rpc.rs @@ -1,17 +1,22 @@ -use crate::SignetNode; -use reth::{primitives::EthPrimitives, rpc::builder::config::RethRpcServerConfig}; +use crate::{ + SignetNode, + serve::{RpcServerGuard, ServeConfig}, +}; +use reth::primitives::EthPrimitives; use reth_node_api::{FullNodeComponents, NodeTypes}; use signet_block_processor::AliasOracleFactory; -use signet_node_types::NodeTypesDbTrait; -use signet_rpc::{RpcCtx, RpcServerGuard, ServeConfig}; +use signet_rpc::{StorageRpcConfig, StorageRpcCtx}; +use signet_storage::HotKv; use signet_tx_cache::TxCache; +use std::sync::Arc; use tracing::info; -impl SignetNode +impl SignetNode where Host: FullNodeComponents, Host::Types: NodeTypes, - Db: NodeTypesDbTrait, + H: HotKv + Send + Sync + 'static, + ::Error: trevm::revm::database::DBErrorMarker, AliasOracle: AliasOracleFactory, { /// Start the RPC server. 
@@ -24,17 +29,16 @@ where async fn launch_rpc(&self) -> eyre::Result { let tasks = self.host.task_executor(); - let forwarder = + let tx_cache = self.config.forward_url().map(|url| TxCache::new_with_client(url, self.client.clone())); - let eth_config = self.host.config.rpc.eth_config(); - let router = signet_rpc::router().with_state(RpcCtx::new( - self.host.components.clone(), + let rpc_ctx = StorageRpcCtx::new( + Arc::clone(&self.storage), self.constants.clone(), - self.bp.clone(), - eth_config, - forwarder, - tasks.clone(), - )?); + self.chain.clone(), + tx_cache, + StorageRpcConfig::default(), + ); + let router = signet_rpc::router::().with_state(rpc_ctx); let serve_config: ServeConfig = self.config.merge_rpc_configs(&self.host)?.into(); serve_config.serve(tasks, router).await } diff --git a/crates/rpc/src/utils.rs b/crates/node/src/serve.rs similarity index 54% rename from crates/rpc/src/utils.rs rename to crates/node/src/serve.rs index a20d561..720269a 100644 --- a/crates/rpc/src/utils.rs +++ b/crates/node/src/serve.rs @@ -5,113 +5,125 @@ use ajj::{ use axum::http::HeaderValue; use interprocess::local_socket as ls; use reqwest::Method; -use reth::{rpc::builder::CorsDomainError, tasks::TaskExecutor}; -use std::{future::IntoFuture, iter::StepBy, net::SocketAddr, ops::RangeInclusive}; +use reth::{args::RpcServerArgs, rpc::builder::CorsDomainError, tasks::TaskExecutor}; +use std::{future::IntoFuture, net::SocketAddr}; use tokio::task::JoinHandle; use tower_http::cors::{AllowOrigin, Any, CorsLayer}; use tracing::error; -macro_rules! await_handler { - ($h:expr) => { - match $h.await { - Ok(res) => res, - Err(_) => return Err("task panicked or cancelled".to_string()), - } - }; +/// Guard to shutdown the RPC servers. When dropped, this will shutdown all +/// running servers. +#[derive(Default)] +pub(crate) struct RpcServerGuard { + http: Option>, + ws: Option>, + ipc: Option, +} - (@option $h:expr) => { - match $h.await { - Ok(Some(res)) => res, - _ => return Err("task panicked or cancelled".to_string()), - } - }; +impl core::fmt::Debug for RpcServerGuard { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("RpcServerGuard") + .field("http", &self.http.is_some()) + .field("ipc", &self.ipc.is_some()) + .field("ws", &self.ws.is_some()) + .finish() + } +} - (@response $h:expr) => { - match $h.await { - Ok(res) => res, - _ => { - return ResponsePayload::internal_error_message(std::borrow::Cow::Borrowed( - "task panicked or cancelled", - )) - } +impl Drop for RpcServerGuard { + fn drop(&mut self) { + if let Some(http) = self.http.take() { + http.abort(); } - }; - - (@response_option $h:expr) => { - match $h.await { - Ok(Some(res)) => res, - _ => { - return ResponsePayload::internal_error_message(std::borrow::Cow::Borrowed( - "task panicked or cancelled", - )) - } + if let Some(ws) = self.ws.take() { + ws.abort(); } - }; + // IPC is handled by its own drop guards. + } } -pub(crate) use await_handler; +/// Configuration for the RPC server. +#[derive(Clone, Debug)] +pub(crate) struct ServeConfig { + /// HTTP server addresses. + pub http: Vec, + /// CORS header to be used for HTTP (if any). + pub http_cors: Option, + /// WS server addresses. + pub ws: Vec, + /// CORS header to be used for WS (if any). + pub ws_cors: Option, + /// IPC name info. + pub ipc: Option, +} -macro_rules! 
response_tri { - ($h:expr) => { - match $h { - Ok(res) => res, - Err(err) => return ResponsePayload::internal_error_message(err.to_string().into()), - } - }; +impl From for ServeConfig { + fn from(args: RpcServerArgs) -> Self { + let http = if args.http { + vec![SocketAddr::from((args.http_addr, args.http_port))] + } else { + vec![] + }; + let ws = + if args.ws { vec![SocketAddr::from((args.ws_addr, args.ws_port))] } else { vec![] }; - ($h:expr, $msg:literal) => { - match $h { - Ok(res) => res, - Err(_) => return ResponsePayload::internal_error_message($msg.into()), - } - }; + let http_cors = args.http_corsdomain; + let ws_cors = args.ws_allowed_origins; - ($h:expr, $obj:expr) => { - match $h { - Ok(res) => res, - Err(err) => returnResponsePayload::internal_error_with_message_and_obj( - err.to_string().into(), - $obj, - ), - } - }; + let ipc = if !args.ipcdisable { Some(args.ipcpath) } else { None }; - ($h:expr, $msg:literal, $obj:expr) => { - match $h { - Ok(res) => res, - Err(err) => { - return ResponsePayload::internal_error_with_message_and_obj($msg.into(), $obj) - } - } - }; + Self { http, http_cors, ws, ws_cors, ipc } + } } -pub(crate) use response_tri; - -/// An iterator that yields _inclusive_ block ranges of a given step size -#[derive(Debug)] -pub(crate) struct BlockRangeInclusiveIter { - iter: StepBy>, - step: u64, - end: u64, -} +impl ServeConfig { + /// Serve the router via HTTP. + async fn serve_http( + &self, + tasks: &TaskExecutor, + router: Router<()>, + ) -> eyre::Result>> { + if self.http.is_empty() { + return Ok(None); + } + serve_axum(tasks, router, &self.http, self.http_cors.as_deref()).await.map(Some) + } -impl BlockRangeInclusiveIter { - pub(crate) fn new(range: RangeInclusive, step: u64) -> Self { - Self { end: *range.end(), iter: range.step_by(step as usize + 1), step } + /// Serve the router via WebSocket. + async fn serve_ws( + &self, + tasks: &TaskExecutor, + router: Router<()>, + ) -> eyre::Result>> { + if self.ws.is_empty() { + return Ok(None); + } + serve_ws(tasks, router, &self.ws, self.ws_cors.as_deref()).await.map(Some) } -} -impl Iterator for BlockRangeInclusiveIter { - type Item = (u64, u64); + /// Serve the router on the given ipc path. + async fn serve_ipc( + &self, + tasks: &TaskExecutor, + router: &Router<()>, + ) -> eyre::Result> { + let Some(endpoint) = &self.ipc else { return Ok(None) }; + let shutdown = serve_ipc(tasks, router, endpoint).await?; + Ok(Some(shutdown)) + } - fn next(&mut self) -> Option { - let start = self.iter.next()?; - let end = (start + self.step).min(self.end); - if start > end { - return None; - } - Some((start, end)) + /// Serve the router. + pub(crate) async fn serve( + &self, + tasks: &TaskExecutor, + router: Router<()>, + ) -> eyre::Result { + let (http, ws, ipc) = tokio::try_join!( + self.serve_http(tasks, router.clone()), + self.serve_ws(tasks, router.clone()), + self.serve_ipc(tasks, &router), + )?; + Ok(RpcServerGuard { http, ws, ipc }) } } @@ -144,7 +156,7 @@ async fn serve( tasks: &TaskExecutor, addrs: &[SocketAddr], service: axum::Router, -) -> Result, eyre::Error> { +) -> eyre::Result> { let listener = tokio::net::TcpListener::bind(addrs).await?; let fut = async move { @@ -158,7 +170,7 @@ async fn serve( } /// Serve the router on the given addresses using axum. -pub async fn serve_axum( +async fn serve_axum( tasks: &TaskExecutor, router: Router<()>, addrs: &[SocketAddr], @@ -173,7 +185,7 @@ pub async fn serve_axum( } /// Serve the router on the given address using a Websocket. 
-pub async fn serve_ws( +async fn serve_ws( tasks: &TaskExecutor, router: Router<()>, addrs: &[SocketAddr], @@ -196,13 +208,13 @@ fn to_name(path: &std::ffi::OsStr) -> std::io::Result> { } /// Serve the router on the given address using IPC. -pub async fn serve_ipc( +async fn serve_ipc( tasks: &TaskExecutor, router: &Router<()>, endpoint: &str, ) -> eyre::Result { let name = std::ffi::OsStr::new(endpoint); - let name = to_name(name).expect("invalid name"); + let name = to_name(name)?; ls::ListenerOptions::new() .name(name) .serve_with_handle(router.clone(), tasks.handle().clone()) diff --git a/crates/node/src/status.rs b/crates/node/src/status.rs new file mode 100644 index 0000000..0aff564 --- /dev/null +++ b/crates/node/src/status.rs @@ -0,0 +1,8 @@ +/// Items that can be sent via the status channel. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum NodeStatus { + /// Node is booting. + Booting, + /// Node's current height. + AtHeight(u64), +} diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index 6594a41..23c35ba 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -7,46 +7,41 @@ authors.workspace = true license.workspace = true homepage.workspace = true repository.workspace = true +description = "Ethereum JSON-RPC server backed by signet-storage" [dependencies] -signet-node-types.workspace = true -signet-db.workspace = true - -signet-bundle.workspace = true +signet-storage.workspace = true +signet-cold.workspace = true +signet-hot.workspace = true +signet-storage-types.workspace = true signet-evm.workspace = true -signet-tx-cache.workspace = true +trevm = { workspace = true, features = ["call", "estimate_gas"] } signet-types.workspace = true - -ajj.workspace = true -trevm.workspace = true - +signet-tx-cache.workspace = true +signet-bundle.workspace = true alloy.workspace = true -revm-inspectors.workspace = true - -reth.workspace = true -reth-chainspec.workspace = true -reth-db.workspace = true -reth-db-common.workspace = true -reth-node-api.workspace = true -reth-rpc-eth-api.workspace = true - -axum = "0.8.1" -dashmap = "6.1.0" -eyre.workspace = true -interprocess = { version = "2.2.2", features = ["tokio"] } -reqwest.workspace = true -serde.workspace = true -thiserror.workspace = true -tokio = { workspace = true, features = ["macros"] } -tokio-util = "0.7.13" -tower-http = { version = "0.6.2", features = ["cors"] } +ajj.workspace = true +tokio.workspace = true +tokio-stream.workspace = true +tokio-util.workspace = true tracing.workspace = true -serde_json.workspace = true -futures-util = "0.3.31" +thiserror.workspace = true +serde.workspace = true +dashmap = "6.1.0" +revm-inspectors.workspace = true itertools.workspace = true -signet-block-processor.workspace = true [dev-dependencies] -signet-zenith.workspace = true +tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } +tokio-util.workspace = true +signet-cold = { workspace = true, features = ["test-utils"] } +signet-hot = { workspace = true, features = ["test-utils"] } +signet-storage.workspace = true +signet-storage-types.workspace = true signet-constants.workspace = true -chrono.workspace = true +alloy.workspace = true +serde_json.workspace = true +axum = "0.8" +tower = { version = "0.5", features = ["util"] } +http = "1" +trevm.workspace = true diff --git a/crates/rpc/README.md b/crates/rpc/README.md index 324f060..ff0eede 100644 --- a/crates/rpc/README.md +++ b/crates/rpc/README.md @@ -1,51 +1,52 @@ -## signet-rpc - -This crate contains the RPC server for Signet. 
The RPC server is a JSON-RPC -server that listens for incoming requests and processes them. The server is -built on top of the `ajj` crate, and uses the `tokio` runtime. - -This crate is intended to be used as part of a complete [reth] node. It is -incredibly difficult to use this crate without a full reth node, as it requires -a database handle and access to host configuration. If you are interested in -doing that, let us know we think it'd be cool. - -### What's new in Signet? - -Signet's RPC server draws heavily on [reth]'s data types, and borrows code from -reth's RPC handler logic. However, we make a few design decisions that are -unique to Signet: - -- The following endpoints are disabled - - wallet-related endpoints like `eth_sign`. Good Riddance. - - p2p-related endpoints like `eth_listening`. Signet has no peer. - - mining-related endpoints like `eth_mining`. Signet needs no miners. - - txpool-related endpoints like `txpool_content`. Signet wants no txpool. - - uncle-related endpoints like `eth_getUncleByBlockHashAndIndex`. Signet - knows no family. - - trie-related endpoints like `eth_getProof`. Signet grows no tries. -- Filters and subscriptions have been rewritten from the ground up. -- Bundle-related endpoints (WIP) use signet bundles from the `signet-bundle` - crate. - -See the [Signet Docs] for more information. - -### What's in this crate? - -- `RpcCtx` a struct managing the DB handle, subscriptions, filters, etc. -- The `router()` function will create a complete [`ajj::Router`]. -- `serve_*` family of methods allow quick setup of the RPC server. - -This is a work in progress. The RPC server is fully functional, but a few -things are missing. - -- The following namespaces are not well-supported yet: - - `admin_` - - `debug_` - - `trace_` - - `signet_` - -[reth]: https://github.com/paradigmxyz/reth -[`ajj`]: https://docs.rs/ajj/latest/ajj/ -[`ajj::Router`]: https://docs.rs/ajj/latest/ajj/struct.Router.html -[`tokio`]: https://docs.rs/tokio/latest/tokio/ -[Signet Docs]: https://docs.signet.sh +# signet-rpc + +Ethereum JSON-RPC server backed by `signet-storage`'s unified storage backend. + +This crate provides a standalone RPC implementation that uses hot storage +for state queries and cold storage for block, transaction, and receipt data. 
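+
+As a rough sketch of how the pieces fit together (the generics and exact
+signatures here are inferred from the node crate's usage, not a verbatim
+API reference), a router is built from a `StorageRpcCtx` and can then be
+served over HTTP, WS, or IPC:
+
+```rust,ignore
+use signet_rpc::{ChainNotifier, StorageRpcConfig, StorageRpcCtx};
+
+// `storage` is an Arc'd unified storage handle; `constants` are the
+// chain's SignetSystemConstants. Both come from node setup.
+let chain = ChainNotifier::new(128);
+
+// `None` disables sendRawTransaction forwarding (no TxCache configured).
+let ctx = StorageRpcCtx::new(storage, constants, chain, None, StorageRpcConfig::default());
+
+// A storage type parameter may be required on `router` (turbofish).
+let router = signet_rpc::router().with_state(ctx);
+```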
+ +## Namespaces + +### `eth` + +Standard Ethereum JSON-RPC methods: + +- Block queries: `blockNumber`, `getBlockByHash`, `getBlockByNumber`, + `getBlockTransactionCount*`, `getBlockReceipts`, `getBlockHeader*` +- Transaction queries: `getTransactionByHash`, `getTransactionReceipt`, + `getTransactionByBlock*AndIndex`, `getRawTransaction*` +- Account state: `getBalance`, `getStorageAt`, `getCode`, `getTransactionCount` +- EVM execution: `call`, `estimateGas`, `createAccessList` +- Gas/fees: `gasPrice`, `maxPriorityFeePerGas`, `feeHistory` +- Logs & filters: `getLogs`, `newFilter`, `newBlockFilter`, + `getFilterChanges`, `getFilterLogs`, `uninstallFilter` +- Subscriptions: `subscribe`, `unsubscribe` +- Transaction submission: `sendRawTransaction` (optional, via `TxCache`) +- Uncle queries: `getUncleCountByBlock*`, `getUncleByBlock*AndIndex` + (always return 0 / null — Signet has no uncle blocks) +- Misc: `chainId`, `syncing` + +### `debug` + +- `traceBlockByNumber`, `traceBlockByHash` — trace all transactions in a block +- `traceTransaction` — trace a single transaction by hash + +### `signet` + +- `sendOrder` — forward a signed order to the transaction cache +- `callBundle` — simulate a bundle against a specific block + +## Unsupported Methods + +The following `eth` methods are **not supported** and return +`method_not_found`: + +- **Mining**: `getWork`, `hashrate`, `mining`, `submitHashrate`, `submitWork` + — Signet does not use proof-of-work. +- **Account management**: `accounts`, `sign`, `signTransaction`, + `signTypedData`, `sendTransaction` — the RPC server does not hold keys. + Use `sendRawTransaction` with a pre-signed transaction instead. +- **Blob transactions**: `blobBaseFee` — Signet does not support EIP-4844 + blob transactions. +- **Other**: `protocolVersion`, `getProof`, `newPendingTransactionFilter`, + `coinbase`. diff --git a/crates/rpc/src/config.rs b/crates/rpc/src/config.rs deleted file mode 100644 index 7dbc579..0000000 --- a/crates/rpc/src/config.rs +++ /dev/null @@ -1,121 +0,0 @@ -use crate::utils::{serve_axum, serve_ipc, serve_ws}; -use ajj::{Router, pubsub::ServerShutdown}; -use reth::{args::RpcServerArgs, tasks::TaskExecutor}; -use std::net::SocketAddr; -use tokio::task::JoinHandle; - -/// Guard to shutdown the RPC servers. When dropped, this will shutdown all -/// running servers -#[derive(Default)] -pub struct RpcServerGuard { - http: Option<JoinHandle<()>>, - ws: Option<JoinHandle<()>>, - ipc: Option<ServerShutdown>, -} - -impl core::fmt::Debug for RpcServerGuard { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("RpcServerGuard") - .field("http", &self.http.is_some()) - .field("ipc", &self.ipc.is_some()) - .field("ws", &self.ws.is_some()) - .finish() - } -} - -impl Drop for RpcServerGuard { - fn drop(&mut self) { - if let Some(http) = self.http.take() { - http.abort(); - } - if let Some(ws) = self.ws.take() { - ws.abort(); - } - // IPC is handled by its own drop guards. - } -} - -/// Configuration for the RPC server. -#[derive(Clone, Debug)] -pub struct ServeConfig { - /// HTTP server addresses. - pub http: Vec<SocketAddr>, - /// CORS header to be used for HTTP (if any). - pub http_cors: Option<String>, - /// WS server addresses. - pub ws: Vec<SocketAddr>, - /// CORS header to be used for WS (if any). - pub ws_cors: Option<String>, - /// IPC name info.
- pub ipc: Option<String>, -} - -impl From<RpcServerArgs> for ServeConfig { - fn from(args: RpcServerArgs) -> Self { - let http = if args.http { - vec![SocketAddr::from((args.http_addr, args.http_port))] - } else { - vec![] - }; - let ws = - if args.ws { vec![SocketAddr::from((args.ws_addr, args.ws_port))] } else { vec![] }; - - let http_cors = args.http_corsdomain; - let ws_cors = args.ws_allowed_origins; - - let ipc = if !args.ipcdisable { Some(args.ipcpath) } else { None }; - - Self { http, http_cors, ws, ws_cors, ipc } - } -} - -impl ServeConfig { - /// Serve the router on the given addresses. - async fn serve_http( - &self, - tasks: &TaskExecutor, - router: Router<()>, - ) -> eyre::Result<Option<JoinHandle<()>>> { - if self.http.is_empty() { - return Ok(None); - } - serve_axum(tasks, router, &self.http, self.http_cors.as_deref()).await.map(Some) - } - - /// Serve the router on the given addresses. - async fn serve_ws( - &self, - tasks: &TaskExecutor, - router: Router<()>, - ) -> eyre::Result<Option<JoinHandle<()>>> { - if self.ws.is_empty() { - return Ok(None); - } - serve_ws(tasks, router, &self.ws, self.ws_cors.as_deref()).await.map(Some) - } - - /// Serve the router on the given ipc path. - async fn serve_ipc( - &self, - tasks: &TaskExecutor, - router: &Router<()>, - ) -> eyre::Result<Option<ServerShutdown>> { - let Some(endpoint) = &self.ipc else { return Ok(None) }; - let shutdown = serve_ipc(tasks, router, endpoint).await?; - Ok(Some(shutdown)) - } - - /// Serve the router. - pub async fn serve( - &self, - tasks: &TaskExecutor, - router: Router<()>, - ) -> eyre::Result<RpcServerGuard> { - let (http, ws, ipc) = tokio::try_join!( - self.serve_http(tasks, router.clone()), - self.serve_ws(tasks, router.clone()), - self.serve_ipc(tasks, &router), - )?; - Ok(RpcServerGuard { http, ws, ipc }) - } -} diff --git a/crates/rpc/src/config/chain_notifier.rs b/crates/rpc/src/config/chain_notifier.rs new file mode 100644 index 0000000..3ae8507 --- /dev/null +++ b/crates/rpc/src/config/chain_notifier.rs @@ -0,0 +1,66 @@ +//! Shared chain state between the node and RPC layer. + +use crate::{config::resolve::BlockTags, interest::NewBlockNotification}; +use tokio::sync::broadcast; + +/// Shared chain state between the node and RPC layer. +/// +/// Combines block tag tracking and new-block notification into a single +/// unit that both the node and RPC context hold. Cloning is cheap — all +/// fields are backed by `Arc`. +/// +/// # Construction +/// +/// ``` +/// use signet_rpc::ChainNotifier; +/// +/// let notifier = ChainNotifier::new(128); +/// assert_eq!(notifier.tags().latest(), 0); +/// +/// notifier.tags().set_latest(42); +/// assert_eq!(notifier.tags().latest(), 42); +/// ``` +#[derive(Debug, Clone)] +pub struct ChainNotifier { + tags: BlockTags, + notif_tx: broadcast::Sender<NewBlockNotification>, +} + +impl ChainNotifier { + /// Create a new [`ChainNotifier`] with zeroed tags and a broadcast + /// channel of the given capacity. + pub fn new(channel_capacity: usize) -> Self { + let tags = BlockTags::new(0, 0, 0); + let (notif_tx, _) = broadcast::channel(channel_capacity); + Self { tags, notif_tx } + } + + /// Access the block tags. + pub const fn tags(&self) -> &BlockTags { + &self.tags + } + + /// Send a new block notification. + /// + /// Returns `Ok(receiver_count)` or `Err` if there are no active + /// receivers (which is not usually an error condition). + #[allow(clippy::result_large_err)] + pub fn send_notification( + &self, + notif: NewBlockNotification, + ) -> Result<usize, broadcast::error::SendError<NewBlockNotification>> { + self.notif_tx.send(notif) + } + + /// Subscribe to new block notifications.
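+ /// + /// A minimal sketch (the channel itself needs no async runtime): + /// + /// ``` + /// use signet_rpc::ChainNotifier; + /// + /// let notifier = ChainNotifier::new(8); + /// let _rx = notifier.subscribe(); + /// assert_eq!(notifier.notif_sender().receiver_count(), 1); + /// ```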
+ pub fn subscribe(&self) -> broadcast::Receiver<NewBlockNotification> { + self.notif_tx.subscribe() + } + + /// Get a clone of the broadcast sender. + /// + /// Used by the subscription manager to create its own receiver. + pub fn notif_sender(&self) -> broadcast::Sender<NewBlockNotification> { + self.notif_tx.clone() + } +} diff --git a/crates/rpc/src/config/ctx.rs b/crates/rpc/src/config/ctx.rs new file mode 100644 index 0000000..2fff95a --- /dev/null +++ b/crates/rpc/src/config/ctx.rs @@ -0,0 +1,286 @@ +//! RPC context wrapping [`UnifiedStorage`]. + +use crate::{ + config::{ + ChainNotifier, StorageRpcConfig, + resolve::{BlockTags, ResolveError}, + }, + eth::EthError, + interest::{FilterManager, SubscriptionManager}, +}; +use alloy::eips::{BlockId, BlockNumberOrTag}; +use signet_cold::ColdStorageReadHandle; +use signet_hot::{ + HotKv, + db::HotDbRead, + model::{HotKvRead, RevmRead}, +}; +use signet_storage::UnifiedStorage; +use signet_tx_cache::TxCache; +use signet_types::constants::SignetSystemConstants; +use std::sync::Arc; +use tokio::sync::Semaphore; +use trevm::revm::database::{DBErrorMarker, StateBuilder}; + +/// Resolved block context for EVM execution. +/// +/// Contains the header and a revm-compatible database snapshot at the +/// resolved block height, ready for use with `signet_evm`. +#[derive(Debug)] +pub(crate) struct EvmBlockContext { + /// The resolved block header. + pub header: alloy::consensus::Header, + /// The revm database at the resolved height, wrapped in [`State`] to + /// provide [`DatabaseCommit`] support required by `eth_estimateGas` + /// (binary-search commits), `eth_createAccessList`, `signet_callBundle`, + /// and debug tracing. + /// + /// [`State`]: trevm::revm::database::State + /// [`DatabaseCommit`]: trevm::revm::database::DatabaseCommit + pub db: trevm::revm::database::State, +} + +/// RPC context backed by [`UnifiedStorage`]. +/// +/// Provides access to hot storage (state), cold storage (blocks/txs/receipts), +/// block tag resolution, and optional transaction forwarding. +/// +/// # Construction +/// +/// Call [`StorageRpcCtx::new`] with unified storage, system constants, +/// a [`ChainNotifier`], an optional [`TxCache`], and [`StorageRpcConfig`]. +#[derive(Debug)] +pub struct StorageRpcCtx { + inner: Arc>, +} + +impl Clone for StorageRpcCtx { + fn clone(&self) -> Self { + Self { inner: Arc::clone(&self.inner) } + } +} + +#[derive(Debug)] +struct StorageRpcCtxInner { + storage: Arc>, + constants: SignetSystemConstants, + chain: ChainNotifier, + tx_cache: Option<TxCache>, + config: StorageRpcConfig, + tracing_semaphore: Arc<Semaphore>, + filter_manager: FilterManager, + sub_manager: SubscriptionManager, +} + +impl StorageRpcCtx { + /// Create a new storage-backed RPC context. + /// + /// The [`ChainNotifier`] provides block tag tracking and a broadcast + /// sender for new block notifications. The subscription manager + /// subscribes to this channel to push updates to WebSocket clients. + pub fn new( + storage: Arc>, + constants: SignetSystemConstants, + chain: ChainNotifier, + tx_cache: Option<TxCache>, + config: StorageRpcConfig, + ) -> Self { + let tracing_semaphore = Arc::new(Semaphore::new(config.max_tracing_requests)); + let filter_manager = FilterManager::new(config.stale_filter_ttl, config.stale_filter_ttl); + let sub_manager = SubscriptionManager::new(chain.notif_sender(), config.stale_filter_ttl); + Self { + inner: Arc::new(StorageRpcCtxInner { + storage, + constants, + chain, + tx_cache, + config, + tracing_semaphore, + filter_manager, + sub_manager, + }), + } + } + + /// Access the unified storage.
+ pub fn storage(&self) -> &UnifiedStorage { + &self.inner.storage + } + + /// Get a cold storage read handle. + pub fn cold(&self) -> ColdStorageReadHandle { + self.inner.storage.cold_reader() + } + + /// Get a hot storage read transaction. + pub fn hot_reader(&self) -> signet_storage::StorageResult { + self.inner.storage.reader() + } + + /// Access the block tags. + pub fn tags(&self) -> &BlockTags { + self.inner.chain.tags() + } + + /// Access the system constants. + pub fn constants(&self) -> &SignetSystemConstants { + &self.inner.constants + } + + /// Get the chain ID. + pub fn chain_id(&self) -> u64 { + self.inner.constants.ru_chain_id() + } + + /// Access the RPC configuration. + pub fn config(&self) -> &StorageRpcConfig { + &self.inner.config + } + + /// Acquire a permit from the tracing semaphore. + /// + /// Limits concurrent tracing/debug requests. Callers should hold + /// the permit for the duration of their tracing operation. + pub async fn acquire_tracing_permit(&self) -> tokio::sync::OwnedSemaphorePermit { + Arc::clone(&self.inner.tracing_semaphore) + .acquire_owned() + .await + .expect("tracing semaphore closed") + } + + /// Access the optional tx cache. + pub fn tx_cache(&self) -> Option<&TxCache> { + self.inner.tx_cache.as_ref() + } + + /// Access the filter manager. + pub(crate) fn filter_manager(&self) -> &FilterManager { + &self.inner.filter_manager + } + + /// Access the subscription manager. + pub(crate) fn sub_manager(&self) -> &SubscriptionManager { + &self.inner.sub_manager + } + + /// Resolve a [`BlockNumberOrTag`] to a block number. + /// + /// This is synchronous — no cold storage lookup is needed. + /// + /// - `Latest` / `Pending` → latest tag + /// - `Safe` → safe tag + /// - `Finalized` → finalized tag + /// - `Earliest` → `0` + /// - `Number(n)` → `n` + pub(crate) fn resolve_block_tag(&self, tag: BlockNumberOrTag) -> u64 { + match tag { + BlockNumberOrTag::Latest | BlockNumberOrTag::Pending => self.tags().latest(), + BlockNumberOrTag::Safe => self.tags().safe(), + BlockNumberOrTag::Finalized => self.tags().finalized(), + BlockNumberOrTag::Earliest => 0, + BlockNumberOrTag::Number(n) => n, + } + } + + /// Resolve a [`BlockId`] to a block number. + /// + /// For tag/number-based IDs, resolves synchronously via + /// [`resolve_block_tag`](Self::resolve_block_tag). For hash-based IDs, + /// looks up the block number from hot storage's `HeaderNumbers` table. + pub(crate) fn resolve_block_id(&self, id: BlockId) -> Result<u64, ResolveError> + where + ::Error: std::error::Error + Send + Sync + 'static, + { + match id { + BlockId::Number(tag) => Ok(self.resolve_block_tag(tag)), + BlockId::Hash(h) => { + let reader = self.hot_reader()?; + reader + .get_header_number(&h.block_hash) + .map_err(|e| ResolveError::Db(Box::new(e)))? + .ok_or(ResolveError::HashNotFound(h.block_hash)) + } + } + } + + /// Resolve a [`BlockId`] to a header from hot storage. + /// + /// For hash-based IDs, fetches the header directly by hash. For + /// tag/number-based IDs, resolves the tag then fetches the header by + /// number. Returns `None` if the header is not found.
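+ /// + /// Note that `Pending` resolves to the latest tag here (via + /// [`resolve_block_tag`](Self::resolve_block_tag)); pending-block + /// synthesis happens only in [`resolve_evm_block`](Self::resolve_evm_block).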
+ pub(crate) fn resolve_header( + &self, + id: BlockId, + ) -> Result, ResolveError> + where + ::Error: std::error::Error + Send + Sync + 'static, + { + let reader = self.hot_reader()?; + match id { + BlockId::Hash(h) => { + reader.header_by_hash(&h.block_hash).map_err(|e| ResolveError::Db(Box::new(e))) + } + BlockId::Number(tag) => { + let height = self.resolve_block_tag(tag); + reader.get_header(height).map_err(|e| ResolveError::Db(Box::new(e))) + } + } + } + + /// Create a revm-compatible database at a specific block height. + /// + /// Wraps the underlying `RevmRead` in [`State`] so that the returned + /// database implements both `Database` and `DatabaseCommit`. This is + /// needed by EVM operations that mutate intermediate state (gas + /// estimation, access-list generation, bundle simulation, debug + /// tracing). + /// + /// [`State`]: trevm::revm::database::State + pub fn revm_state_at_height( + &self, + height: u64, + ) -> signet_storage::StorageResult>> + where + ::Error: DBErrorMarker, + { + let revm_read = self.inner.storage.revm_reader_at_height(height)?; + Ok(StateBuilder::new_with_database(revm_read).build()) + } + + /// Resolve a [`BlockId`] to a header and revm database in one pass. + /// + /// Fetches the header from hot storage and creates a revm-compatible + /// database snapshot at the resolved block height. + /// + /// For `Pending` block IDs, the ID is remapped to `Latest` and a + /// next-block header is synthesized (number incremented, timestamp + /// advanced by the host slot duration, projected base fee, gas limit + /// from config). State is loaded at the latest block in both cases. + pub(crate) fn resolve_evm_block( + &self, + id: BlockId, + ) -> Result>, EthError> + where + ::Error: DBErrorMarker, + { + let pending = id.is_pending(); + let id = if pending { BlockId::latest() } else { id }; + + let sealed = self.resolve_header(id)?.ok_or(EthError::BlockNotFound(id))?; + let db = self.revm_state_at_height(sealed.number)?; + + let parent_hash = sealed.hash(); + let mut header = sealed.into_inner(); + + if pending { + header.parent_hash = parent_hash; + header.number += 1; + header.timestamp += self.constants().host().slot_duration(); + header.base_fee_per_gas = + header.next_block_base_fee(alloy::eips::eip1559::BaseFeeParams::ethereum()); + header.gas_limit = self.config().rpc_gas_cap; + } + + Ok(EvmBlockContext { header, db }) + } +} diff --git a/crates/rpc/src/config/gas_oracle.rs b/crates/rpc/src/config/gas_oracle.rs new file mode 100644 index 0000000..76b1872 --- /dev/null +++ b/crates/rpc/src/config/gas_oracle.rs @@ -0,0 +1,82 @@ +//! Cold-storage gas oracle for computing gas price suggestions. +//! +//! Reads recent block headers and transactions from cold storage to +//! compute a suggested tip cap based on recent transaction activity. +//! Behavior mirrors reth's `GasPriceOracle`: a configurable default +//! price when no transactions exist, an `ignore_price` floor, and a +//! `max_price` cap. + +use alloy::{consensus::Transaction, primitives::U256}; +use signet_cold::{ColdStorageError, ColdStorageReadHandle, HeaderSpecifier}; + +use crate::config::StorageRpcConfig; + +/// Suggest a tip cap based on recent transaction tips. +/// +/// Reads the last `gas_oracle_block_count` blocks from cold storage, +/// computes the effective tip per gas for each transaction, filters +/// tips below `ignore_price`, sorts the remainder, and returns the +/// value at `gas_oracle_percentile`, clamped to `max_price`.
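+/// +/// For example, with 100 qualifying tips and the default 60th percentile, +/// the sampled index is `(0.60 * 99.0) as usize == 59`, i.e. the 60th +/// smallest tip in the sorted sample.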
+/// +/// Returns `default_gas_price` (default 1 Gwei) when no qualifying +/// transactions are found. +pub(crate) async fn suggest_tip_cap( + cold: &ColdStorageReadHandle, + latest: u64, + config: &StorageRpcConfig, +) -> Result<U256, ColdStorageError> { + let block_count = config.gas_oracle_block_count.min(latest + 1); + let start = latest.saturating_sub(block_count - 1); + + let specs: Vec<_> = (start..=latest).map(HeaderSpecifier::Number).collect(); + let headers = cold.get_headers(specs).await?; + + // Collect blocks that have headers, then read their transactions + // in parallel to avoid sequential cold-storage round-trips. + let blocks_with_fees: Vec<_> = headers + .into_iter() + .enumerate() + .filter_map(|(offset, h)| { + h.map(|header| (start + offset as u64, header.base_fee_per_gas.unwrap_or_default())) + }) + .collect(); + + let mut join_set = tokio::task::JoinSet::new(); + for (block_num, base_fee) in &blocks_with_fees { + let cold = cold.clone(); + let block_num = *block_num; + let base_fee = *base_fee; + join_set.spawn(async move { + cold.get_transactions_in_block(block_num).await.map(|txs| (txs, base_fee)) + }); + } + + let mut all_tips: Vec<u128> = Vec::new(); + while let Some(result) = join_set.join_next().await { + let (txs, base_fee) = result.expect("tx read task panicked")?; + for tx in &txs { + if let Some(tip) = tx.effective_tip_per_gas(base_fee) + && config.ignore_price.is_none_or(|floor| tip >= floor) + { + all_tips.push(tip); + } + } + } + + if all_tips.is_empty() { + return Ok(config.default_gas_price.map_or(U256::ZERO, U256::from)); + } + + all_tips.sort_unstable(); + + let index = ((config.gas_oracle_percentile / 100.0) * (all_tips.len() - 1) as f64) as usize; + let index = index.min(all_tips.len() - 1); + + let mut price = U256::from(all_tips[index]); + + if let Some(max) = config.max_price { + price = price.min(U256::from(max)); + } + + Ok(price) +} diff --git a/crates/rpc/src/config/mod.rs b/crates/rpc/src/config/mod.rs new file mode 100644 index 0000000..3dbba1a --- /dev/null +++ b/crates/rpc/src/config/mod.rs @@ -0,0 +1,19 @@ +//! Configuration, context, and block tag resolution. +//! +//! This module groups the crate's configuration types, the RPC context +//! that wraps [`signet_storage::UnifiedStorage`], gas oracle helpers, +//! and block tag / block ID resolution logic. + +mod chain_notifier; +pub use chain_notifier::ChainNotifier; +mod rpc_config; +pub use rpc_config::StorageRpcConfig; + +mod ctx; +pub(crate) use ctx::EvmBlockContext; +pub use ctx::StorageRpcCtx; + +pub(crate) mod gas_oracle; + +pub(crate) mod resolve; +pub use resolve::{BlockTags, SyncStatus}; diff --git a/crates/rpc/src/config/resolve.rs b/crates/rpc/src/config/resolve.rs new file mode 100644 index 0000000..47d4a1f --- /dev/null +++ b/crates/rpc/src/config/resolve.rs @@ -0,0 +1,145 @@ +//! Block tag tracking and BlockId resolution. +//! +//! [`BlockTags`] holds externally-updated atomic values for Latest, Safe, +//! and Finalized block numbers. The RPC context owner is responsible for +//! updating these as the chain progresses. + +use alloy::primitives::B256; +use signet_storage::StorageError; +use std::sync::{ + Arc, RwLock, + atomic::{AtomicU64, Ordering}, +}; + +/// Snapshot of the node's syncing progress. +/// +/// When the node is still catching up to the network, this struct +/// describes the sync window. Once fully synced, the context owner +/// should call [`BlockTags::clear_sync_status`] to indicate that +/// syncing is complete.
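+/// +/// The three fields correspond to the `startingBlock`, `currentBlock`, and +/// `highestBlock` fields of the standard `eth_syncing` response object.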
+#[derive(Debug, Clone, Copy)] +pub struct SyncStatus { + /// Block number the node started syncing from. + pub starting_block: u64, + /// Current block the node has synced to. + pub current_block: u64, + /// Highest known block number on the network. + pub highest_block: u64, +} + +/// Externally-updated block tag tracker. +/// +/// Each tag is an `Arc<AtomicU64>` that the caller updates as the chain +/// progresses. The RPC layer reads these atomically for tag resolution. +/// +/// # Example +/// +/// ``` +/// use signet_rpc::BlockTags; +/// +/// let tags = BlockTags::new(100, 95, 90); +/// assert_eq!(tags.latest(), 100); +/// +/// tags.set_latest(101); +/// assert_eq!(tags.latest(), 101); +/// +/// // Update all tags at once. +/// tags.update_all(200, 195, 190); +/// assert_eq!(tags.latest(), 200); +/// assert_eq!(tags.safe(), 195); +/// assert_eq!(tags.finalized(), 190); +/// ``` +#[derive(Debug, Clone)] +pub struct BlockTags { + latest: Arc<AtomicU64>, + safe: Arc<AtomicU64>, + finalized: Arc<AtomicU64>, + sync_status: Arc<RwLock<Option<SyncStatus>>>, +} + +impl BlockTags { + /// Create new block tags with initial values. + pub fn new(latest: u64, safe: u64, finalized: u64) -> Self { + Self { + latest: Arc::new(AtomicU64::new(latest)), + safe: Arc::new(AtomicU64::new(safe)), + finalized: Arc::new(AtomicU64::new(finalized)), + sync_status: Arc::new(RwLock::new(None)), + } + } + + /// Get the latest block number. + pub fn latest(&self) -> u64 { + self.latest.load(Ordering::Acquire) + } + + /// Get the safe block number. + pub fn safe(&self) -> u64 { + self.safe.load(Ordering::Acquire) + } + + /// Get the finalized block number. + pub fn finalized(&self) -> u64 { + self.finalized.load(Ordering::Acquire) + } + + /// Set the latest block number. + pub fn set_latest(&self, n: u64) { + self.latest.store(n, Ordering::Release); + } + + /// Set the safe block number. + pub fn set_safe(&self, n: u64) { + self.safe.store(n, Ordering::Release); + } + + /// Set the finalized block number. + pub fn set_finalized(&self, n: u64) { + self.finalized.store(n, Ordering::Release); + } + + /// Update all three tags in one call. + /// + /// Stores are ordered latest → safe → finalized so that readers + /// always observe a consistent or slightly-stale view (never a + /// finalized that is ahead of the latest it was published with). + pub fn update_all(&self, latest: u64, safe: u64, finalized: u64) { + self.latest.store(latest, Ordering::Release); + self.safe.store(safe, Ordering::Release); + self.finalized.store(finalized, Ordering::Release); + } + + /// Returns `true` if the node is currently syncing. + pub fn is_syncing(&self) -> bool { + self.sync_status.read().expect("sync status lock poisoned").is_some() + } + + /// Returns the current sync status, if the node is syncing. + pub fn sync_status(&self) -> Option<SyncStatus> { + *self.sync_status.read().expect("sync status lock poisoned") + } + + /// Update the sync status to indicate the node is syncing. + pub fn set_sync_status(&self, status: SyncStatus) { + *self.sync_status.write().expect("sync status lock poisoned") = Some(status); + } + + /// Clear the sync status, indicating the node is fully synced. + pub fn clear_sync_status(&self) { + *self.sync_status.write().expect("sync status lock poisoned") = None; + } +} + +/// Error resolving a block identifier. +#[derive(Debug, thiserror::Error)] +pub enum ResolveError { + /// Storage error (e.g. failed to open a read transaction). + #[error(transparent)] + Storage(#[from] StorageError), + /// Database read error. + #[error("{0}")] + Db(Box<dyn std::error::Error + Send + Sync>), + /// Block hash not found.
+ #[error("block hash not found: {0}")] + HashNotFound(B256), +} diff --git a/crates/rpc/src/config/rpc_config.rs b/crates/rpc/src/config/rpc_config.rs new file mode 100644 index 0000000..b0841f8 --- /dev/null +++ b/crates/rpc/src/config/rpc_config.rs @@ -0,0 +1,223 @@ +//! Configuration for the storage-backed RPC server. + +use std::time::Duration; + +/// Configuration for the storage-backed ETH RPC server. +/// +/// Mirrors the subset of reth's `EthConfig` that applies to +/// storage-backed RPC. +/// +/// # Example +/// +/// ``` +/// use signet_rpc::StorageRpcConfig; +/// +/// // Use defaults (matches reth defaults). +/// let config = StorageRpcConfig::default(); +/// assert_eq!(config.rpc_gas_cap, 30_000_000); +/// +/// // Use the builder to customise individual fields. +/// let config = StorageRpcConfig::builder() +/// .rpc_gas_cap(50_000_000) +/// .max_blocks_per_filter(5_000) +/// .build(); +/// assert_eq!(config.rpc_gas_cap, 50_000_000); +/// assert_eq!(config.max_blocks_per_filter, 5_000); +/// // Other fields retain their defaults. +/// assert_eq!(config.max_logs_per_response, 20_000); +/// ``` +#[derive(Debug, Clone, Copy)] +pub struct StorageRpcConfig { + /// Maximum gas for `eth_call` and `eth_estimateGas`. + /// + /// Default: `30_000_000` (30M gas). + pub rpc_gas_cap: u64, + + /// Maximum block range per `eth_getLogs` query. + /// + /// Default: `10_000`. + pub max_blocks_per_filter: u64, + + /// Maximum number of logs returned per `eth_getLogs` response. + /// Set to `0` to disable the limit. + /// + /// Default: `20_000`. + pub max_logs_per_response: usize, + + /// Maximum wall-clock time for a single log query. + /// + /// If a log query exceeds this duration, the stream is terminated + /// early and the handler returns a deadline-exceeded error. + /// + /// Default: `10` seconds. + pub max_log_query_deadline: Duration, + + /// Maximum concurrent tracing/debug requests. + /// + /// Controls the size of the semaphore that gates debug + /// namespace calls. + /// + /// Default: `25`. + pub max_tracing_requests: usize, + + /// Time-to-live for stale filters and subscriptions. + /// + /// Default: `5 minutes`. + pub stale_filter_ttl: Duration, + + /// Number of recent blocks to consider for gas price suggestions. + /// + /// Default: `20`. + pub gas_oracle_block_count: u64, + + /// Percentile of effective tips to use as the gas price suggestion. + /// + /// Default: `60.0`. + pub gas_oracle_percentile: f64, + + /// Default gas price returned when no recent transactions exist. + /// + /// Reth defaults to 1 Gwei. Set to `None` to return zero. + /// + /// Default: `Some(1_000_000_000)` (1 Gwei). + pub default_gas_price: Option<u128>, + + /// Minimum effective tip to include in the oracle sample. + /// + /// Tips below this threshold are discarded, matching reth's + /// `ignore_price` behavior. + /// + /// Default: `Some(2)` (2 wei). + pub ignore_price: Option<u128>, + + /// Maximum gas price the oracle will ever suggest. + /// + /// Default: `Some(500_000_000_000)` (500 Gwei). + pub max_price: Option<u128>, + + /// Maximum header history for `eth_feeHistory` without percentiles. + /// + /// Default: `1024`. + pub max_header_history: u64, + + /// Maximum block history for `eth_feeHistory` with percentiles. + /// + /// Default: `1024`. + pub max_block_history: u64, + + /// Default timeout in milliseconds for bundle simulation. + /// + /// Used when the bundle request does not specify its own timeout. + /// + /// Default: `1000` (1 second).
+ pub default_bundle_timeout_ms: u64, +} + +impl StorageRpcConfig { + /// Create a new builder with all fields set to their defaults. + pub fn builder() -> StorageRpcConfigBuilder { + StorageRpcConfigBuilder::default() + } +} + +impl Default for StorageRpcConfig { + fn default() -> Self { + Self { + rpc_gas_cap: 30_000_000, + max_blocks_per_filter: 10_000, + max_logs_per_response: 20_000, + max_log_query_deadline: Duration::from_secs(10), + max_tracing_requests: 25, + stale_filter_ttl: Duration::from_secs(5 * 60), + gas_oracle_block_count: 20, + gas_oracle_percentile: 60.0, + default_gas_price: Some(1_000_000_000), + ignore_price: Some(2), + max_price: Some(500_000_000_000), + max_header_history: 1024, + max_block_history: 1024, + default_bundle_timeout_ms: 1000, + } + } +} + +/// Builder for [`StorageRpcConfig`]. +/// +/// All fields default to the same values as [`StorageRpcConfig::default`]. +#[derive(Debug, Clone, Copy, Default)] +pub struct StorageRpcConfigBuilder { + inner: StorageRpcConfig, +} + +impl StorageRpcConfigBuilder { + /// Set the maximum gas for `eth_call` and `eth_estimateGas`. + pub const fn rpc_gas_cap(mut self, cap: u64) -> Self { + self.inner.rpc_gas_cap = cap; + self + } + + /// Set the maximum block range per `eth_getLogs` query. + pub const fn max_blocks_per_filter(mut self, max: u64) -> Self { + self.inner.max_blocks_per_filter = max; + self + } + + /// Set the maximum number of logs returned per response. + pub const fn max_logs_per_response(mut self, max: usize) -> Self { + self.inner.max_logs_per_response = max; + self + } + + /// Set the maximum wall-clock time for a single log query. + pub const fn max_log_query_deadline(mut self, deadline: Duration) -> Self { + self.inner.max_log_query_deadline = deadline; + self + } + + /// Set the maximum concurrent tracing/debug requests. + pub const fn max_tracing_requests(mut self, max: usize) -> Self { + self.inner.max_tracing_requests = max; + self + } + + /// Set the time-to-live for stale filters and subscriptions. + pub const fn stale_filter_ttl(mut self, ttl: Duration) -> Self { + self.inner.stale_filter_ttl = ttl; + self + } + + /// Set the number of recent blocks for gas price suggestions. + pub const fn gas_oracle_block_count(mut self, count: u64) -> Self { + self.inner.gas_oracle_block_count = count; + self + } + + /// Set the percentile of effective tips for gas price suggestions. + pub const fn gas_oracle_percentile(mut self, percentile: f64) -> Self { + self.inner.gas_oracle_percentile = percentile; + self + } + + /// Set the maximum header history for `eth_feeHistory`. + pub const fn max_header_history(mut self, max: u64) -> Self { + self.inner.max_header_history = max; + self + } + + /// Set the maximum block history for `eth_feeHistory`. + pub const fn max_block_history(mut self, max: u64) -> Self { + self.inner.max_block_history = max; + self + } + + /// Set the default bundle simulation timeout in milliseconds. + pub const fn default_bundle_timeout_ms(mut self, ms: u64) -> Self { + self.inner.default_bundle_timeout_ms = ms; + self + } + + /// Build the configuration. 
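+ /// + /// A no-op round trip through the builder yields the defaults: + /// + /// ``` + /// use signet_rpc::StorageRpcConfig; + /// + /// let config = StorageRpcConfig::builder().build(); + /// assert_eq!(config.max_tracing_requests, 25); + /// ```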
+ pub const fn build(self) -> StorageRpcConfig { + self.inner + } +} diff --git a/crates/rpc/src/ctx/fee_hist.rs b/crates/rpc/src/ctx/fee_hist.rs deleted file mode 100644 index 4ceaf14..0000000 --- a/crates/rpc/src/ctx/fee_hist.rs +++ /dev/null @@ -1,150 +0,0 @@ -use reth::{ - core::primitives::SealedBlock, - primitives::{Block, RecoveredBlock}, - providers::{CanonStateNotification, Chain}, -}; -use signet_types::MagicSig; -use std::sync::Arc; - -/// Removes Signet system transactions from the block. -fn strip_block(block: RecoveredBlock) -> RecoveredBlock { - let (sealed, mut senders) = block.split_sealed(); - let (header, mut body) = sealed.split_sealed_header_body(); - - // This is the index of the first transaction that has a system magic - // signature. - let sys_index = body - .transactions - .partition_point(|tx| MagicSig::try_from_signature(tx.signature()).is_none()); - - body.transactions.truncate(sys_index); - senders.truncate(sys_index); - - let sealed = SealedBlock::from_sealed_parts(header, body); - - RecoveredBlock::new_sealed(sealed, senders) -} - -/// Removes Signet system transactions from the chain. This function uses -/// `Arc::make_mut` to clone the contents of the Arc and modify the new -/// instance. -fn strip_chain(chain: &Chain) -> Arc { - // Takes the contents out, replacing with default - let (blocks, outcome, trie) = chain.clone().into_inner(); - - // Strip each block - let blocks: Vec> = blocks.into_blocks().map(strip_block).collect(); - - // Replace the original chain with the stripped version - Arc::new(Chain::new(blocks, outcome, trie)) -} - -/// Strips Signet system transactions from the `CanonStateNotification`. -pub(crate) fn strip_signet_system_txns(notif: CanonStateNotification) -> CanonStateNotification { - match notif { - CanonStateNotification::Commit { new } => { - CanonStateNotification::Commit { new: strip_chain(&new) } - } - CanonStateNotification::Reorg { mut old, mut new } => { - old = strip_chain(&old); - new = strip_chain(&new); - - CanonStateNotification::Reorg { old, new } - } - } -} - -#[cfg(test)] -mod test { - use alloy::{ - consensus::{TxEip1559, TxEnvelope}, - primitives::{Address, B256}, - signers::Signature, - }; - use reth::primitives::{BlockBody, Header, SealedHeader}; - - use super::*; - - fn test_magic_sig_tx() -> TxEnvelope { - let sig = MagicSig::enter(B256::repeat_byte(0x22), 3); - - let sig = sig.into(); - - dbg!(MagicSig::try_from_signature(&sig).is_some()); - - TxEnvelope::new_unchecked(TxEip1559::default().into(), sig, B256::repeat_byte(0x33)) - } - - fn test_non_magic_sig_tx() -> TxEnvelope { - let sig = Signature::test_signature(); - TxEnvelope::new_unchecked(TxEip1559::default().into(), sig, B256::repeat_byte(0x44)) - } - - fn test_block_body() -> BlockBody { - BlockBody { - transactions: vec![ - test_non_magic_sig_tx().into(), - test_non_magic_sig_tx().into(), - test_magic_sig_tx().into(), - test_magic_sig_tx().into(), - ], - ..Default::default() - } - } - - fn test_sealed_header(number: u64) -> SealedHeader { - let header = Header { number, ..Default::default() }; - SealedHeader::new_unhashed(header) - } - - fn test_sealed_block(block_num: u64) -> SealedBlock { - SealedBlock::from_sealed_parts(test_sealed_header(block_num), test_block_body()) - } - - fn test_block(block_num: u64) -> RecoveredBlock { - RecoveredBlock::new_sealed( - test_sealed_block(block_num), - vec![Address::repeat_byte(0x11); 4], - ) - } - - fn test_chain(count: u64) -> Arc { - let blocks = (0..count).map(test_block); - Arc::new(Chain::new(blocks, 
Default::default(), Default::default())) - } - - #[test] - fn test_strip_block() { - let block = test_block(0); - assert_eq!(block.body().transactions.len(), 4); - assert_eq!(block.senders().len(), 4); - - let stripped = strip_block(block); - assert_eq!(stripped.body().transactions.len(), 2); - assert_eq!(stripped.senders().len(), 2); - - for tx in stripped.body().transactions.iter() { - assert!(MagicSig::try_from_signature(tx.signature()).is_none()); - } - } - - #[test] - fn test_strip_chain() { - let original = test_chain(2); - assert_eq!(original.blocks().len(), 2); - - let chain = strip_chain(&original); - - assert_ne!(&*chain, &*original); - - assert_eq!(chain.blocks().len(), 2); - - for (_num, block) in chain.blocks().iter() { - assert_eq!(block.body().transactions.len(), 2); - assert_eq!(block.senders().len(), 2); - for tx in block.body().transactions.iter() { - assert!(MagicSig::try_from_signature(tx.signature()).is_none()); - } - } - } -} diff --git a/crates/rpc/src/ctx/full.rs b/crates/rpc/src/ctx/full.rs deleted file mode 100644 index 1300b8e..0000000 --- a/crates/rpc/src/ctx/full.rs +++ /dev/null @@ -1,282 +0,0 @@ -use crate::SignetCtx; -use alloy::{consensus::Header, eips::BlockId}; -use reth::{ - providers::{ProviderResult, providers::BlockchainProvider}, - rpc::server_types::eth::{EthApiError, EthConfig}, - rpc::types::BlockNumberOrTag, - tasks::{TaskExecutor, TaskSpawner}, -}; -use reth_node_api::FullNodeComponents; -use signet_db::RuRevmState; -use signet_evm::EvmNeedsTx; -use signet_node_types::Pnt; -use signet_tx_cache::TxCache; -use signet_types::constants::SignetSystemConstants; -use std::sync::Arc; -use tokio::sync::{AcquireError, OwnedSemaphorePermit, Semaphore}; -use trevm::{ - helpers::Ctx, - revm::{Inspector, inspector::NoOpInspector}, -}; - -/// State location when instantiating an EVM instance. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -#[repr(i8)] -pub enum LoadState { - /// Load the state before the block's transactions (i.e. at the start of - /// the block). - Before = -1, - /// Load the state after the block's transactions (i.e. at the end of the - /// block). - After = 0, -} - -impl LoadState { - /// Adjust the height based on the state location. - pub const fn adjust_height(&self, height: u64) -> u64 { - match self { - LoadState::Before => height.saturating_sub(1), - LoadState::After => height, - } - } - - /// Returns `true` if the state location is before the block. - pub const fn is_before_block(&self) -> bool { - matches!(self, Self::Before) - } - - /// Returns `true` if the state location is after the block. - pub const fn is_after_block(&self) -> bool { - matches!(self, Self::After) - } -} - -impl From for LoadState { - fn from(value: BlockId) -> Self { - match value { - BlockId::Number(no) => no.into(), - _ => LoadState::After, - } - } -} - -impl From for LoadState { - fn from(value: BlockNumberOrTag) -> Self { - match value { - BlockNumberOrTag::Pending => LoadState::Before, - _ => LoadState::After, - } - } -} - -impl From for bool { - fn from(value: LoadState) -> Self { - matches!(value, LoadState::Before) - } -} - -/// RPC context. Contains all necessary host and signet components for serving -/// RPC requests. -#[derive(Debug)] -pub struct RpcCtx -where - Host: FullNodeComponents, - Signet: Pnt, -{ - inner: Arc>, -} - -impl RpcCtx -where - Host: FullNodeComponents, - Signet: Pnt, -{ - /// Create a new `RpcCtx`. 
- /// - /// ## WARNING - /// - /// The [`BlockchainProvider`] passed in MUST be receiving updates from the - /// node wrt canonical chain changes. Some task MUST be calling relevant - /// [`CanonChainTracker`] methods on a clone of this [`BlockchainProvider`], - /// - /// If this is not correctly set up, [`BlockId`] resolution for `latest`, - /// `safe,` finalized, etc will not work correctly. - /// - /// [`CanonChainTracker`]: reth::providers::CanonChainTracker - pub fn new( - host: Host, - constants: SignetSystemConstants, - provider: BlockchainProvider, - eth_config: EthConfig, - tx_cache: Option, - spawner: Tasks, - ) -> ProviderResult - where - Tasks: TaskSpawner + Clone + 'static, - { - RpcCtxInner::new(host, constants, provider, eth_config, tx_cache, spawner) - .map(|inner| Self { inner: Arc::new(inner) }) - } -} - -impl Clone for RpcCtx -where - Host: FullNodeComponents, - Signet: Pnt, -{ - fn clone(&self) -> Self { - Self { inner: self.inner.clone() } - } -} - -impl core::ops::Deref for RpcCtx -where - Host: FullNodeComponents, - Signet: Pnt, -{ - type Target = RpcCtxInner; - - fn deref(&self) -> &Self::Target { - &self.inner - } -} - -/// Shared context between all RPC handlers. -#[derive(Debug)] -struct SharedContext { - tracing_semaphores: Arc, -} - -/// Inner context for [`RpcCtx`]. -#[derive(Debug)] -pub struct RpcCtxInner -where - Host: FullNodeComponents, - Signet: Pnt, -{ - host: Host, - signet: SignetCtx, - - shared: SharedContext, -} - -impl RpcCtxInner -where - Host: FullNodeComponents, - Signet: Pnt, -{ - /// Create a new `RpcCtxInner`. - /// - /// ## WARNING - /// - /// The [`BlockchainProvider`] passed in MUST be receiving updates from the - /// node wrt canonical chain changes. Some task MUST be calling relevant - /// [`CanonChainTracker`] methods on a clone of this [`BlockchainProvider`], - /// - /// If this is not correctly set up, [`BlockId`] resolution for `latest`, - /// `safe,` finalized, etc will not work correctly. - /// - /// [`CanonChainTracker`]: reth::providers::CanonChainTracker - pub fn new( - host: Host, - constants: SignetSystemConstants, - provider: BlockchainProvider, - eth_config: EthConfig, - tx_cache: Option, - spawner: Tasks, - ) -> ProviderResult - where - Tasks: TaskSpawner + Clone + 'static, - { - let tracing_semaphores = Semaphore::new(eth_config.max_tracing_requests).into(); - - SignetCtx::new(constants, provider, eth_config, tx_cache, spawner).map(|signet| Self { - host, - signet, - shared: SharedContext { tracing_semaphores }, - }) - } - - /// Acquire a permit for tracing. - pub async fn acquire_tracing_permit(&self) -> Result { - self.shared.tracing_semaphores.clone().acquire_owned().await - } - - pub const fn host(&self) -> &Host { - &self.host - } - - pub const fn signet(&self) -> &SignetCtx { - &self.signet - } - - pub fn task_executor(&self) -> &TaskExecutor { - self.host.task_executor() - } - - /// Instantiate a trevm instance with a custom inspector. - /// - /// The `header` argument is used to fill the block context of the EVM. If - /// the `block_id` is `Pending` the EVM state will be the block BEFORE the - /// `header`. I.e. if the block number of the `header` is `n`, the state - /// will be after block `n-1`, (effectively the state at the start of block - /// `n`). 
- /// - /// if the `block_id` is `Pending` the state will be based on the - /// and `block` arguments - pub fn trevm_with_inspector>>( - &self, - state: LoadState, - header: &Header, - inspector: I, - ) -> Result, EthApiError> { - let load_height = state.adjust_height(header.number); - let spec_id = self.signet.evm_spec_id(header); - - let db = self.signet.state_provider_database(load_height)?; - - let mut trevm = - signet_evm::signet_evm_with_inspector(db, inspector, self.signet.constants().clone()) - .fill_cfg(&self.signet) - .fill_block(header); - - trevm.set_spec_id(spec_id); - - Ok(trevm) - } - - /// Create a trevm instance. - pub fn trevm( - &self, - state: LoadState, - header: &Header, - ) -> Result, EthApiError> { - self.trevm_with_inspector(state, header, NoOpInspector) - } -} - -// Some code in this file has been copied and modified from reth -// -// The original license is included below: -// -// The MIT License (MIT) -// -// Copyright (c) 2022-2025 Reth Contributors -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -//. -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
diff --git a/crates/rpc/src/ctx/mod.rs b/crates/rpc/src/ctx/mod.rs deleted file mode 100644 index ea63b73..0000000 --- a/crates/rpc/src/ctx/mod.rs +++ /dev/null @@ -1,8 +0,0 @@ -mod signet; -pub use signet::SignetCtx; - -mod full; -pub use full::{LoadState, RpcCtx}; - -mod fee_hist; -pub(crate) use fee_hist::strip_signet_system_txns; diff --git a/crates/rpc/src/ctx/signet.rs b/crates/rpc/src/ctx/signet.rs deleted file mode 100644 index 5ca08dc..0000000 --- a/crates/rpc/src/ctx/signet.rs +++ /dev/null @@ -1,928 +0,0 @@ -use crate::{ - ctx::strip_signet_system_txns, - eth::EthError, - interest::{ActiveFilter, FilterManager, FilterOutput, SubscriptionManager}, - receipts::build_signet_receipt, - utils::BlockRangeInclusiveIter, -}; -use alloy::{ - consensus::{BlockHeader, Header, Signed, Transaction, TxEnvelope}, - eips::{BlockId, BlockNumberOrTag, NumHash}, - network::Ethereum, - primitives::{B256, U64}, - rpc::types::{FeeHistory, Filter, Log}, -}; -use futures_util::StreamExt; -use reth::{ - core::primitives::SignerRecoverable, - primitives::{Block, Receipt, Recovered, RecoveredBlock, TransactionSigned}, - providers::{ - BlockHashReader, BlockIdReader, BlockNumReader, CanonStateSubscriptions, HeaderProvider, - ProviderError, ProviderResult, ReceiptProvider, StateProviderFactory, TransactionsProvider, - providers::BlockchainProvider, - }, - revm::database::StateProviderDatabase, - rpc::{ - eth::{filter::EthFilterError, helpers::types::EthRpcConverter}, - server_types::eth::{ - EthApiError, EthConfig, EthStateCache, FeeHistoryCache, FeeHistoryEntry, - GasPriceOracle, - fee_history::{ - calculate_reward_percentiles_for_block, fee_history_cache_new_blocks_task, - }, - logs_utils::{self, ProviderOrBlock, append_matching_block_logs}, - receipt::EthReceiptConverter, - }, - types::{FilterBlockOption, FilteredParams}, - }, - tasks::TaskSpawner, -}; -use reth_chainspec::{BaseFeeParams, ChainSpec, ChainSpecProvider}; -use reth_node_api::BlockBody; -use reth_rpc_eth_api::{RpcBlock, RpcConvert, RpcReceipt, RpcTransaction}; -use signet_db::RuRevmState; -use signet_node_types::Pnt; -use signet_tx_cache::TxCache; -use signet_types::{MagicSig, constants::SignetSystemConstants}; -use std::{marker::PhantomData, sync::Arc}; -use tracing::{Level, instrument, trace}; -use trevm::{ - Cfg, - revm::{context::CfgEnv, database::StateBuilder, primitives::hardfork::SpecId}, -}; - -/// The maximum number of headers we read at once when handling a range filter. -const MAX_HEADERS_RANGE: u64 = 1_000; // with ~530bytes per header this is ~500kb - -/// Signet context. This struct contains all the necessary components for -/// accessing Signet node state, and serving RPC requests. -#[derive(Debug)] -pub struct SignetCtx -where - Inner: Pnt, -{ - // Basics - constants: SignetSystemConstants, - eth_config: EthConfig, - - // State stuff - provider: BlockchainProvider, - cache: EthStateCache, - - // Gas stuff - gas_oracle: GasPriceOracle>, - fee_history: FeeHistoryCache, - - // Tx stuff - tx_cache: Option, - - // Filter and subscription stuff - filters: FilterManager, - subs: SubscriptionManager, - - // Spooky ghost stuff - _pd: std::marker::PhantomData Inner>, -} - -impl SignetCtx -where - Inner: Pnt, -{ - /// Instantiate a new `SignetCtx`, spawning necessary tasks to keep the - /// relevant caches up to date. - /// - /// ## WARNING - /// - /// The [`BlockchainProvider`] passed in MUST be receiving updates from the - /// node wrt canonical chain changes. 
Some task MUST be calling relevant - /// [`CanonChainTracker`] methods on a clone of this [`BlockchainProvider`], - /// - /// If this is not correctly set up, [`BlockId`] resolution for `latest`, - /// `safe,` finalized, etc will not work correctly. - /// - /// [`CanonChainTracker`]: reth::providers::CanonChainTracker - pub fn new( - constants: SignetSystemConstants, - provider: BlockchainProvider, - eth_config: EthConfig, - tx_cache: Option, - spawner: Tasks, - ) -> ProviderResult - where - Tasks: TaskSpawner + Clone + 'static, - { - let cache = EthStateCache::spawn_with(provider.clone(), eth_config.cache, spawner.clone()); - let gas_oracle = - GasPriceOracle::new(provider.clone(), eth_config.gas_oracle, cache.clone()); - - let fee_history = FeeHistoryCache::new(eth_config.fee_history_cache); - - // The fee task pre-calculates and caches common percentiles for the - // `eth_feeHistory` RPC method. - let fee_task = fee_history_cache_new_blocks_task( - fee_history.clone(), - provider.canonical_state_stream().map(strip_signet_system_txns), - provider.clone(), - cache.clone(), - ); - - spawner.spawn_critical("fee_history_cache_new_blocks", Box::pin(fee_task)); - - let filters = FilterManager::new(eth_config.stale_filter_ttl, eth_config.stale_filter_ttl); - - let subs = SubscriptionManager::new(provider.clone(), eth_config.stale_filter_ttl); - - Ok(Self { - constants, - provider, - eth_config, - cache, - gas_oracle, - fee_history, - tx_cache, - filters, - subs, - _pd: PhantomData, - }) - } - - /// Access the signet constants - pub const fn constants(&self) -> &SignetSystemConstants { - &self.constants - } - - /// Access the signet DB - pub const fn provider(&self) -> &BlockchainProvider { - &self.provider - } - - /// Access the signet [`EthConfig`] - pub const fn config(&self) -> &EthConfig { - &self.eth_config - } - - /// Access the tx_cache - pub fn tx_cache(&self) -> Option { - self.tx_cache.clone() - } - - /// Access the [`ChainSpec`]. - pub fn chain_spec(&self) -> Arc { - self.provider.chain_spec() - } - - /// Get the EVM spec ID for a given block. - pub fn evm_spec_id(&self, header: &Header) -> SpecId { - signet_block_processor::revm_spec(&self.chain_spec(), header.timestamp()) - } - - /// Access the subscription manager. - pub const fn subscriptions(&self) -> &SubscriptionManager { - &self.subs - } - - /// Make a [`StateProviderDatabase`] from the read-write provider, suitable - /// for use with Trevm. - pub fn state_provider_database(&self, height: u64) -> Result { - // Get the state provider for the block number - let sp = self.provider.history_by_block_number(height)?; - - // Wrap in Revm compatibility layer - let spd = StateProviderDatabase::new(sp); - - let builder = StateBuilder::new_with_database(spd); - - Ok(builder.build()) - } - - /// Get the [`Header`] for a given block. - pub async fn raw_header( - &self, - t: impl Into, - ) -> Result, EthApiError> { - let Some(hash) = self.provider.block_hash_for_id(t.into())? else { - return Ok(None); - }; - - let header = self.cache.get_header(hash).await.map_err(EthApiError::from)?; - - Ok(Some((hash, header))) - } - - /// Get the block for a given block, returning the block hash and - /// the block itself. - pub async fn raw_block( - &self, - t: impl Into, - ) -> Result>)>, EthApiError> { - let Some(hash) = self.provider.block_hash_for_id(t.into())? 
else { - return Ok(None); - }; - - self.cache.get_recovered_block(hash).await.map_err(Into::into).map(|b| b.map(|b| (hash, b))) - } - - /// Create a transaction response builder for the RPC API. - pub fn rpc_converter(&self) -> EthRpcConverter { - EthRpcConverter::new(EthReceiptConverter::new(self.chain_spec())) - } - - /// Get the block for a given block, formatting the block for - /// the RPC API. - pub async fn block( - &self, - t: impl Into, - full: Option, - ) -> Result>, EthApiError> { - let Some(hash) = self.provider.block_hash_for_id(t.into())? else { - return Ok(None); - }; - - let Some(block) = self.cache.get_recovered_block(hash).await? else { - return Ok(None); - }; - - (*block) - .clone() - .into_rpc_block( - full.unwrap_or_default().into(), - |tx, tx_info| self.rpc_converter().fill(tx, tx_info), - |header, rlp_len| self.rpc_converter().convert_header(header, rlp_len), - ) - .map(Some) - } - - /// Get the tx count for a given block. - pub async fn tx_count(&self, t: impl Into) -> Result, EthApiError> { - let Some(hash) = self.provider.block_hash_for_id(t.into())? else { - return Ok(None); - }; - - if let Some(block) = self.cache.get_recovered_block(hash).await? { - // ambiguous function names - let txns = BlockBody::transactions(block.body()); - Ok(Some(U64::from(txns.len()))) - } else { - Ok(None) - } - } - - /// Get the receipts for a given block. - pub async fn raw_receipts( - &self, - t: impl Into, - ) -> Result>>, EthApiError> { - let Some(hash) = self.provider.block_hash_for_id(t.into())? else { - return Ok(None); - }; - - self.cache.get_receipts(hash).await.map_err(Into::into) - } - - /// Get the transaction for a given hash, returning the transaction and its - /// block-related metadata. - pub fn raw_transaction_by_hash( - &self, - h: B256, - ) -> Result, EthApiError> { - self.provider.transaction_by_hash_with_meta(h).map_err(Into::into) - } - - /// Format a transaction for the RPC API. - fn format_rpc_tx( - tx: TransactionSigned, - block_hash: B256, - block_number: u64, - index: usize, - base_fee: Option, - ) -> Result { - let sig = tx.signature(); - - let sender = - if let Some(sender) = MagicSig::try_from_signature(sig).map(|s| s.rollup_sender()) { - sender - } else { - tx.recover_signer().map_err(|_| EthApiError::InvalidTransactionSignature)? 
- }; - - let tx = Recovered::new_unchecked(tx, sender); - - let from = tx.signer(); - let hash = *tx.hash(); - let signature = *tx.signature(); - - let inner: TxEnvelope = match tx.into_inner().into_typed_transaction() { - reth::primitives::Transaction::Legacy(tx) => { - Signed::new_unchecked(tx, signature, hash).into() - } - reth::primitives::Transaction::Eip2930(tx) => { - Signed::new_unchecked(tx, signature, hash).into() - } - reth::primitives::Transaction::Eip1559(tx) => { - Signed::new_unchecked(tx, signature, hash).into() - } - reth::primitives::Transaction::Eip4844(tx) => { - Signed::new_unchecked(tx, signature, hash).into() - } - reth::primitives::Transaction::Eip7702(tx) => { - Signed::new_unchecked(tx, signature, hash).into() - } - }; - let inner = Recovered::new_unchecked(inner, from); - - let egp = base_fee - .map(|base_fee| { - inner.effective_tip_per_gas(base_fee).unwrap_or_default() as u64 + base_fee - }) - .unwrap_or_else(|| inner.max_fee_per_gas() as u64); - - Ok(alloy::rpc::types::Transaction { - inner, - block_hash: Some(block_hash), - block_number: Some(block_number), - transaction_index: Some(index as u64), - effective_gas_price: Some(egp as u128), - }) - } - - /// Get a transaction by its hash, and format it for the RPC API. - pub fn rpc_transaction_by_hash( - &self, - hash: B256, - ) -> Result>, EthApiError> { - let Some((tx, meta)) = self.raw_transaction_by_hash(hash)? else { - return Ok(None); - }; - - Self::format_rpc_tx( - tx, - meta.block_hash, - meta.block_number, - meta.index as usize, - meta.base_fee, - ) - .map(Some) - } - - /// Get a transaction by its block and index, and format it for the RPC API. - pub async fn rpc_transaction_by_block_idx( - &self, - id: impl Into, - index: usize, - ) -> Result>, EthApiError> { - let Some((hash, block)) = self.raw_block(id).await? else { - return Ok(None); - }; - - block - .body() - .transactions - .get(index) - .map(|tx| { - Self::format_rpc_tx( - tx.clone(), - hash, - block.number(), - index, - block.base_fee_per_gas(), - ) - }) - .transpose() - } - - /// Get a receipt by its hash, and format it for the RPC API. - pub async fn rpc_receipt_by_hash( - &self, - hash: B256, - ) -> Result>, EthApiError> { - let Some((tx, meta)) = self.raw_transaction_by_hash(hash)? else { - trace!(%hash, "Transaction not found for receipt hash"); - return Ok(None); - }; - - let Some(receipt) = self.provider.receipt_by_hash(hash)? else { - trace!(%hash, "Receipt not found for transaction hash"); - return Ok(None); - }; - - let Some(all_receipts) = self.cache.get_receipts(meta.block_hash).await? else { - trace!(%hash, "Block not found for transaction hash"); - return Ok(None); - }; - - build_signet_receipt(tx, meta, receipt, all_receipts.to_vec()).map(Some) - } - - /// Create the [`Header`] object for a specific [`BlockId`]. - pub async fn block_cfg(&self, mut block_id: BlockId) -> Result { - // If the block is pending, we'll load the latest and - let pending = block_id.is_pending(); - if pending { - block_id = BlockId::latest(); - } - - let Some((_, mut header)) = self.raw_header(block_id).await? else { - return Err(EthApiError::HeaderNotFound(block_id)); - }; - - // Modify the header for pending blocks, to simulate the next block. - if pending { - header.parent_hash = header.hash_slow(); - header.number += 1; - header.timestamp += 12; - header.base_fee_per_gas = header.next_block_base_fee(BaseFeeParams::ethereum()); - header.gas_limit = self.eth_config.rpc_gas_cap; - } - - Ok(header) - } - - /// Create a gas price oracle. 
- pub const fn gas_oracle(&self) -> &GasPriceOracle> { - &self.gas_oracle - } - - /// Approximates reward at a given percentile for a specific block - /// Based on the configured resolution - /// - /// Implementation reproduced from reth. - fn approximate_percentile(&self, entry: &FeeHistoryEntry, requested_percentile: f64) -> u128 { - let resolution = self.fee_history.resolution(); - let rounded_percentile = - (requested_percentile * resolution as f64).round() / resolution as f64; - let clamped_percentile = rounded_percentile.clamp(0.0, 100.0); - - // Calculate the index in the precomputed rewards array - let index = (clamped_percentile / (1.0 / resolution as f64)).round() as usize; - // Fetch the reward from the FeeHistoryEntry - entry.rewards.get(index).copied().unwrap_or_default() - } - - /// Implements the `eth_feeHistory` RPC method. - /// - /// Implementation reproduced from reth, trimmed of 4844 support. - pub async fn fee_history( - &self, - mut block_count: u64, - mut newest: BlockNumberOrTag, - reward_percentiles: Option>, - ) -> Result { - if block_count == 0 { - return Ok(FeeHistory::default()); - } - - // See https://github.com/ethereum/go-ethereum/blob/2754b197c935ee63101cbbca2752338246384fec/eth/gasprice/feehistory.go#L218C8-L225 - let max_fee_history = if reward_percentiles.is_none() { - self.gas_oracle().config().max_header_history - } else { - self.gas_oracle().config().max_block_history - }; - - if block_count > max_fee_history { - block_count = max_fee_history - } - - if newest.is_pending() { - // cap the target block since we don't have fee history for the pending block - newest = BlockNumberOrTag::Latest; - // account for missing pending block - block_count = block_count.saturating_sub(1); - } - - let end_block = self - .provider() - .block_number_for_id(newest.into())? - .ok_or(EthApiError::HeaderNotFound(newest.into()))?; - - // need to add 1 to the end block to get the correct (inclusive) range - let end_block_plus = end_block + 1; - // Ensure that we would not be querying outside of genesis - if end_block_plus < block_count { - block_count = end_block_plus; - } - - // If reward percentiles were specified, we - // need to validate that they are monotonically - // increasing and 0 <= p <= 100 - // Note: The types used ensure that the percentiles are never < 0 - if let Some(percentiles) = &reward_percentiles - && percentiles.windows(2).any(|w| w[0] > w[1] || w[0] > 100.) 
-        {
-            return Err(EthApiError::InvalidRewardPercentiles);
-        }
-
-        // Fetch the headers and ensure we got all of them
-        //
-        // Treat a request for 1 block as a request for `newest_block..=newest_block`,
-        // otherwise `newest_block - 2
-        // NOTE: We ensured that block count is capped
-        let start_block = end_block_plus - block_count;
-
-        // Collect base fees, gas usage ratios and (optionally) reward percentile data
-        let mut base_fee_per_gas: Vec<u128> = Vec::new();
-        let mut gas_used_ratio: Vec<f64> = Vec::new();
-
-        let mut rewards: Vec<Vec<u128>> = Vec::new();
-
-        // Check if the requested range is within the cache bounds
-        let fee_entries = self.fee_history.get_history(start_block, end_block).await;
-
-        if let Some(fee_entries) = fee_entries {
-            if fee_entries.len() != block_count as usize {
-                return Err(EthApiError::InvalidBlockRange);
-            }
-
-            for entry in &fee_entries {
-                base_fee_per_gas.push(
-                    entry.header.base_fee_per_gas().expect("signet only has post-eip1559 headers")
-                        as u128,
-                );
-                gas_used_ratio.push(entry.gas_used_ratio);
-
-                if let Some(percentiles) = &reward_percentiles {
-                    let mut block_rewards = Vec::with_capacity(percentiles.len());
-                    for &percentile in percentiles {
-                        block_rewards.push(self.approximate_percentile(entry, percentile));
-                    }
-                    rewards.push(block_rewards);
-                }
-            }
-            let last_entry = fee_entries.last().expect("is not empty");
-
-            // Also need to include the `base_fee_per_gas` and `base_fee_per_blob_gas` for the
-            // next block
-            base_fee_per_gas.push(
-                last_entry
-                    .header
-                    .next_block_base_fee(BaseFeeParams::ethereum())
-                    .expect("signet only has post-eip1559 headers") as u128,
-            );
-        } else {
-            // read the requested header range
-            let headers = self.provider().sealed_headers_range(start_block..=end_block)?;
-            if headers.len() != block_count as usize {
-                return Err(EthApiError::InvalidBlockRange);
-            }
-
-            for header in &headers {
-                base_fee_per_gas.push(header.base_fee_per_gas().unwrap_or_default() as u128);
-                gas_used_ratio.push(header.gas_used() as f64 / header.gas_limit() as f64);
-
-                // Percentiles were specified, so we need to collect reward percentile info
-                if let Some(percentiles) = &reward_percentiles {
-                    let (block, receipts) = self
-                        .cache
-                        .get_block_and_receipts(header.hash())
-                        .await?
-                        .ok_or(EthApiError::InvalidBlockRange)?;
-                    rewards.push(
-                        calculate_reward_percentiles_for_block(
-                            percentiles,
-                            header.gas_used(),
-                            header.base_fee_per_gas().unwrap_or_default(),
-                            &block.body().transactions,
-                            &receipts,
-                        )
-                        .unwrap_or_default(),
-                    );
-                }
-            }
-
-            // The spec states that `base_fee_per_gas` "[..] includes the next block after the
-            // newest of the returned range, because this value can be derived from the
-            // newest block"
-            //
-            // The unwrap is safe since we checked earlier that we got at least 1 header.
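// [Editor's sketch — not part of the patch] `next_block_base_fee`, used on
// either side of this note, applies the EIP-1559 update rule. With
// Ethereum's parameters (elasticity multiplier 2, change denominator 8)
// the rule is, in standalone form:

fn next_base_fee(base_fee: u64, gas_used: u64, gas_limit: u64) -> u64 {
    let target = gas_limit / 2; // elasticity multiplier = 2
    if target == 0 || gas_used == target {
        base_fee
    } else if gas_used > target {
        // Over target: raise by up to 1/8, and by at least 1 wei.
        let delta = (base_fee as u128 * (gas_used - target) as u128 / target as u128 / 8) as u64;
        base_fee + delta.max(1)
    } else {
        // Under target: lower proportionally, by up to 1/8.
        let delta = (base_fee as u128 * (target - gas_used) as u128 / target as u128 / 8) as u64;
        base_fee.saturating_sub(delta)
    }
}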
- let last_header = headers.last().expect("is present"); - base_fee_per_gas.push( - last_header - .next_block_base_fee( - self.provider() - .chain_spec() - .base_fee_params_at_timestamp(last_header.timestamp()), - ) - .unwrap_or_default() as u128, - ); - }; - - let base_fee_per_blob_gas = vec![0; base_fee_per_gas.len()]; - let blob_gas_used_ratio = vec![0.; gas_used_ratio.len()]; - - Ok(FeeHistory { - base_fee_per_gas, - gas_used_ratio, - base_fee_per_blob_gas, - blob_gas_used_ratio, - oldest_block: start_block, - reward: reward_percentiles.map(|_| rewards), - }) - } - - /// Get logs for a given block hash based on a filter - /// - /// ## Panics - /// - /// Panics if the filter is a range filter - async fn logs_at_hash(&self, filter: &Filter) -> Result, EthApiError> { - let hash = *filter.block_option.as_block_hash().expect("COU"); - - let (block, receipts) = tokio::try_join!(self.raw_block(hash), self.raw_receipts(hash),)?; - - // Return an error if the block isn't found - let (_, block) = block.ok_or(EthApiError::HeaderNotFound(hash.into()))?; - // Return an error if the receipts aren't found - let receipts = receipts.ok_or(EthApiError::HeaderNotFound(hash.into()))?; - - let block_num_hash = NumHash::new(block.number(), hash); - let timestamp = block.timestamp(); - - let mut all_logs = Vec::new(); - append_matching_block_logs( - &mut all_logs, - ProviderOrBlock::>::Block(block), - filter, - block_num_hash, - &receipts, - false, - timestamp, - )?; - - Ok(all_logs) - } - - /// Returns all logs in the given _inclusive_ range that match the filter - /// - /// Returns an error if: - /// - underlying database error - /// - amount of matches exceeds configured limit - /// - // https://github.com/paradigmxyz/reth/blob/d01658e516abbf2a1a76855a26d7123286865f22/crates/rpc/rpc/src/eth/filter.rs#L506 - async fn get_logs_in_block_range( - &self, - filter: &Filter, - from_block: u64, - to_block: u64, - ) -> Result, EthFilterError> { - trace!(target: "rpc::eth::filter", from=from_block, to=to_block, ?filter, "finding logs in range"); - - if to_block < from_block { - return Err(EthFilterError::InvalidBlockRangeParams); - } - let max_blocks = self.config().max_blocks_per_filter; - - if to_block - from_block > max_blocks { - return Err(EthFilterError::QueryExceedsMaxBlocks(max_blocks)); - } - - let mut all_logs = Vec::new(); - - // derive bloom filters from filter input, so we can check headers for matching logs - let address_filter = FilteredParams::address_filter(&filter.address); - let topics_filter = FilteredParams::topics_filter(&filter.topics); - - // loop over the range of new blocks and check logs if the filter matches the log's bloom - // filter - for (from, to) in BlockRangeInclusiveIter::new(from_block..=to_block, MAX_HEADERS_RANGE) { - let headers = self.provider().headers_range(from..=to)?; - - for (idx, header) in headers.iter().enumerate() { - // only if filter matches - if FilteredParams::matches_address(header.logs_bloom(), &address_filter) - && FilteredParams::matches_topics(header.logs_bloom(), &topics_filter) - { - // these are consecutive headers, so we can use the parent hash of the next - // block to get the current header's hash - let hash = match headers.get(idx + 1) { - Some(parent) => parent.parent_hash(), - None => self - .provider() - .block_hash(header.number())? 
- .ok_or_else(|| ProviderError::HeaderNotFound(header.number().into()))?, - }; - - let (block, receipts) = - tokio::try_join!(self.raw_block(hash), self.raw_receipts(hash),)?; - - // Return an error if the block isn't found - let (_, block) = block.ok_or(EthApiError::HeaderNotFound(hash.into()))?; - // Return an error if the receipts aren't found - let receipts = receipts.ok_or(EthApiError::HeaderNotFound(hash.into()))?; - - let block_num_hash = NumHash::new(block.number(), hash); - let timestamp = block.timestamp(); - - append_matching_block_logs( - &mut all_logs, - ProviderOrBlock::>::Block(block), - filter, - block_num_hash, - &receipts, - false, - timestamp, - )?; - - // size check but only if range is multiple blocks, so we always return all - // logs of a single block - let max_logs = self.config().max_logs_per_response; - let is_multi_block_range = from_block != to_block; - if is_multi_block_range && all_logs.len() > max_logs { - return Err(EthFilterError::QueryExceedsMaxResults { - max_logs, - from_block, - to_block: block_num_hash.number.saturating_sub(1), - }); - } - } - } - } - - Ok(all_logs) - } - - /// Get logs for a given block range based on a filter - /// - /// ## Panics - /// - /// Panics if the filter is not a range filter - async fn logs_in_range(&self, filter: &Filter) -> Result, EthFilterError> { - // compute the range - let (from_block, to_block) = filter.block_option.as_range(); - - let info = self.provider().chain_info()?; - - // we start at the most recent block if unset in filter - let start_block = info.best_number; - let from = - from_block.map(|num| self.provider().convert_block_number(*num)).transpose()?.flatten(); - let to = - to_block.map(|num| self.provider().convert_block_number(*num)).transpose()?.flatten(); - let (from_block_number, to_block_number) = - logs_utils::get_filter_block_range(from, to, start_block, info)?; - self.get_logs_in_block_range(filter, from_block_number, to_block_number).await - } - - /// Logic for `eth_getLogs` RPC method. - pub async fn logs(&self, filter: &Filter) -> Result, EthError> { - if filter.block_option.is_range() { - self.logs_in_range(filter).await.map_err(Into::into) - } else { - self.logs_at_hash(filter).await.map_err(Into::into) - } - } - - /// Install a log filter. - pub fn install_log_filter(&self, filter: Filter) -> Result { - let chain_info = self.provider().chain_info()?; - - Ok(self.filters.install_log_filter(chain_info.best_number, filter)) - } - - /// Install a block filter. - pub fn install_block_filter(&self) -> Result { - let chain_info = self.provider().chain_info()?; - - Ok(self.filters.install_block_filter(chain_info.best_number)) - } - - /// Poll an active log filter for changes. 
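// [Editor's sketch — not part of the patch] The range scan above skips
// receipt reads for blocks whose header bloom cannot match the filter. An
// Ethereum log bloom sets 3 bits per item, each picked from a byte pair of
// the item's keccak256 hash; membership is a may-contain test (false
// positives possible, false negatives impossible), which is what makes it
// safe as a pre-filter:

use alloy::primitives::{keccak256, Bloom};

fn bloom_may_contain(bloom: &Bloom, item: &[u8]) -> bool {
    let hash = keccak256(item);
    (0..3).all(|i| {
        // 11 bits from hash bytes (2i, 2i+1), i.e. an index into 2048 bits.
        let bit = (u16::from_be_bytes([hash[2 * i], hash[2 * i + 1]]) & 0x07FF) as usize;
        let (byte, mask) = (256 - 1 - bit / 8, 1u8 << (bit % 8));
        bloom[byte] & mask == mask
    })
}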
- /// - /// # Panics - /// - /// Panics if the filter is not a Log filter - #[instrument(level = Level::DEBUG, skip_all, fields(since_last_poll = filter.time_since_last_poll().as_millis(), next_start_block = filter.next_start_block()))] - async fn get_log_filter_changes( - &self, - filter: &ActiveFilter, - ) -> Result<(u64, FilterOutput), EthError> { - debug_assert!(filter.is_filter()); - - // Load the current tip - let info = self.provider().chain_info()?; - let current_height = info.best_number; - - trace!(%filter, current_height, "Polling filter"); - - // If the filter was polled AFTER the current tip, we return an empty - // result - let start_block = filter.next_start_block(); - if start_block > current_height { - return Ok((current_height, FilterOutput::empty())); - } - - // Cast to a filter (this is checked by dbg_assert and by the caller) - let filter = filter.as_filter().unwrap(); - - let (from_block_number, to_block_number) = match filter.block_option { - FilterBlockOption::Range { from_block, to_block } => { - let from = from_block - .map(|num| self.provider().convert_block_number(num)) - .transpose()? - .flatten(); - let to = to_block - .map(|num| self.provider().convert_block_number(num)) - .transpose()? - .flatten(); - logs_utils::get_filter_block_range(from, to, start_block, info) - .map_err(EthFilterError::from)? - } - FilterBlockOption::AtBlockHash(_) => { - // blockHash is equivalent to fromBlock = toBlock = the block number with - // hash blockHash - // get_logs_in_block_range is inclusive - (start_block, current_height) - } - }; - let logs = self.get_logs_in_block_range(filter, from_block_number, to_block_number).await?; - - Ok((to_block_number, logs.into())) - } - - #[instrument(level = Level::DEBUG, skip_all, fields(since_last_poll = filter.time_since_last_poll().as_millis(), next_start_block = filter.next_start_block()))] - async fn get_block_filter_changes( - &self, - filter: &ActiveFilter, - ) -> Result<(u64, FilterOutput), EthError> { - debug_assert!(filter.is_block()); - // Get the current tip number - let info = self.provider().chain_info()?; - let current_height = info.best_number; - - trace!(%filter, current_height, "Polling filter"); - - let start_block = filter.next_start_block(); - if start_block > current_height { - return Ok((current_height, FilterOutput::empty())); - } - - // Note: we need to fetch the block hashes from inclusive range - // [start_block..best_block] - let end_block = current_height + 1; - - let block_hashes = self - .provider() - .canonical_hashes_range(start_block, end_block) - .map_err(|_| EthApiError::HeaderRangeNotFound(start_block.into(), end_block.into()))?; - Ok((current_height, block_hashes.into())) - } - - /// Get the changes for a filter - #[instrument(level = Level::DEBUG, skip(self))] - pub async fn filter_changes(&self, id: U64) -> Result { - let mut ref_mut = self - .filters - .get_mut(id) - .ok_or_else(|| EthFilterError::FilterNotFound(id.saturating_to::().into()))?; - let filter = ref_mut.value_mut(); - - let (polled_to_block, res) = if filter.is_block() { - self.get_block_filter_changes(filter).await? - } else { - self.get_log_filter_changes(filter).await? - }; - filter.mark_polled(polled_to_block); - - trace!(%filter, "Marked polled"); - Ok(res) - } - - /// Uninstall a filter. - pub fn uninstall_filter(&self, id: U64) -> bool { - self.filters.uninstall(id).is_some() - } -} - -impl Cfg for SignetCtx -where - Inner: Pnt, -{ - fn fill_cfg_env(&self, cfg_env: &mut CfgEnv) { - let CfgEnv { chain_id, .. 
} = cfg_env; - *chain_id = self.constants.ru_chain_id(); - } -} - -// Some code in this file has been copied and modified from reth -// -// The original license is included below: -// -// The MIT License (MIT) -// -// Copyright (c) 2022-2025 Reth Contributors -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -//. -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. diff --git a/crates/rpc/src/debug/endpoints.rs b/crates/rpc/src/debug/endpoints.rs index 4933705..de6aa62 100644 --- a/crates/rpc/src/debug/endpoints.rs +++ b/crates/rpc/src/debug/endpoints.rs @@ -1,171 +1,203 @@ +//! Debug namespace RPC endpoint implementations. + use crate::{ - DebugError, RpcCtx, - utils::{await_handler, response_tri}, + config::StorageRpcCtx, + debug::{ + DebugError, + types::{TraceBlockParams, TraceTransactionParams}, + }, + eth::helpers::{CfgFiller, await_handler, response_tri}, }; use ajj::{HandlerCtx, ResponsePayload}; -use alloy::{consensus::BlockHeader, eips::BlockId, primitives::B256}; -use itertools::Itertools; -use reth::rpc::{ - server_types::eth::EthApiError, - types::{ - TransactionInfo, - trace::geth::{GethDebugTracingOptions, GethTrace, TraceResult}, - }, +use alloy::{ + consensus::BlockHeader, + eips::BlockId, + rpc::types::trace::geth::{GethTrace, TraceResult}, }; -use reth_node_api::FullNodeComponents; +use itertools::Itertools; use signet_evm::EvmErrored; -use signet_node_types::Pnt; +use signet_hot::{HotKv, model::HotKvRead}; use signet_types::MagicSig; use tracing::Instrument; +use trevm::revm::database::DBErrorMarker; -/// Params for the `debug_traceBlockByNumber` and `debug_traceBlockByHash` -/// endpoints. -#[derive(Debug, serde::Deserialize)] -pub(super) struct TraceBlockParams(T, #[serde(default)] Option); - -/// Params type for `debug_traceTransaction`. -#[derive(Debug, serde::Deserialize)] -pub(super) struct TraceTransactionParams(B256, #[serde(default)] Option); - -/// `debug_traceBlockByNumber` and `debug_traceBlockByHash` endpoint handler. -pub(super) async fn trace_block( +/// `debug_traceBlockByNumber` and `debug_traceBlockByHash` handler. 
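// [Editor's sketch — not part of the patch] `acquire_tracing_permit` in the
// handler below is a semaphore-style cap on concurrent debug traces. The
// underlying tokio pattern (names hypothetical):

use std::sync::Arc;
use tokio::sync::Semaphore;

async fn with_tracing_permit(limiter: Arc<Semaphore>) {
    // The permit lives as long as the guard; holding it for the whole
    // handler bounds how many traces run at once, and dropping it frees
    // the slot for the next waiter.
    let _permit = limiter.acquire_owned().await.expect("semaphore never closed");
    // ... run the expensive trace while holding `_permit` ...
}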
+pub(super) async fn trace_block( hctx: HandlerCtx, TraceBlockParams(id, opts): TraceBlockParams, - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> ResponsePayload, DebugError> where T: Into, - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let opts = response_tri!(opts.ok_or(DebugError::from(EthApiError::InvalidTracerConfig))); + let opts = response_tri!(opts.ok_or(DebugError::InvalidTracerConfig)); - let _permit = response_tri!( - ctx.acquire_tracing_permit() - .await - .map_err(|_| DebugError::rpc_error("Failed to acquire tracing permit".into())) - ); + // Acquire a tracing semaphore permit to limit concurrent debug + // requests. The permit is held for the entire handler lifetime and + // is dropped when the async block completes. + let _permit = ctx.acquire_tracing_permit().await; let id = id.into(); let span = tracing::debug_span!("traceBlock", ?id, tracer = ?opts.tracer.as_ref()); let fut = async move { - // Fetch the block by ID - let Some((hash, block)) = response_tri!(ctx.signet().raw_block(id).await) else { + let cold = ctx.cold(); + let block_num = response_tri!(ctx.resolve_block_id(id).map_err(|e| { + tracing::warn!(error = %e, ?id, "block resolution failed"); + DebugError::BlockNotFound(id) + })); + + let sealed = + response_tri!(ctx.resolve_header(BlockId::Number(block_num.into())).map_err(|e| { + tracing::warn!(error = %e, block_num, "header resolution failed"); + DebugError::BlockNotFound(id) + })); + + let Some(sealed) = sealed else { return ResponsePayload::internal_error_message( - EthApiError::HeaderNotFound(id).to_string().into(), + format!("block not found: {id}").into(), ); }; - tracing::debug!(number = block.number(), "Loaded block"); + let block_hash = sealed.hash(); + let header = sealed.into_inner(); + + let txs = response_tri!(cold.get_transactions_in_block(block_num).await.map_err(|e| { + tracing::warn!(error = %e, block_num, "cold storage read failed"); + DebugError::from(e) + })); - // Allocate space for the frames - let mut frames = Vec::with_capacity(block.transaction_count()); + tracing::debug!(number = header.number, "Loaded block"); - // Instantiate the EVM with the block - let mut trevm = response_tri!(ctx.trevm(crate::LoadState::Before, block.header())); + let mut frames = Vec::with_capacity(txs.len()); - // Apply all transactions in the block up, tracing each one - tracing::trace!(?opts, "Tracing block transactions"); + // State BEFORE this block. 
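    // [Editor's note — not part of the patch] To replay block N, the EVM
    // must start from the state committed at the end of block N-1; that is
    // why the height below is `header.number.saturating_sub(1)` (genesis
    // traces run against height-0 state). In sketch form:
    //
    //     let parent = header.number.saturating_sub(1);
    //     let db = ctx.revm_state_at_height(parent)?; // post-(N-1) state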
+ let db = + response_tri!(ctx.revm_state_at_height(header.number.saturating_sub(1)).map_err(|e| { + tracing::warn!(error = %e, block_num, "hot storage read failed"); + DebugError::from(e) + })); - let mut txns = block.body().transactions().enumerate().peekable(); + let mut trevm = signet_evm::signet_evm(db, ctx.constants().clone()) + .fill_cfg(&CfgFiller(ctx.chain_id())) + .fill_block(&header); + + let mut txns = txs.iter().enumerate().peekable(); for (idx, tx) in txns .by_ref() .peeking_take_while(|(_, t)| MagicSig::try_from_signature(t.signature()).is_none()) { - let tx_info = TransactionInfo { - hash: Some(*tx.hash()), + let tx_info = alloy::rpc::types::TransactionInfo { + hash: Some(*tx.tx_hash()), index: Some(idx as u64), - block_hash: Some(hash), - block_number: Some(block.header().number()), - base_fee: block.header().base_fee_per_gas(), + block_hash: Some(block_hash), + block_number: Some(header.number), + base_fee: header.base_fee_per_gas(), }; let t = trevm.fill_tx(tx); - let frame; (frame, trevm) = response_tri!(crate::debug::tracer::trace(t, &opts, tx_info)); - frames.push(TraceResult::Success { result: frame, tx_hash: Some(*tx.hash()) }); + frames.push(TraceResult::Success { result: frame, tx_hash: Some(*tx.tx_hash()) }); - tracing::debug!(tx_index = idx, tx_hash = ?tx.hash(), "Traced transaction"); + tracing::debug!(tx_index = idx, tx_hash = ?tx.tx_hash(), "Traced transaction"); } - ResponsePayload::Success(frames) + ResponsePayload(Ok(frames)) } .instrument(span); - await_handler!(@response_option hctx.spawn_blocking(fut)) + await_handler!(@response_option hctx.spawn(fut)) } -/// Handle for `debug_traceTransaction`. -pub(super) async fn trace_transaction( +/// `debug_traceTransaction` handler. +pub(super) async fn trace_transaction( hctx: HandlerCtx, TraceTransactionParams(tx_hash, opts): TraceTransactionParams, - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> ResponsePayload where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let opts = response_tri!(opts.ok_or(DebugError::from(EthApiError::InvalidTracerConfig))); + let opts = response_tri!(opts.ok_or(DebugError::InvalidTracerConfig)); - let _permit = response_tri!( - ctx.acquire_tracing_permit() - .await - .map_err(|_| DebugError::rpc_error("Failed to acquire tracing permit".into())) - ); + // Held for the handler duration; dropped when the async block completes. + let _permit = ctx.acquire_tracing_permit().await; let span = tracing::debug_span!("traceTransaction", %tx_hash, tracer = ?opts.tracer.as_ref()); let fut = async move { - // Load the transaction by hash - let (tx, meta) = response_tri!( - response_tri!(ctx.signet().raw_transaction_by_hash(tx_hash)) - .ok_or(EthApiError::TransactionNotFound) - ); - - tracing::debug!("Loaded transaction metadata"); - - // Load the block containing the transaction - let res = response_tri!(ctx.signet().raw_block(meta.block_hash).await); - let (_, block) = - response_tri!(res.ok_or_else(|| EthApiError::HeaderNotFound(meta.block_hash.into()))); - - tracing::debug!(number = block.number(), "Loaded containing block"); - - // Load trevm at the start of the block (i.e. 
before any transactions are applied) - let mut trevm = response_tri!(ctx.trevm(crate::LoadState::Before, block.header())); - - // Apply all transactions in the block up to (but not including) the - // target one - let mut txns = block.body().transactions().enumerate().peekable(); - for (_idx, tx) in txns.by_ref().peeking_take_while(|(_, t)| t.hash() != tx.hash()) { + let cold = ctx.cold(); + + // Look up the transaction and its containing block. + let confirmed = response_tri!(cold.get_tx_by_hash(tx_hash).await.map_err(|e| { + tracing::warn!(error = %e, %tx_hash, "cold storage read failed"); + DebugError::from(e) + })); + + let confirmed = response_tri!(confirmed.ok_or(DebugError::TransactionNotFound)); + let (_tx, meta) = confirmed.into_parts(); + + let block_num = meta.block_number(); + let block_hash = meta.block_hash(); + + let block_id = BlockId::Number(block_num.into()); + let sealed = response_tri!(ctx.resolve_header(block_id).map_err(|e| { + tracing::warn!(error = %e, block_num, "header resolution failed"); + DebugError::BlockNotFound(block_id) + })); + let header = response_tri!(sealed.ok_or(DebugError::BlockNotFound(block_id))).into_inner(); + + let txs = response_tri!(cold.get_transactions_in_block(block_num).await.map_err(|e| { + tracing::warn!(error = %e, block_num, "cold storage read failed"); + DebugError::from(e) + })); + + tracing::debug!(number = block_num, "Loaded containing block"); + + // State BEFORE this block. + let db = + response_tri!(ctx.revm_state_at_height(block_num.saturating_sub(1)).map_err(|e| { + tracing::warn!(error = %e, block_num, "hot storage read failed"); + DebugError::from(e) + })); + + let mut trevm = signet_evm::signet_evm(db, ctx.constants().clone()) + .fill_cfg(&CfgFiller(ctx.chain_id())) + .fill_block(&header); + + // Replay all transactions up to (but not including) the target + let mut txns = txs.iter().enumerate().peekable(); + for (_idx, tx) in txns.by_ref().peeking_take_while(|(_, t)| t.tx_hash() != &tx_hash) { if MagicSig::try_from_signature(tx.signature()).is_some() { return ResponsePayload::internal_error_message( - EthApiError::TransactionNotFound.to_string().into(), + DebugError::TransactionNotFound.to_string().into(), ); } trevm = response_tri!(trevm.run_tx(tx).map_err(EvmErrored::into_error)).accept_state(); } - let (index, tx) = response_tri!(txns.next().ok_or(EthApiError::TransactionNotFound)); + let (index, tx) = response_tri!(txns.next().ok_or(DebugError::TransactionNotFound)); let trevm = trevm.fill_tx(tx); - let tx_info = TransactionInfo { - hash: Some(*tx.hash()), + let tx_info = alloy::rpc::types::TransactionInfo { + hash: Some(*tx.tx_hash()), index: Some(index as u64), - block_hash: Some(block.hash()), - block_number: Some(block.header().number()), - base_fee: block.header().base_fee_per_gas(), + block_hash: Some(block_hash), + block_number: Some(header.number), + base_fee: header.base_fee_per_gas(), }; let res = response_tri!(crate::debug::tracer::trace(trevm, &opts, tx_info)).0; - ResponsePayload::Success(res) + ResponsePayload(Ok(res)) } .instrument(span); - await_handler!(@response_option hctx.spawn_blocking(fut)) + await_handler!(@response_option hctx.spawn(fut)) } diff --git a/crates/rpc/src/debug/error.rs b/crates/rpc/src/debug/error.rs index c4fde13..6b8a8cd 100644 --- a/crates/rpc/src/debug/error.rs +++ b/crates/rpc/src/debug/error.rs @@ -1,53 +1,36 @@ -use reth::{ - providers::ProviderError, - rpc::{eth::filter::EthFilterError, server_types::eth::EthApiError}, -}; -use std::borrow::Cow; +//! 
Error types for the debug namespace. -/// Errors that can occur when interacting with the `eth_` namespace. -#[derive(Debug, thiserror::Error, Clone)] -pub enum DebugError { - /// Provider error: [`ProviderError`]. - #[error("Provider error: {0}")] - Provider(#[from] ProviderError), - /// Filter error [`EthFilterError`]. - #[error("Filter error: {0}")] - Filter(Cow<'static, str>), - /// Eth API error: [`EthApiError`]. - #[error("Eth API error: {0}")] - Rpc(Cow<'static, str>), -} - -impl DebugError { - /// Create a new filter error. - pub const fn filter_error(msg: Cow<'static, str>) -> Self { - Self::Filter(msg) - } +use alloy::eips::BlockId; - /// Create a new RPC error. - pub const fn rpc_error(msg: Cow<'static, str>) -> Self { - Self::Rpc(msg) - } -} - -impl From for DebugError { - fn from(err: EthFilterError) -> Self { - Self::filter_error(err.to_string().into()) - } -} - -impl From for DebugError { - fn from(err: EthApiError) -> Self { - Self::rpc_error(err.to_string().into()) - } -} - -impl DebugError { - /// Turn into a string by value, allows for `.map_err(EthError::to_string)` - /// to be used. - pub fn into_string(self) -> String { - ToString::to_string(&self) - } +/// Errors that can occur in the `debug` namespace. +/// +/// The [`serde::Serialize`] impl emits sanitized messages suitable for +/// API responses — internal storage details are not exposed to callers. +/// Use [`tracing`] to log the full error chain before constructing the +/// variant. +#[derive(Debug, thiserror::Error)] +pub enum DebugError { + /// Cold storage error. + #[error("cold storage error")] + Cold(#[from] signet_cold::ColdStorageError), + /// Hot storage error. + #[error("hot storage error")] + Hot(#[from] signet_storage::StorageError), + /// Invalid tracer configuration. + #[error("invalid tracer config")] + InvalidTracerConfig, + /// Unsupported tracer type. + #[error("unsupported: {0}")] + Unsupported(&'static str), + /// EVM execution error. + #[error("evm execution error")] + Evm(String), + /// Block not found. + #[error("block not found: {0}")] + BlockNotFound(BlockId), + /// Transaction not found. + #[error("transaction not found")] + TransactionNotFound, } impl serde::Serialize for DebugError { diff --git a/crates/rpc/src/debug/mod.rs b/crates/rpc/src/debug/mod.rs index 5a8648e..11e373d 100644 --- a/crates/rpc/src/debug/mod.rs +++ b/crates/rpc/src/debug/mod.rs @@ -1,24 +1,25 @@ -mod endpoints; -use endpoints::*; +//! Debug namespace RPC router backed by storage. +mod endpoints; +use endpoints::{trace_block, trace_transaction}; mod error; pub use error::DebugError; +pub(crate) mod tracer; +mod types; -mod tracer; - -use crate::ctx::RpcCtx; +use crate::config::StorageRpcCtx; use alloy::{eips::BlockNumberOrTag, primitives::B256}; -use reth_node_api::FullNodeComponents; -use signet_node_types::Pnt; +use signet_hot::{HotKv, model::HotKvRead}; +use trevm::revm::database::DBErrorMarker; -/// Instantiate a `debug` API router. -pub fn debug() -> ajj::Router> +/// Instantiate a `debug` API router backed by storage. 
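// [Editor's sketch — not part of the patch] The `DebugError` Serialize impl
// shown above is truncated by the error.rs hunk. A minimal implementation
// consistent with its doc comment — sanitized Display text only, no
// wrapped error detail — would be:

impl serde::Serialize for DebugError {
    fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        // Uses the #[error("...")] messages, e.g. "cold storage error",
        // which deliberately omit the source error's internals.
        serializer.collect_str(self)
    }
}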
+pub(crate) fn debug() -> ajj::Router> where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { ajj::Router::new() - .route("traceBlockByNumber", trace_block::) - .route("traceBlockByHash", trace_block::) - .route("traceTransaction", trace_transaction) + .route("traceBlockByNumber", trace_block::) + .route("traceBlockByHash", trace_block::) + .route("traceTransaction", trace_transaction::) } diff --git a/crates/rpc/src/debug/tracer.rs b/crates/rpc/src/debug/tracer.rs index 36befa4..fa24073 100644 --- a/crates/rpc/src/debug/tracer.rs +++ b/crates/rpc/src/debug/tracer.rs @@ -1,16 +1,13 @@ -//! This file is largely adapted from reth: `crates/rpc/rpc/src/debug.rs` +//! Core tracing logic for the debug namespace. //! -//! In particular the `debug_trace_call` function. - -use crate::DebugError; -use reth::rpc::{ - server_types::eth::EthApiError, - types::{ - TransactionInfo, - trace::geth::{ - FourByteFrame, GethDebugBuiltInTracerType, GethDebugTracerConfig, GethDebugTracerType, - GethDebugTracingOptions, GethTrace, NoopFrame, - }, +//! Largely adapted from reth: `crates/rpc/rpc/src/debug.rs`. + +use crate::debug::DebugError; +use alloy::rpc::types::{ + TransactionInfo, + trace::geth::{ + FourByteFrame, GethDebugBuiltInTracerType, GethDebugTracerConfig, GethDebugTracerType, + GethDebugTracingOptions, GethTrace, NoopFrame, }, }; use revm_inspectors::tracing::{ @@ -34,83 +31,66 @@ where Db: Database + DatabaseCommit + DatabaseRef, Insp: Inspector>, { - let Some(tracer) = &config.tracer else { return Err(EthApiError::InvalidTracerConfig.into()) }; + let Some(tracer) = &config.tracer else { + return Err(DebugError::InvalidTracerConfig); + }; let GethDebugTracerType::BuiltInTracer(built_in) = tracer else { - return Err(EthApiError::Unsupported("JS tracer").into()); + return Err(DebugError::Unsupported("JS tracer")); }; match built_in { - GethDebugBuiltInTracerType::Erc7562Tracer => trace_erc7562(trevm).map_err(Into::into), - GethDebugBuiltInTracerType::FourByteTracer => trace_four_byte(trevm).map_err(Into::into), - GethDebugBuiltInTracerType::CallTracer => { - trace_call(&config.tracer_config, trevm).map_err(Into::into) + GethDebugBuiltInTracerType::Erc7562Tracer => { + Err(DebugError::Unsupported("ERC-7562 tracing is not yet implemented")) } + GethDebugBuiltInTracerType::FourByteTracer => trace_four_byte(trevm), + GethDebugBuiltInTracerType::CallTracer => trace_call(&config.tracer_config, trevm), GethDebugBuiltInTracerType::FlatCallTracer => { - trace_flat_call(&config.tracer_config, trevm, tx_info).map_err(Into::into) - } - GethDebugBuiltInTracerType::PreStateTracer => { - trace_pre_state(&config.tracer_config, trevm).map_err(Into::into) + trace_flat_call(&config.tracer_config, trevm, tx_info) } + GethDebugBuiltInTracerType::PreStateTracer => trace_pre_state(&config.tracer_config, trevm), GethDebugBuiltInTracerType::NoopTracer => Ok(( NoopFrame::default().into(), trevm .run() - .map_err(|err| EthApiError::EvmCustom(err.into_error().to_string()))? + .map_err(|err| DebugError::Evm(err.into_error().to_string()))? .accept_state(), )), - GethDebugBuiltInTracerType::MuxTracer => { - trace_mux(&config.tracer_config, trevm, tx_info).map_err(Into::into) - } + GethDebugBuiltInTracerType::MuxTracer => trace_mux(&config.tracer_config, trevm, tx_info), } } -fn trace_erc7562( - _trevm: EvmReady, -) -> Result<(GethTrace, EvmNeedsTx), EthApiError> -where - Db: Database + DatabaseCommit, - Insp: Inspector>, -{ - // ERC-7562 tracing is not yet implemented. 
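// [Editor's note — not part of the patch] For orientation on the tracer
// dispatch above: the second positional parameter of these debug calls is
// geth's tracing options object, e.g.
//
//     { "tracer": "callTracer", "tracerConfig": { "onlyTopCall": true } }
//
// Built-in tracer names ("callTracer", "prestateTracer", "4byteTracer",
// "muxTracer", ...) deserialize to GethDebugTracerType::BuiltInTracer;
// custom JS tracer bodies deserialize to GethDebugTracerType::JsTracer and
// are rejected here with DebugError::Unsupported("JS tracer").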
- Err(EthApiError::Unsupported("ERC-7562 tracing is not yet implemented")) -} - -/// Traces a call using [`GethDebugBuiltInTracerType::FourByteTracer`]. fn trace_four_byte( trevm: EvmReady, -) -> Result<(GethTrace, EvmNeedsTx), EthApiError> +) -> Result<(GethTrace, EvmNeedsTx), DebugError> where Db: Database + DatabaseCommit, Insp: Inspector>, { let mut four_byte = FourByteInspector::default(); - - let trevm = trevm.try_with_inspector(&mut four_byte, |trevm| trevm.run()); - - let trevm = trevm.map_err(|e| EthApiError::EvmCustom(e.into_error().to_string()))?; - + let trevm = trevm + .try_with_inspector(&mut four_byte, |trevm| trevm.run()) + .map_err(|e| DebugError::Evm(e.into_error().to_string()))?; Ok((FourByteFrame::from(four_byte).into(), trevm.accept_state())) } -/// Traces a call using [`GethDebugBuiltInTracerType::CallTracer`]. fn trace_call( tracer_config: &GethDebugTracerConfig, trevm: EvmReady, -) -> Result<(GethTrace, EvmNeedsTx), EthApiError> +) -> Result<(GethTrace, EvmNeedsTx), DebugError> where Db: Database + DatabaseCommit, Insp: Inspector>, { let call_config = - tracer_config.clone().into_call_config().map_err(|_| EthApiError::InvalidTracerConfig)?; + tracer_config.clone().into_call_config().map_err(|_| DebugError::InvalidTracerConfig)?; let mut inspector = TracingInspector::new(TracingInspectorConfig::from_geth_call_config(&call_config)); - let trevm = trevm.try_with_inspector(&mut inspector, |trevm| trevm.run()); - - let trevm = trevm.map_err(|e| EthApiError::EvmCustom(e.into_error().to_string()))?; + let trevm = trevm + .try_with_inspector(&mut inspector, |trevm| trevm.run()) + .map_err(|e| DebugError::Evm(e.into_error().to_string()))?; let frame = inspector .with_transaction_gas_limit(trevm.gas_limit()) @@ -120,11 +100,10 @@ where Ok((frame.into(), trevm.accept_state())) } -/// Traces a call using [`GethDebugBuiltInTracerType::PreStateTracer`] fn trace_pre_state( tracer_config: &GethDebugTracerConfig, trevm: EvmReady, -) -> Result<(GethTrace, EvmNeedsTx), EthApiError> +) -> Result<(GethTrace, EvmNeedsTx), DebugError> where Db: Database + DatabaseCommit + DatabaseRef, Insp: Inspector>, @@ -132,28 +111,26 @@ where let prestate_config = tracer_config .clone() .into_pre_state_config() - .map_err(|_| EthApiError::InvalidTracerConfig)?; + .map_err(|_| DebugError::InvalidTracerConfig)?; let mut inspector = TracingInspector::new(TracingInspectorConfig::from_geth_prestate_config(&prestate_config)); - let trevm = trevm.try_with_inspector(&mut inspector, |trevm| trevm.run()); - - let trevm = trevm.map_err(|e| EthApiError::EvmCustom(e.into_error().to_string()))?; + let trevm = trevm + .try_with_inspector(&mut inspector, |trevm| trevm.run()) + .map_err(|e| DebugError::Evm(e.into_error().to_string()))?; let gas_limit = trevm.gas_limit(); - // NB: Normally we would call `trevm.accept_state()` here, but we need the - // state after execution to be UNCOMMITED when we compute the prestate - // diffs. + // NB: state must be UNCOMMITTED for prestate diff computation. let (result, mut trevm) = trevm.take_result_and_state(); let frame = inspector .with_transaction_gas_limit(gas_limit) .into_geth_builder() .geth_prestate_traces(&result, &prestate_config, trevm.inner_mut_unchecked().db_mut()) - .map_err(|err| EthApiError::EvmCustom(err.to_string()))?; + .map_err(|err| DebugError::Evm(err.to_string()))?; - // This is equivalent to calling `trevm.accept_state()`. + // Equivalent to `trevm.accept_state()`. 
trevm.inner_mut_unchecked().db_mut().commit(result.state); Ok((frame.into(), trevm)) @@ -163,7 +140,7 @@ fn trace_flat_call( tracer_config: &GethDebugTracerConfig, trevm: EvmReady, tx_info: TransactionInfo, -) -> Result<(GethTrace, EvmNeedsTx), EthApiError> +) -> Result<(GethTrace, EvmNeedsTx), DebugError> where Db: Database + DatabaseCommit, Insp: Inspector>, @@ -171,14 +148,14 @@ where let flat_call_config = tracer_config .clone() .into_flat_call_config() - .map_err(|_| EthApiError::InvalidTracerConfig)?; + .map_err(|_| DebugError::InvalidTracerConfig)?; let mut inspector = TracingInspector::new(TracingInspectorConfig::from_flat_call_config(&flat_call_config)); - let trevm = trevm.try_with_inspector(&mut inspector, |trevm| trevm.run()); - - let trevm = trevm.map_err(|e| EthApiError::EvmCustom(e.into_error().to_string()))?; + let trevm = trevm + .try_with_inspector(&mut inspector, |trevm| trevm.run()) + .map_err(|e| DebugError::Evm(e.into_error().to_string()))?; let frame = inspector .with_transaction_gas_limit(trevm.gas_limit()) @@ -192,30 +169,29 @@ fn trace_mux( tracer_config: &GethDebugTracerConfig, trevm: EvmReady, tx_info: TransactionInfo, -) -> Result<(GethTrace, EvmNeedsTx), EthApiError> +) -> Result<(GethTrace, EvmNeedsTx), DebugError> where Db: Database + DatabaseCommit + DatabaseRef, Insp: Inspector>, { let mux_config = - tracer_config.clone().into_mux_config().map_err(|_| EthApiError::InvalidTracerConfig)?; + tracer_config.clone().into_mux_config().map_err(|_| DebugError::InvalidTracerConfig)?; let mut inspector = MuxInspector::try_from_config(mux_config) - .map_err(|err| EthApiError::EvmCustom(err.to_string()))?; + .map_err(|err| DebugError::Evm(err.to_string()))?; - let trevm = trevm.try_with_inspector(&mut inspector, |trevm| trevm.run()); - let trevm = trevm.map_err(|e| EthApiError::EvmCustom(e.into_error().to_string()))?; + let trevm = trevm + .try_with_inspector(&mut inspector, |trevm| trevm.run()) + .map_err(|e| DebugError::Evm(e.into_error().to_string()))?; - // NB: Normally we would call `trevm.accept_state()` here, but we need the - // state after execution to be UNCOMMITED when we compute the prestate - // diffs. + // NB: state must be UNCOMMITTED for prestate diff computation. let (result, mut trevm) = trevm.take_result_and_state(); let frame = inspector .try_into_mux_frame(&result, trevm.inner_mut_unchecked().db_mut(), tx_info) - .map_err(|err| EthApiError::EvmCustom(err.to_string()))?; + .map_err(|err| DebugError::Evm(err.to_string()))?; - // This is equivalent to calling `trevm.accept_state()`. + // Equivalent to `trevm.accept_state()`. trevm.inner_mut_unchecked().db_mut().commit(result.state); Ok((frame.into(), trevm)) @@ -235,7 +211,7 @@ where // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: -//. +// // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // diff --git a/crates/rpc/src/debug/types.rs b/crates/rpc/src/debug/types.rs new file mode 100644 index 0000000..d401489 --- /dev/null +++ b/crates/rpc/src/debug/types.rs @@ -0,0 +1,17 @@ +//! Parameter types for debug namespace RPC endpoints. + +use alloy::{primitives::B256, rpc::types::trace::geth::GethDebugTracingOptions}; + +/// Params for `debug_traceBlockByNumber` and `debug_traceBlockByHash`. 
+#[derive(Debug, serde::Deserialize)]
+pub(crate) struct TraceBlockParams<T>(
+    pub(crate) T,
+    #[serde(default)] pub(crate) Option<GethDebugTracingOptions>,
+);
+
+/// Params for `debug_traceTransaction`.
+#[derive(Debug, serde::Deserialize)]
+pub(crate) struct TraceTransactionParams(
+    pub(crate) B256,
+    #[serde(default)] pub(crate) Option<GethDebugTracingOptions>,
+);
diff --git a/crates/rpc/src/eth/endpoints.rs b/crates/rpc/src/eth/endpoints.rs
index 23f099f..e54fd9d 100644
--- a/crates/rpc/src/eth/endpoints.rs
+++ b/crates/rpc/src/eth/endpoints.rs
@@ -1,504 +1,784 @@
+//! ETH namespace RPC endpoint implementations.
+
 use crate::{
-    ctx::RpcCtx,
-    eth::{CallErrorData, EthError},
+    config::{EvmBlockContext, StorageRpcCtx, gas_oracle},
+    eth::{
+        error::{CallErrorData, EthError},
+        helpers::{
+            AddrWithBlock, BlockParams, CfgFiller, FeeHistoryArgs, StorageAtArgs, SubscribeArgs,
+            TxParams, await_handler, build_receipt, build_rpc_transaction, hot_reader_at_block,
+            normalize_gas_stateless, response_tri,
+        },
+        types::{
+            BlockTransactions, EmptyArray, LazyReceipts, RpcBlock, RpcHeader, RpcReceipt,
+            RpcTransaction,
+        },
+    },
     interest::{FilterOutput, InterestKind},
-    receipts::build_signet_receipt,
-    utils::{await_handler, response_tri},
 };
 use ajj::{HandlerCtx, ResponsePayload};
 use alloy::{
-    consensus::{BlockHeader, TxEnvelope},
+    consensus::Transaction,
     eips::{
         BlockId, BlockNumberOrTag,
+        eip1559::BaseFeeParams,
         eip2718::{Decodable2718, Encodable2718},
+        eip2930::AccessListResult,
     },
-    network::Ethereum,
-    primitives::{Address, B256, U64, U256},
-    rpc::types::{
-        BlockOverrides, Filter, TransactionRequest, pubsub::SubscriptionKind, state::StateOverride,
-    },
+    primitives::{B256, U64, U256},
+    rpc::types::{FeeHistory, Filter, Log},
 };
-use reth::{
-    network::NetworkInfo,
-    primitives::TransactionMeta,
-    providers::{BlockNumReader, StateProviderFactory, TransactionsProvider},
-};
-use reth_node_api::FullNodeComponents;
-use reth_rpc_eth_api::{RpcBlock, RpcHeader, RpcReceipt, RpcTransaction};
-use serde::Deserialize;
-use signet_evm::EvmErrored;
-use signet_node_types::Pnt;
-use std::borrow::Cow;
+use revm_inspectors::access_list::AccessListInspector;
+use serde::Serialize;
+use signet_cold::{HeaderSpecifier, ReceiptSpecifier};
+use signet_hot::{HistoryRead, HotKv, db::HotDbRead, model::HotKvRead};
 use tracing::{Instrument, debug, trace_span};
-use trevm::{EstimationResult, MIN_TRANSACTION_GAS, revm::context::result::ExecutionResult};
-
-/// Args for `eth_estimateGas` and `eth_call`.
-#[derive(Debug, Deserialize)]
-pub(super) struct TxParams(
-    TransactionRequest,
-    #[serde(default)] Option<BlockId>,
-    #[serde(default)] Option<StateOverride>,
-    #[serde(default)] Option<Box<BlockOverrides>>,
-);
-
-/// Args for `eth_getBlockByHash` and `eth_getBlockByNumber`.
-#[derive(Debug, Deserialize)]
-pub(super) struct BlockParams<T>(T, #[serde(default)] Option<bool>);
-
-/// Args for `eth_feeHistory`.
-#[derive(Debug, Deserialize)]
-pub(super) struct FeeHistoryArgs(U64, BlockNumberOrTag, #[serde(default)] Option<Vec<f64>>);
-
-/// Args for `eth_getStorageAt`.
-#[derive(Debug, Deserialize)]
-pub(super) struct StorageAtArgs(Address, U256, #[serde(default)] Option<BlockId>);
-
-/// Args for `eth_getBalance`, `eth_getTransactionCount`, and `eth_getCode`.
-#[derive(Debug, Deserialize)]
-pub(super) struct AddrWithBlock(Address, #[serde(default)] Option<BlockId>);
-
-/// Args for `eth_subscribe`.
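// [Editor's sketch — not part of the patch] These tuple structs receive the
// raw JSON-RPC `params` array, so serde's sequence deserialization yields
// positional parameters, and #[serde(default)] lets trailing parameters be
// omitted. Reduced illustration with a stand-in type:

#[derive(Debug, serde::Deserialize)]
struct Params(String, #[serde(default)] Option<u64>);

fn demo() -> Result<(), serde_json::Error> {
    // The trailing optional parameter may be left out entirely...
    let short: Params = serde_json::from_str(r#"["0xabc"]"#)?;
    assert!(short.1.is_none());
    // ...or supplied as the second array element.
    let long: Params = serde_json::from_str(r#"["0xabc", 7]"#)?;
    assert_eq!(long.1, Some(7));
    Ok(())
}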
-#[derive(Debug, Deserialize)] -pub struct SubscribeArgs(pub SubscriptionKind, #[serde(default)] pub Option>); - -impl TryFrom for InterestKind { - type Error = String; - - fn try_from(args: SubscribeArgs) -> Result { - match args.0 { - SubscriptionKind::Logs => { - if let Some(filter) = args.1 { - Ok(InterestKind::Log(filter)) - } else { - Err("missing filter for Logs subscription".to_string()) - } - } - SubscriptionKind::NewHeads => { - if args.1.is_some() { - Err("filter not supported for NewHeads subscription".to_string()) - } else { - Ok(InterestKind::Block) - } - } +use trevm::{ + EstimationResult, revm::context::result::ExecutionResult, revm::database::DBErrorMarker, +}; - _ => Err(format!("unsupported subscription kind: {:?}", args.0)), - } +/// Response for `eth_syncing`. +/// +/// Returns `false` when the node is fully synced, or a sync-status +/// object when it is still catching up. +#[derive(Debug, Clone, Serialize)] +#[serde(untagged)] +pub(crate) enum SyncingResponse { + /// Node is fully synced. + NotSyncing(bool), + /// Node is still syncing. + Syncing { + /// Block number the node started syncing from. + starting_block: U64, + /// Current block the node has synced to. + current_block: U64, + /// Highest known block number on the network. + highest_block: U64, + }, +} + +/// `eth_syncing` — returns sync status or `false` when fully synced. +pub(crate) async fn syncing(ctx: StorageRpcCtx) -> Result { + match ctx.tags().sync_status() { + Some(status) => Ok(SyncingResponse::Syncing { + starting_block: U64::from(status.starting_block), + current_block: U64::from(status.current_block), + highest_block: U64::from(status.highest_block), + }), + None => Ok(SyncingResponse::NotSyncing(false)), } } -pub(super) async fn not_supported() -> ResponsePayload<(), ()> { - ResponsePayload::internal_error_message(Cow::Borrowed( - "Method not supported. See signet documentation for a list of unsupported methods: https://signet.sh/docs", - )) +/// Uncle count is always zero — Signet has no uncle blocks. +pub(crate) async fn uncle_count() -> Result { + Ok(U64::ZERO) } -pub(super) async fn protocol_version(ctx: RpcCtx) -> Result -where - Host: FullNodeComponents, - Signet: Pnt, -{ - ctx.host() - .network() - .network_status() - .await - .map(|info| info.protocol_version) - .map(U64::from) - .map_err(|s| s.to_string()) +/// Uncle block is always absent — Signet has no uncle blocks. +pub(crate) async fn uncle_block() -> Result, ()> { + Ok(None) +} + +// --------------------------------------------------------------------------- +// Simple Queries +// --------------------------------------------------------------------------- + +/// `eth_blockNumber` — returns the latest block number from block tags. +pub(crate) async fn block_number(ctx: StorageRpcCtx) -> Result { + Ok(U64::from(ctx.tags().latest())) +} + +/// `eth_chainId` — returns the configured chain ID. +pub(crate) async fn chain_id(ctx: StorageRpcCtx) -> Result { + Ok(U64::from(ctx.chain_id())) } -pub(super) async fn syncing(ctx: RpcCtx) -> Result +// --------------------------------------------------------------------------- +// Gas & Fee Queries +// --------------------------------------------------------------------------- + +/// `eth_gasPrice` — suggests gas price based on recent block tips + base fee. 
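// [Editor's sketch — not part of the patch] #[serde(untagged)] above makes
// SyncingResponse serialize as either a bare JSON boolean or a plain status
// object, which is the eth_syncing wire shape. Reduced illustration (field
// names simplified):

#[derive(serde::Serialize)]
#[serde(untagged)]
enum Sync {
    NotSyncing(bool),
    Syncing { current_block: u64, highest_block: u64 },
}

fn demo() {
    // Serializes to: false
    let idle = serde_json::to_string(&Sync::NotSyncing(false)).unwrap();
    assert_eq!(idle, "false");
    // Serializes to: {"current_block":10,"highest_block":42}
    let busy = serde_json::to_string(&Sync::Syncing { current_block: 10, highest_block: 42 })
        .unwrap();
    assert!(busy.contains("\"highest_block\":42"));
}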
+pub(crate) async fn gas_price(hctx: HandlerCtx, ctx: StorageRpcCtx) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - Ok(ctx.host().network().is_syncing()) + let task = async move { + let latest = ctx.tags().latest(); + let cold = ctx.cold(); + + let tip = gas_oracle::suggest_tip_cap(&cold, latest, ctx.config()) + .await + .map_err(|e| e.to_string())?; + + let base_fee = cold + .get_header_by_number(latest) + .await + .map_err(|e| e.to_string())? + .and_then(|h| h.base_fee_per_gas) + .unwrap_or_default(); + + Ok(tip + U256::from(base_fee)) + }; + + await_handler!(@option hctx.spawn(task)) } -pub(super) async fn block_number(ctx: RpcCtx) -> Result +/// `eth_maxPriorityFeePerGas` — suggests priority fee from recent block tips. +pub(crate) async fn max_priority_fee_per_gas( + hctx: HandlerCtx, + ctx: StorageRpcCtx, +) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - ctx.signet().provider().last_block_number().map(U64::from).map_err(|s| s.to_string()) + let task = async move { + let latest = ctx.tags().latest(); + gas_oracle::suggest_tip_cap(&ctx.cold(), latest, ctx.config()) + .await + .map_err(|e| e.to_string()) + }; + + await_handler!(@option hctx.spawn(task)) } -pub(super) async fn chain_id(ctx: RpcCtx) -> Result +/// `eth_feeHistory` — returns base fee and reward percentile data. +pub(crate) async fn fee_history( + hctx: HandlerCtx, + FeeHistoryArgs(block_count, newest, reward_percentiles): FeeHistoryArgs, + ctx: StorageRpcCtx, +) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - Ok(U64::from(ctx.signet().constants().ru_chain_id())) + let task = async move { + let mut block_count = block_count.to::(); + + if block_count == 0 { + return Ok(FeeHistory::default()); + } + + let max_fee_history = if reward_percentiles.is_none() { + ctx.config().max_header_history + } else { + ctx.config().max_block_history + }; + + block_count = block_count.min(max_fee_history); + + let newest = if newest.is_pending() { + block_count = block_count.saturating_sub(1); + BlockNumberOrTag::Latest + } else { + newest + }; + + let end_block = ctx.resolve_block_tag(newest); + let end_block_plus = end_block + 1; + + block_count = block_count.min(end_block_plus); + + // Validate percentiles + if let Some(percentiles) = &reward_percentiles + && percentiles.windows(2).any(|w| w[0] > w[1] || w[0] > 100.) 
+ { + return Err("invalid reward percentiles".to_string()); + } + + let start_block = end_block_plus - block_count; + let cold = ctx.cold(); + + let specs: Vec<_> = (start_block..=end_block).map(HeaderSpecifier::Number).collect(); + let headers = cold.get_headers(specs).await.map_err(|e| e.to_string())?; + + let mut base_fee_per_gas: Vec = Vec::with_capacity(headers.len() + 1); + let mut gas_used_ratio: Vec = Vec::with_capacity(headers.len()); + let mut rewards: Vec> = Vec::new(); + + for (offset, maybe_header) in headers.iter().enumerate() { + let Some(header) = maybe_header else { + return Err(format!("missing header at block {}", start_block + offset as u64)); + }; + + base_fee_per_gas.push(header.base_fee_per_gas.unwrap_or_default() as u128); + gas_used_ratio.push(if header.gas_limit > 0 { + header.gas_used as f64 / header.gas_limit as f64 + } else { + 0.0 + }); + + if let Some(percentiles) = &reward_percentiles { + let block_num = start_block + offset as u64; + + let (txs, receipts) = tokio::try_join!( + cold.get_transactions_in_block(block_num), + cold.get_receipts_in_block(block_num), + ) + .map_err(|e| e.to_string())?; + + let block_rewards = calculate_reward_percentiles( + percentiles, + header.gas_used, + header.base_fee_per_gas.unwrap_or_default(), + &txs, + &receipts, + ); + rewards.push(block_rewards); + } + } + + // Next block base fee + if let Some(last_header) = headers.last().and_then(|h| h.as_ref()) { + base_fee_per_gas.push( + last_header.next_block_base_fee(BaseFeeParams::ethereum()).unwrap_or_default() + as u128, + ); + } + + // NB: Signet has no blob transactions; zero-filled for client compatibility. + let base_fee_per_blob_gas = vec![0; base_fee_per_gas.len()]; + let blob_gas_used_ratio = vec![0.; gas_used_ratio.len()]; + + Ok(FeeHistory { + base_fee_per_gas, + gas_used_ratio, + base_fee_per_blob_gas, + blob_gas_used_ratio, + oldest_block: start_block, + reward: reward_percentiles.map(|_| rewards), + }) + }; + + await_handler!(@option hctx.spawn(task)) } -pub(super) async fn block( +/// Calculate reward percentiles for a single block. +/// +/// Sorts transactions by effective tip ascending, then walks +/// cumulative gas used to find the tip value at each percentile. +fn calculate_reward_percentiles( + percentiles: &[f64], + gas_used: u64, + base_fee: u64, + txs: &[signet_storage_types::RecoveredTx], + receipts: &[signet_cold::ColdReceipt], +) -> Vec { + if gas_used == 0 || txs.is_empty() { + return vec![0; percentiles.len()]; + } + + // Pair each tx's effective tip with its gas used. 
+    let mut tx_gas_and_tip: Vec<(u64, u128)> = txs
+        .iter()
+        .zip(receipts.iter())
+        .map(|(tx, receipt)| {
+            let tip = tx.effective_tip_per_gas(base_fee).unwrap_or_default();
+            (receipt.gas_used, tip)
+        })
+        .collect();
+
+    // Sort by tip ascending
+    tx_gas_and_tip.sort_by_key(|&(_, tip)| tip);
+
+    let mut result = Vec::with_capacity(percentiles.len());
+    // Seed with the first tx's gas so every comparison below is against the
+    // cumulative gas *through* tx_idx.
+    let mut cumulative_gas: u64 = tx_gas_and_tip.first().map(|&(gas, _)| gas).unwrap_or_default();
+    let mut tx_idx = 0;
+
+    for &percentile in percentiles {
+        let threshold = (gas_used as f64 * percentile / 100.0) as u64;
+
+        // Advance the cursor until this tx's cumulative gas covers the
+        // threshold. The cursor carries over between (ascending)
+        // percentiles, so each tx's gas is counted exactly once.
+        while cumulative_gas < threshold && tx_idx < tx_gas_and_tip.len() - 1 {
+            tx_idx += 1;
+            cumulative_gas += tx_gas_and_tip[tx_idx].0;
+        }
+
+        result.push(tx_gas_and_tip[tx_idx].1);
+    }
+
+    result
+}
+
+// ---------------------------------------------------------------------------
+// Block Queries
+// ---------------------------------------------------------------------------
+
+/// `eth_getBlockByHash` / `eth_getBlockByNumber` — resolve block, fetch
+/// header + transactions from cold storage, assemble RPC block response.
+pub(crate) async fn block<T, H>(
     hctx: HandlerCtx,
     BlockParams(t, full): BlockParams<T>,
-    ctx: RpcCtx<Host, Signet>,
-) -> Result<Option<RpcBlock<Ethereum>>, String>
+    ctx: StorageRpcCtx<H>,
+) -> Result<Option<RpcBlock>, String>
 where
     T: Into<BlockId>,
-    Host: FullNodeComponents,
-    Signet: Pnt,
+    H: HotKv + Send + Sync + 'static,
+    ::Error: DBErrorMarker,
 {
     let id = t.into();
-    let task = async move { ctx.signet().block(id, full).await.map_err(|e| e.to_string()) };
+    let full = full.unwrap_or(false);
 
-    await_handler!(@option hctx.spawn_blocking(task))
+    let task = async move {
+        let cold = ctx.cold();
+        let block_num = ctx.resolve_block_id(id).map_err(|e| e.to_string())?;
+
+        let (header, txs) = tokio::try_join!(
+            cold.get_header_by_number(block_num),
+            cold.get_transactions_in_block(block_num),
+        )
+        .map_err(|e| e.to_string())?;
+
+        let Some(header) = header else {
+            return Ok(None);
+        };
+
+        let block_hash = header.hash();
+        let base_fee = header.base_fee_per_gas;
+
+        let transactions = if full {
+            BlockTransactions::Full { txs, block_num, block_hash, base_fee }
+        } else {
+            BlockTransactions::Hashes(txs)
+        };
+
+        Ok(Some(RpcBlock {
+            header: alloy::rpc::types::Header {
+                inner: header.into_inner(),
+                hash: block_hash,
+                total_difficulty: None,
+                size: None,
+            },
+            transactions,
+            uncles: EmptyArray,
+        }))
+    };
+
+    await_handler!(@option hctx.spawn(task))
 }
 
-pub(super) async fn block_tx_count(
+/// `eth_getBlockTransactionCount*` — transaction count in a block.
+pub(crate) async fn block_tx_count<T, H>(
     hctx: HandlerCtx,
     (t,): (T,),
-    ctx: RpcCtx<Host, Signet>,
+    ctx: StorageRpcCtx<H>,
 ) -> Result<Option<U64>, String>
 where
     T: Into<BlockId>,
-    Host: FullNodeComponents,
-    Signet: Pnt,
+    H: HotKv + Send + Sync + 'static,
+    ::Error: DBErrorMarker,
 {
     let id = t.into();
-    let task = async move { ctx.signet().tx_count(id).await.map_err(|e| e.to_string()) };
-    await_handler!(@option hctx.spawn_blocking(task))
+    let task = async move {
+        let cold = ctx.cold();
+        let block_num = ctx.resolve_block_id(id).map_err(|e| e.to_string())?;
+
+        cold.get_transaction_count(block_num)
+            .await
+            .map(|c| Some(U64::from(c)))
+            .map_err(|e| e.to_string())
+    };
+
+    await_handler!(@option hctx.spawn(task))
 }
 
-pub(super) async fn block_receipts(
+/// `eth_getBlockReceipts` — all receipts in a block.
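// [Editor's worked example — not part of the patch] Tracing the percentile
// walk in `calculate_reward_percentiles` above: a block with gas_used = 100
// and three txs sorted by tip — (gas 50, tip 1), (gas 30, tip 5),
// (gas 20, tip 9). Cumulative gas starts at 50 (the first tx).
//
//   p = 40 -> threshold 40: 50 >= 40 already, reward = 1
//   p = 90 -> threshold 90: advance to tx 1 (cum 80), then tx 2
//             (cum 100) >= 90, reward = 9
//
// The cursor persists across percentiles, which is why callers must pass
// percentiles in ascending order (eth_feeHistory validates this above).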
+pub(crate) async fn block_receipts( hctx: HandlerCtx, (id,): (BlockId,), - ctx: RpcCtx, -) -> Result>>, String> + ctx: StorageRpcCtx, +) -> Result, String> where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { let task = async move { - let Some(receipts) = ctx.signet().raw_receipts(id).await.map_err(|e| e.to_string())? else { - return Ok(None); - }; + let cold = ctx.cold(); + let block_num = ctx.resolve_block_id(id).map_err(|e| e.to_string())?; - let Some((block_hash, block)) = - ctx.signet().raw_block(id).await.map_err(|e| e.to_string())? - else { + let (header, txs, receipts) = tokio::try_join!( + cold.get_header_by_number(block_num), + cold.get_transactions_in_block(block_num), + cold.get_receipts_in_block(block_num), + ) + .map_err(|e| e.to_string())?; + + let Some(header) = header else { return Ok(None); }; - let header = block.header(); - let block_number = header.number; let base_fee = header.base_fee_per_gas; - let excess_blob_gas = None; - let timestamp = header.timestamp; - - block - .body() - .transactions() - .zip(receipts.iter()) - .enumerate() - .map(|(idx, (tx, receipt))| { - let meta = TransactionMeta { - tx_hash: *tx.hash(), - index: idx as u64, - block_hash, - block_number, - base_fee, - excess_blob_gas, - timestamp, - }; - build_signet_receipt(tx.to_owned(), meta, receipt.to_owned(), receipts.to_vec()) + + Ok(Some(LazyReceipts { txs, receipts, base_fee })) + }; + + await_handler!(@option hctx.spawn(task)) +} + +/// `eth_getBlockHeaderByHash` / `eth_getBlockHeaderByNumber`. +pub(crate) async fn header_by( + hctx: HandlerCtx, + (t,): (T,), + ctx: StorageRpcCtx, +) -> Result, String> +where + T: Into, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, +{ + let id = t.into(); + + let task = async move { + ctx.resolve_header(id) + .map(|opt| { + opt.map(|sh| { + let hash = sh.hash(); + alloy::rpc::types::Header { + inner: sh.into_inner(), + hash, + total_difficulty: None, + size: None, + } + }) }) - .collect::, _>>() - .map(Some) .map_err(|e| e.to_string()) }; await_handler!(@option hctx.spawn_blocking(task)) } -pub(super) async fn raw_transaction_by_hash( +// --------------------------------------------------------------------------- +// Transaction Queries +// --------------------------------------------------------------------------- + +/// `eth_getTransactionByHash` — look up transaction by hash from cold storage. +pub(crate) async fn transaction_by_hash( hctx: HandlerCtx, (hash,): (B256,), - ctx: RpcCtx, -) -> Result, String> + ctx: StorageRpcCtx, +) -> Result, String> where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { let task = async move { - ctx.signet() - .provider() - .transaction_by_hash(hash) - .map_err(|e| e.to_string()) - .map(|tx| tx.as_ref().map(Encodable2718::encoded_2718).map(Into::into)) + let cold = ctx.cold(); + let Some(confirmed) = cold.get_tx_by_hash(hash).await.map_err(|e| e.to_string())? else { + return Ok(None); + }; + + let (tx, meta) = confirmed.into_parts(); + + // Fetch header for base_fee + let header = + cold.get_header_by_number(meta.block_number()).await.map_err(|e| e.to_string())?; + let base_fee = header.and_then(|h| h.base_fee_per_gas); + + Ok(Some(build_rpc_transaction(&tx, &meta, base_fee))) }; - await_handler!(@option hctx.spawn_blocking(task)) + await_handler!(@option hctx.spawn(task)) } -pub(super) async fn transaction_by_hash( +/// `eth_getRawTransactionByHash` — RLP-encoded transaction bytes. 
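// [Editor's sketch — not part of the patch] Several handlers above pair
// independent cold-storage lookups with tokio::try_join!, which awaits both
// futures concurrently and short-circuits on the first Err. The shape:

async fn fetch_pair() -> Result<(u64, u64), String> {
    async fn read_a() -> Result<u64, String> {
        Ok(1)
    }
    async fn read_b() -> Result<u64, String> {
        Ok(2)
    }

    // Both reads are in flight at once; an Err from either resolves the
    // combined future immediately with that error.
    let (a, b) = tokio::try_join!(read_a(), read_b())?;
    Ok((a, b))
}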
+pub(crate) async fn raw_transaction_by_hash( hctx: HandlerCtx, (hash,): (B256,), - ctx: RpcCtx, -) -> Result>, String> + ctx: StorageRpcCtx, +) -> Result, String> where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let task = async move { ctx.signet().rpc_transaction_by_hash(hash).map_err(|e| e.to_string()) }; + let task = async move { + ctx.cold() + .get_tx_by_hash(hash) + .await + .map(|opt| opt.map(|c| c.into_inner().encoded_2718().into())) + .map_err(|e| e.to_string()) + }; - await_handler!(@option hctx.spawn_blocking(task)) + await_handler!(@option hctx.spawn(task)) } -pub(super) async fn raw_transaction_by_block_and_index( +/// `eth_getTransactionByBlock*AndIndex` — transaction by position in block. +pub(crate) async fn transaction_by_block_and_index( hctx: HandlerCtx, (t, index): (T, U64), - ctx: RpcCtx, -) -> Result, String> + ctx: StorageRpcCtx, +) -> Result, String> where T: Into, - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let id: BlockId = t.into(); + let id = t.into(); + let task = async move { - let Some((_, block)) = ctx.signet().raw_block(id).await.map_err(|e| e.to_string())? else { + let cold = ctx.cold(); + let block_num = ctx.resolve_block_id(id).map_err(|e| e.to_string())?; + + let Some(confirmed) = cold + .get_tx_by_block_and_index(block_num, index.to::()) + .await + .map_err(|e| e.to_string())? + else { return Ok(None); }; - Ok(block.body().transactions.get(index.to::()).map(|tx| tx.encoded_2718().into())) + let (tx, meta) = confirmed.into_parts(); + let header = + cold.get_header_by_number(meta.block_number()).await.map_err(|e| e.to_string())?; + let base_fee = header.and_then(|h| h.base_fee_per_gas); + + Ok(Some(build_rpc_transaction(&tx, &meta, base_fee))) }; - await_handler!(@option hctx.spawn_blocking(task)) + await_handler!(@option hctx.spawn(task)) } -pub(super) async fn transaction_by_block_and_index( +/// `eth_getRawTransactionByBlock*AndIndex` — raw RLP bytes by position. +pub(crate) async fn raw_transaction_by_block_and_index( hctx: HandlerCtx, (t, index): (T, U64), - ctx: RpcCtx, -) -> Result>, String> + ctx: StorageRpcCtx, +) -> Result, String> where T: Into, - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { let id = t.into(); let task = async move { - ctx.signet() - .rpc_transaction_by_block_idx(id, index.to::()) + let cold = ctx.cold(); + let block_num = ctx.resolve_block_id(id).map_err(|e| e.to_string())?; + + cold.get_tx_by_block_and_index(block_num, index.to::()) .await + .map(|opt| opt.map(|c| c.into_inner().encoded_2718().into())) .map_err(|e| e.to_string()) }; - await_handler!(@option hctx.spawn_blocking(task)) + await_handler!(@option hctx.spawn(task)) } -pub(super) async fn transaction_receipt( +/// `eth_getTransactionReceipt` — receipt by tx hash. Fetches the receipt, +/// then the associated transaction and header for derived fields. 
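// [Editor's note — not part of the patch] `encoded_2718` above produces the
// network serialization defined by EIP-2718: legacy transactions are bare
// RLP, while typed transactions are a one-byte type id followed by the
// type's payload — e.g. an EIP-1559 tx encodes as 0x02 || rlp(tx) — so the
// first byte of the returned bytes identifies the variant.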
+pub(crate) async fn transaction_receipt( hctx: HandlerCtx, (hash,): (B256,), - ctx: RpcCtx, -) -> Result>, String> + ctx: StorageRpcCtx, +) -> Result, String> where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let task = - async move { ctx.signet().rpc_receipt_by_hash(hash).await.map_err(|e| e.to_string()) }; + let task = async move { + let cold = ctx.cold(); - await_handler!(@option hctx.spawn_blocking(task)) + let Some(cr) = + cold.get_receipt(ReceiptSpecifier::TxHash(hash)).await.map_err(|e| e.to_string())? + else { + return Ok(None); + }; + + let (tx, header) = tokio::try_join!( + cold.get_tx_by_hash(cr.tx_hash), + cold.get_header_by_number(cr.block_number), + ) + .map_err(|e| e.to_string())?; + + let tx = tx.ok_or(EthError::TransactionMissing).map_err(|e| e.to_string())?.into_inner(); + let base_fee = header.and_then(|h| h.base_fee_per_gas); + + Ok(Some(build_receipt(&cr, &tx, base_fee))) + }; + + await_handler!(@option hctx.spawn(task)) } -pub(super) async fn balance( +// --------------------------------------------------------------------------- +// Account State (Hot Storage) +// --------------------------------------------------------------------------- + +/// `eth_getBalance` — account balance at a given block from hot storage. +pub(crate) async fn balance( hctx: HandlerCtx, AddrWithBlock(address, block): AddrWithBlock, - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let block = block.unwrap_or(BlockId::latest()); + let id = block.unwrap_or(BlockId::latest()); + let task = async move { - let state = ctx.signet().provider().state_by_block_id(block).map_err(|e| e.to_string())?; - let bal = state.account_balance(&address).map_err(|e| e.to_string())?; - Ok(bal.unwrap_or_default()) + let (reader, height) = hot_reader_at_block(&ctx, id)?; + let acct = + reader.get_account_at_height(&address, Some(height)).map_err(|e| e.to_string())?; + + Ok(acct.map(|a| a.balance).unwrap_or_default()) }; await_handler!(@option hctx.spawn_blocking(task)) } -pub(super) async fn storage_at( +/// `eth_getStorageAt` — contract storage slot at a given block. +pub(crate) async fn storage_at( hctx: HandlerCtx, StorageAtArgs(address, key, block): StorageAtArgs, - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let block = block.unwrap_or(BlockId::latest()); + let id = block.unwrap_or(BlockId::latest()); + let task = async move { - let state = ctx.signet().provider().state_by_block_id(block).map_err(|e| e.to_string())?; - let val = state.storage(address, key.into()).map_err(|e| e.to_string())?; + let (reader, height) = hot_reader_at_block(&ctx, id)?; + let val = reader + .get_storage_at_height(&address, &key, Some(height)) + .map_err(|e| e.to_string())?; + Ok(val.unwrap_or_default().to_be_bytes().into()) }; await_handler!(@option hctx.spawn_blocking(task)) } -pub(super) async fn addr_tx_count( +/// `eth_getTransactionCount` — account nonce at a given block. 
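The account-state reads above share one semantic: a missing account is not an error but reads as its zero value. A standalone restatement under that assumption, with a stand-in `Account` type and lookup closure:

```rust
// Absent-account semantics used by getBalance / getTransactionCount:
// a missing account yields the protocol-defined zero value.
// `Account` is a stand-in, not the signet-hot account type.
#[derive(Default, Clone)]
struct Account {
    balance: u128,
    nonce: u64,
}

fn balance_at(lookup: impl Fn(u64) -> Option<Account>, height: u64) -> u128 {
    lookup(height).map(|a| a.balance).unwrap_or_default()
}

fn main() {
    let empty = |_h: u64| -> Option<Account> { None };
    assert_eq!(balance_at(empty, 100), 0); // absent account reads as zero
}
```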
+pub(crate) async fn addr_tx_count( hctx: HandlerCtx, AddrWithBlock(address, block): AddrWithBlock, - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let block = block.unwrap_or(BlockId::latest()); + let id = block.unwrap_or(BlockId::latest()); + let task = async move { - let state = ctx.signet().provider().state_by_block_id(block).map_err(|e| e.to_string())?; - let count = state.account_nonce(&address).map_err(|e| e.to_string())?; - Ok(U64::from(count.unwrap_or_default())) + let (reader, height) = hot_reader_at_block(&ctx, id)?; + let acct = + reader.get_account_at_height(&address, Some(height)).map_err(|e| e.to_string())?; + + Ok(U64::from(acct.map(|a| a.nonce).unwrap_or_default())) }; await_handler!(@option hctx.spawn_blocking(task)) } -pub(super) async fn code_at( +/// `eth_getCode` — contract bytecode at a given block. +pub(crate) async fn code_at( hctx: HandlerCtx, AddrWithBlock(address, block): AddrWithBlock, - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let block = block.unwrap_or(BlockId::latest()); + let id = block.unwrap_or(BlockId::latest()); + let task = async move { - let state = ctx.signet().provider().state_by_block_id(block).map_err(|e| e.to_string())?; - let code = state.account_code(&address).map_err(|e| e.to_string())?; - Ok(code.unwrap_or_default().original_bytes()) + let (reader, height) = hot_reader_at_block(&ctx, id)?; + let acct = + reader.get_account_at_height(&address, Some(height)).map_err(|e| e.to_string())?; + + let Some(acct) = acct else { + return Ok(alloy::primitives::Bytes::new()); + }; + + let Some(code_hash) = acct.bytecode_hash else { + return Ok(alloy::primitives::Bytes::new()); + }; + + let code = reader.get_bytecode(&code_hash).map_err(|e| e.to_string())?; + + Ok(code.map(|c| c.original_bytes()).unwrap_or_default()) }; await_handler!(@option hctx.spawn_blocking(task)) } -pub(super) async fn header_by( - hctx: HandlerCtx, - (t,): (T,), - ctx: RpcCtx, -) -> Result>, String> -where - T: Into, - Host: FullNodeComponents, - Signet: Pnt, -{ - let id = t.into(); +// --------------------------------------------------------------------------- +// EVM Execution +// --------------------------------------------------------------------------- - await_handler!(@option hctx.spawn_blocking_with_ctx(|hctx| async move { - Ok(block(hctx, BlockParams(id, None), ctx).await?.map(|block| block.header)) - })) -} - -/// Normalize transaction request gas, without making DB reads +/// Shared EVM call execution used by `eth_call` and `eth_estimateGas`. /// -/// Does the following: -/// - If the gas is below `MIN_TRANSACTION_GAS`, set it to `None` -/// - If the gas is above the `rpc_gas_cap`, set it to the `rpc_gas_cap` -/// - Otherwise, do nothing -const fn normalize_gas_stateless(request: &mut TransactionRequest, max_gas: u64) { - match request.gas { - Some(..MIN_TRANSACTION_GAS) => request.gas = None, - Some(val) if val > max_gas => request.gas = Some(max_gas), - _ => {} - } -} - -/// We want to ensure that req.gas is not less than `MIN_TRANSACTION_GAS` -/// coming into this. -pub(super) async fn run_call( +/// Resolves the block, builds a revm instance with the requested state +/// and block overrides, then executes the transaction request. 
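The gas-normalization rule (moved into helpers.rs later in this diff) is pure, so its contract is easy to pin down with assertions. A standalone restatement, assuming the conventional 21,000-gas intrinsic minimum:

```rust
// Standalone restatement of the stateless gas normalization rule.
// MIN_TRANSACTION_GAS here assumes the standard 21,000 intrinsic cost.
const MIN_TRANSACTION_GAS: u64 = 21_000;

fn normalize_gas_stateless(gas: Option<u64>, max_gas: u64) -> Option<u64> {
    match gas {
        // Below the intrinsic minimum: drop the limit entirely.
        Some(g) if g < MIN_TRANSACTION_GAS => None,
        // Above the node's RPC cap: clamp to the cap.
        Some(g) if g > max_gas => Some(max_gas),
        other => other,
    }
}

fn main() {
    assert_eq!(normalize_gas_stateless(Some(1_000), 30_000_000), None);
    assert_eq!(normalize_gas_stateless(Some(50_000_000), 30_000_000), Some(30_000_000));
    assert_eq!(normalize_gas_stateless(Some(100_000), 30_000_000), Some(100_000));
    assert_eq!(normalize_gas_stateless(None, 30_000_000), None);
}
```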
+pub(crate) async fn run_call( hctx: HandlerCtx, TxParams(request, block, state_overrides, block_overrides): TxParams, - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> ResponsePayload where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { let id = block.unwrap_or(BlockId::latest()); - - // this span is verbose yo. - let span = trace_span!( - "run_call", - ?request, - block_id = %id, - state_overrides = ?state_overrides.as_ref().map(StateOverride::len).unwrap_or_default(), - block_overrides = ?block_overrides.is_some(), - block_cfg = tracing::field::Empty, - ); + let span = trace_span!("run_call", block_id = %id); let task = async move { - let block_cfg = match ctx.signet().block_cfg(id).await { - Ok(block_cfg) => block_cfg, - Err(e) => { - return ResponsePayload::internal_error_with_message_and_obj( - "error while loading block cfg".into(), - e.to_string().into(), - ); - } - }; + let EvmBlockContext { header, db } = response_tri!(ctx.resolve_evm_block(id)); - // Set up trevm - let trevm = response_tri!(ctx.trevm(id.into(), &block_cfg)); + let trevm = signet_evm::signet_evm(db, ctx.constants().clone()) + .fill_cfg(&CfgFiller(ctx.chain_id())) + .fill_block(&header); - let mut trevm = response_tri!(trevm.maybe_apply_state_overrides(state_overrides.as_ref())) + let trevm = response_tri!(trevm.maybe_apply_state_overrides(state_overrides.as_ref())) .maybe_apply_block_overrides(block_overrides.as_deref()) .fill_tx(&request); - // AFTER applying overrides and filling the tx, we want to statefully - // modify the gas cap. + let mut trevm = trevm; let new_gas = response_tri!(trevm.cap_tx_gas()); if Some(new_gas) != request.gas { - debug!( - req_gas = ?request.gas, - new_gas, - "capping gas for call", - ); + debug!(req_gas = ?request.gas, new_gas, "capping gas for call"); } - let execution_result = response_tri!(trevm.call().map_err(EvmErrored::into_error)).0; - - ResponsePayload::Success(execution_result) + let result = response_tri!(trevm.call().map_err(signet_evm::EvmErrored::into_error)); + ResponsePayload(Ok(result.0)) } .instrument(span); await_handler!(@response_option hctx.spawn_blocking(task)) } -pub(super) async fn call( +/// `eth_call` — execute a call and return the output bytes. +/// +/// Delegates to [`run_call`], then maps the execution result to raw +/// output bytes, revert data, or halt reason. +pub(crate) async fn call( hctx: HandlerCtx, mut params: TxParams, - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> ResponsePayload where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - // Stateless gas normalization. We will do stateful gas normalization later - // in [`run_call`]. - // - // This check is done greedily, as it is a simple comparison. - let max_gas = ctx.signet().config().rpc_gas_cap; + let max_gas = ctx.config().rpc_gas_cap; normalize_gas_stateless(&mut params.0, max_gas); await_handler!(@response_option hctx.spawn_with_ctx(|hctx| async move { - let res = match run_call(hctx, params, ctx).await { - ResponsePayload::Success(res) => res, - ResponsePayload::Failure(err) => return ResponsePayload::Failure(err), + let res = match run_call(hctx, params, ctx).await.0 { + Ok(res) => res, + Err(err) => return ResponsePayload(Err(err)), }; match res { ExecutionResult::Success { output, .. } => { - ResponsePayload::Success(output.data().clone()) + ResponsePayload(Ok(output.data().clone())) } ExecutionResult::Revert { output, .. 
} => { ResponsePayload::internal_error_with_message_and_obj( @@ -516,59 +796,38 @@ where })) } -/// Estimate the gas cost of a transaction. -pub(super) async fn estimate_gas( +/// `eth_estimateGas` — estimate gas required for a transaction. +pub(crate) async fn estimate_gas( hctx: HandlerCtx, TxParams(mut request, block, state_overrides, block_overrides): TxParams, - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> ResponsePayload where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let id = block.unwrap_or(BlockId::pending()); - - // this span is verbose yo. - let span = trace_span!( - "estimate_gas", - ?request, - block_id = %id, - state_overrides = ?state_overrides.as_ref().map(StateOverride::len).unwrap_or_default(), - block_overrides = ?block_overrides.is_some(), - block_cfg = tracing::field::Empty, - ); - - // Stateless gas normalization. - let max_gas = ctx.signet().config().rpc_gas_cap; + let max_gas = ctx.config().rpc_gas_cap; normalize_gas_stateless(&mut request, max_gas); + let id = block.unwrap_or(BlockId::pending()); + let span = trace_span!("eth_estimateGas", block_id = %id); + let task = async move { - // Get the block cfg from backend, erroring if it fails - let block_cfg = match ctx.signet().block_cfg(id).await { - Ok(block_cfg) => block_cfg, - Err(e) => { - return ResponsePayload::internal_error_with_message_and_obj( - "error while loading block cfg".into(), - e.to_string().into(), - ); - } - }; + let EvmBlockContext { header, db } = response_tri!(ctx.resolve_evm_block(id)); - let trevm = response_tri!(ctx.trevm(id.into(), &block_cfg)); + let trevm = signet_evm::signet_evm(db, ctx.constants().clone()) + .fill_cfg(&CfgFiller(ctx.chain_id())) + .fill_block(&header); - // Apply state and block overrides (state overrides are fallible as - // they require DB access) let trevm = response_tri!(trevm.maybe_apply_state_overrides(state_overrides.as_ref())) .maybe_apply_block_overrides(block_overrides.as_deref()) .fill_tx(&request); - // in eth_call we cap gas here. in eth_estimate gas it is done by - // trevm - - let (estimate, _) = response_tri!(trevm.estimate_gas().map_err(EvmErrored::into_error)); + let (estimate, _) = + response_tri!(trevm.estimate_gas().map_err(signet_evm::EvmErrored::into_error)); match estimate { - EstimationResult::Success { limit, .. } => ResponsePayload::Success(U64::from(limit)), + EstimationResult::Success { limit, .. } => ResponsePayload(Ok(U64::from(limit))), EstimationResult::Revert { reason, .. } => { ResponsePayload::internal_error_with_message_and_obj( "execution reverted".into(), @@ -588,83 +847,84 @@ where await_handler!(@response_option hctx.spawn_blocking(task)) } -pub(super) async fn gas_price( +/// `eth_createAccessList` — generate an access list for a transaction. 
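The `eth_createAccessList` handler that follows runs the request under an inspector and turns the recorded touches into an access list. A minimal sketch of that bookkeeping, with illustrative types in place of the `AccessListInspector` API:

```rust
// Core bookkeeping behind access-list generation. The recorder and its
// raw-array types are illustrative, not the trevm inspector API.
use std::collections::{BTreeMap, BTreeSet};

#[derive(Default)]
struct AccessRecorder {
    slots: BTreeMap<[u8; 20], BTreeSet<[u8; 32]>>,
}

impl AccessRecorder {
    // In a real inspector this fires on every SLOAD/SSTORE and account touch.
    fn touch(&mut self, addr: [u8; 20], slot: [u8; 32]) {
        self.slots.entry(addr).or_default().insert(slot);
    }

    // After execution the recorded map is flattened into the access list.
    fn into_access_list(self) -> Vec<([u8; 20], Vec<[u8; 32]>)> {
        self.slots
            .into_iter()
            .map(|(addr, slots)| (addr, slots.into_iter().collect()))
            .collect()
    }
}

fn main() {
    let mut rec = AccessRecorder::default();
    rec.touch([1; 20], [0; 32]);
    rec.touch([1; 20], [0; 32]); // duplicates collapse in the set
    assert_eq!(rec.into_access_list()[0].1.len(), 1);
}
```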
+pub(crate) async fn create_access_list( hctx: HandlerCtx, - ctx: RpcCtx, -) -> Result + TxParams(mut request, block, state_overrides, block_overrides): TxParams, + ctx: StorageRpcCtx, +) -> ResponsePayload where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { + let max_gas = ctx.config().rpc_gas_cap; + normalize_gas_stateless(&mut request, max_gas); + + let id = block.unwrap_or(BlockId::pending()); + let span = trace_span!("eth_createAccessList", block_id = %id); + let task = async move { - let (header, suggested) = tokio::try_join!( - ctx.signet().raw_header(BlockId::latest()), - ctx.signet().gas_oracle().suggest_tip_cap(), - ) - .map_err(|e| e.to_string())?; + let EvmBlockContext { header, db } = response_tri!(ctx.resolve_evm_block(id)); - let base_fee = header.and_then(|h| h.1.base_fee_per_gas()).unwrap_or_default(); - Ok(suggested + U256::from(base_fee)) - }; + let trevm = signet_evm::signet_evm(db, ctx.constants().clone()) + .fill_cfg(&CfgFiller(ctx.chain_id())) + .fill_block(&header); - await_handler!(@option hctx.spawn_blocking(task)) -} + let trevm = response_tri!(trevm.maybe_apply_state_overrides(state_overrides.as_ref())) + .maybe_apply_block_overrides(block_overrides.as_deref()) + .fill_tx(&request); -pub(super) async fn max_priority_fee_per_gas( - hctx: HandlerCtx, - ctx: RpcCtx, -) -> Result -where - Host: FullNodeComponents, - Signet: Pnt, -{ - let task = - async move { ctx.signet().gas_oracle().suggest_tip_cap().await.map_err(|e| e.to_string()) }; + let initial = request.access_list.clone().unwrap_or_default(); + let mut inspector = AccessListInspector::new(initial); - await_handler!(@option hctx.spawn_blocking(task)) -} + let result = trevm + .try_with_inspector(&mut inspector, |trevm| trevm.run()) + .map_err(signet_evm::EvmErrored::into_error); -pub(super) async fn fee_history( - hctx: HandlerCtx, - FeeHistoryArgs(block_count, newest, reward_percentiles): FeeHistoryArgs, - ctx: RpcCtx, -) -> Result -where - Host: FullNodeComponents, - Signet: Pnt, -{ - let task = async move { - ctx.signet() - .fee_history(block_count.to::(), newest, reward_percentiles) - .await - .map_err(|e| e.to_string()) - }; + let (gas_used, error) = match result { + Ok(ref trevm) => (U256::from(trevm.gas_used()), None), + Err(ref e) => (U256::ZERO, Some(e.to_string())), + }; - await_handler!(@option hctx.spawn_blocking(task)) + let access_list = inspector.into_access_list(); + + ResponsePayload(Ok(AccessListResult { access_list, gas_used, error })) + } + .instrument(span); + + await_handler!(@response_option hctx.spawn_blocking(task)) } -pub(super) async fn send_raw_transaction( +// --------------------------------------------------------------------------- +// Transaction Submission +// --------------------------------------------------------------------------- + +/// `eth_sendRawTransaction` — decode and forward a signed transaction. +/// +/// The transaction is forwarded to the tx cache in a fire-and-forget +/// task; the hash is returned immediately. 
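The fire-and-forget shape described above, in isolation: decoding failures surface synchronously to the caller, while forwarding failures are only logged. `forward` and the `u64` hash are stand-ins for the tx-cache client and `B256`:

```rust
// Fire-and-forget submission: decode errors fail the RPC call,
// forwarding errors are logged in a detached task.
async fn forward(_raw: Vec<u8>) -> Result<(), String> {
    Ok(())
}

fn decode_hash(raw: &[u8]) -> Result<u64, String> {
    if raw.is_empty() {
        return Err("empty transaction".into());
    }
    Ok(raw.len() as u64) // placeholder for real 2718 decoding + hashing
}

async fn send_raw(raw: Vec<u8>) -> Result<u64, String> {
    let hash = decode_hash(&raw)?; // fails the RPC call synchronously
    tokio::spawn(async move {
        if let Err(e) = forward(raw).await {
            eprintln!("failed to forward raw transaction: {e}"); // logged only
        }
    });
    Ok(hash) // returned without awaiting the forward
}

#[tokio::main]
async fn main() {
    assert!(send_raw(vec![]).await.is_err()); // malformed input fails up front
    assert_eq!(send_raw(vec![0x02; 4]).await.unwrap(), 4);
}
```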
+pub(crate) async fn send_raw_transaction( hctx: HandlerCtx, (tx,): (alloy::primitives::Bytes,), - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let task = |hctx: HandlerCtx| async move { - let Some(tx_cache) = ctx.signet().tx_cache() else { - return Err("tx-cache URL not provided".to_string()); - }; + let Some(tx_cache) = ctx.tx_cache().cloned() else { + return Err("tx-cache URL not provided".to_string()); + }; - let envelope = match TxEnvelope::decode_2718(&mut tx.as_ref()) { - Ok(envelope) => envelope, - Err(e) => return Err(e.to_string()), - }; + let task = |hctx: HandlerCtx| async move { + let envelope = alloy::consensus::TxEnvelope::decode_2718(&mut tx.as_ref()) + .map_err(|e| e.to_string())?; let hash = *envelope.tx_hash(); hctx.spawn(async move { - tx_cache.forward_raw_transaction(envelope).await.map_err(|e| e.to_string()) + if let Err(e) = tx_cache.forward_raw_transaction(envelope).await { + tracing::warn!(error = %e, %hash, "failed to forward raw transaction"); + } }); Ok(hash) @@ -673,107 +933,223 @@ where await_handler!(@option hctx.spawn_blocking_with_ctx(task)) } -pub(super) async fn get_logs( +// --------------------------------------------------------------------------- +// Logs +// --------------------------------------------------------------------------- + +/// Drain a [`signet_cold::LogStream`] into a `Vec`. +/// +/// Errors from the stream (deadline exceeded, too many logs, reorg) are +/// propagated as the first encountered error. +async fn collect_log_stream(stream: signet_cold::LogStream) -> signet_cold::ColdResult> { + use tokio_stream::StreamExt; + let mut logs = Vec::new(); + let mut stream = std::pin::pin!(stream); + while let Some(log) = stream.next().await { + logs.push(log?); + } + Ok(logs) +} + +/// `eth_getLogs` — query logs from cold storage with filter criteria. +/// +/// Uses `stream_logs` for deadline enforcement and dedicated concurrency +/// control. The stream is collected into a `Vec` for the JSON-RPC response. 
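`collect_log_stream` above is the standard drain-until-first-error loop. The same helper written against any fallible stream, using only `tokio-stream`:

```rust
// Generic drain-until-first-error loop over a fallible stream,
// mirroring collect_log_stream above.
use tokio_stream::{Stream, StreamExt};

async fn drain<T, E>(stream: impl Stream<Item = Result<T, E>>) -> Result<Vec<T>, E> {
    let mut out = Vec::new();
    let mut stream = std::pin::pin!(stream);
    while let Some(item) = stream.next().await {
        out.push(item?); // first Err stops the drain and propagates
    }
    Ok(out)
}

#[tokio::main]
async fn main() {
    let ok = tokio_stream::iter(vec![Ok::<_, String>(1), Ok(2)]);
    assert_eq!(drain(ok).await.unwrap(), vec![1, 2]);

    let bad = tokio_stream::iter(vec![Ok(1), Err("boom".to_string())]);
    assert!(drain(bad).await.is_err());
}
```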
+pub(crate) async fn get_logs( hctx: HandlerCtx, - (filter,): (alloy::rpc::types::Filter,), - ctx: RpcCtx, -) -> Result, String> + (filter,): (Filter,), + ctx: StorageRpcCtx, +) -> Result, String> where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let task = async move { ctx.signet().logs(&filter).await.map_err(EthError::into_string) }; + let task = async move { + let cold = ctx.cold(); + + let resolved_filter = match filter.block_option { + alloy::rpc::types::FilterBlockOption::AtBlockHash(_) => filter, + alloy::rpc::types::FilterBlockOption::Range { from_block, to_block } => { + let from = from_block + .map(|b| ctx.resolve_block_tag(b)) + .unwrap_or_else(|| ctx.tags().latest()); + let to = to_block + .map(|b| ctx.resolve_block_tag(b)) + .unwrap_or_else(|| ctx.tags().latest()); + + if from > to { + return Err("fromBlock must not exceed toBlock".to_string()); + } + let max_blocks = ctx.config().max_blocks_per_filter; + if to - from > max_blocks { + return Err(format!("query exceeds max block range ({max_blocks})")); + } - await_handler!(@option hctx.spawn_blocking(task)) + Filter { + block_option: alloy::rpc::types::FilterBlockOption::Range { + from_block: Some(BlockNumberOrTag::Number(from)), + to_block: Some(BlockNumberOrTag::Number(to)), + }, + ..filter + } + } + }; + + let max_logs = ctx.config().max_logs_per_response; + let deadline = ctx.config().max_log_query_deadline; + + let stream = cold + .stream_logs(resolved_filter, max_logs, deadline) + .await + .map_err(|e| e.to_string())?; + + collect_log_stream(stream).await.map_err(|e| e.to_string()) + }; + + await_handler!(@option hctx.spawn(task)) } -pub(super) async fn new_filter( +// --------------------------------------------------------------------------- +// Filters +// --------------------------------------------------------------------------- + +/// `eth_newFilter` — install a log filter for polling. +pub(crate) async fn new_filter( hctx: HandlerCtx, - (filter,): (alloy::rpc::types::Filter,), - ctx: RpcCtx, + (filter,): (Filter,), + ctx: StorageRpcCtx, ) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let task = - async move { ctx.signet().install_log_filter(filter).map_err(EthError::into_string) }; + let task = async move { + let latest = ctx.tags().latest(); + Ok(ctx.filter_manager().install_log_filter(latest, filter)) + }; await_handler!(@option hctx.spawn_blocking(task)) } -pub(super) async fn new_block_filter( +/// `eth_newBlockFilter` — install a block hash filter for polling. +pub(crate) async fn new_block_filter( hctx: HandlerCtx, - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let task = async move { ctx.signet().install_block_filter().map_err(EthError::into_string) }; + let task = async move { + let latest = ctx.tags().latest(); + Ok(ctx.filter_manager().install_block_filter(latest)) + }; await_handler!(@option hctx.spawn_blocking(task)) } -pub(super) async fn uninstall_filter( +/// `eth_uninstallFilter` — remove a filter. 
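The range clamping in `get_logs` above reduces to two checks once tags are resolved to numbers. In isolation, with the same inclusive convention (`to - from` may equal `max_blocks`):

```rust
// Range checks performed by eth_getLogs, in isolation.
fn validate_range(from: u64, to: u64, max_blocks: u64) -> Result<(), String> {
    if from > to {
        return Err("fromBlock must not exceed toBlock".to_string());
    }
    if to - from > max_blocks {
        return Err(format!("query exceeds max block range ({max_blocks})"));
    }
    Ok(())
}

fn main() {
    assert!(validate_range(100, 100, 10_000).is_ok()); // single block
    assert!(validate_range(0, 10_000, 10_000).is_ok()); // exactly at the cap
    assert!(validate_range(0, 10_001, 10_000).is_err()); // one past the cap
    assert!(validate_range(5, 4, 10_000).is_err()); // inverted range
}
```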
+pub(crate) async fn uninstall_filter( hctx: HandlerCtx, (id,): (U64,), - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let task = async move { Ok(ctx.signet().uninstall_filter(id)) }; - + let task = async move { Ok(ctx.filter_manager().uninstall(id).is_some()) }; await_handler!(@option hctx.spawn_blocking(task)) } -pub(super) async fn get_filter_changes( +/// `eth_getFilterChanges` / `eth_getFilterLogs` — poll a filter for new +/// results since the last poll. Fetches matching data from cold storage. +pub(crate) async fn get_filter_changes( hctx: HandlerCtx, (id,): (U64,), - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let task = async move { ctx.signet().filter_changes(id).await.map_err(EthError::into_string) }; + let task = async move { + let fm = ctx.filter_manager(); + let mut entry = fm.get_mut(id).ok_or_else(|| format!("filter not found: {id}"))?; - await_handler!(@option hctx.spawn_blocking(task)) + let latest = ctx.tags().latest(); + let start = entry.next_start_block(); + + if start > latest { + entry.mark_polled(latest); + return Ok(entry.empty_output()); + } + + let cold = ctx.cold(); + + if entry.is_block() { + let specs: Vec<_> = (start..=latest).map(HeaderSpecifier::Number).collect(); + let headers = cold.get_headers(specs).await.map_err(|e| e.to_string())?; + let hashes: Vec = headers.into_iter().flatten().map(|h| h.hash()).collect(); + entry.mark_polled(latest); + Ok(FilterOutput::from(hashes)) + } else { + let stored = entry.as_filter().cloned().unwrap(); + let resolved = Filter { + block_option: alloy::rpc::types::FilterBlockOption::Range { + from_block: Some(BlockNumberOrTag::Number(start)), + to_block: Some(BlockNumberOrTag::Number(latest)), + }, + ..stored + }; + + let max_logs = ctx.config().max_logs_per_response; + let deadline = ctx.config().max_log_query_deadline; + + let stream = + cold.stream_logs(resolved, max_logs, deadline).await.map_err(|e| e.to_string())?; + + let logs = collect_log_stream(stream).await.map_err(|e| e.to_string())?; + + entry.mark_polled(latest); + Ok(FilterOutput::from(logs)) + } + }; + + await_handler!(@option hctx.spawn(task)) } -pub(super) async fn subscribe( +// --------------------------------------------------------------------------- +// Subscriptions +// --------------------------------------------------------------------------- + +/// `eth_subscribe` — register a push-based subscription (WebSocket/SSE). +pub(crate) async fn subscribe( hctx: HandlerCtx, sub: SubscribeArgs, - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let kind = sub.try_into()?; + let interest: InterestKind = sub.try_into()?; - let task = |hctx| async move { - ctx.signet() - .subscriptions() - .subscribe(&hctx, kind) - .ok_or_else(|| "pubsub not enabled".to_string()) - }; - - await_handler!(@option hctx.spawn_blocking_with_ctx(task)) + ctx.sub_manager() + .subscribe(&hctx, interest) + .ok_or_else(|| "notifications not enabled on this transport".to_string()) } -pub(super) async fn unsubscribe( +/// `eth_unsubscribe` — cancel a push-based subscription. 
+pub(crate) async fn unsubscribe( hctx: HandlerCtx, (id,): (U64,), - ctx: RpcCtx, + ctx: StorageRpcCtx, ) -> Result where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let task = async move { Ok(ctx.signet().subscriptions().unsubscribe(id)) }; - + let task = async move { Ok(ctx.sub_manager().unsubscribe(id)) }; await_handler!(@option hctx.spawn_blocking(task)) } diff --git a/crates/rpc/src/eth/error.rs b/crates/rpc/src/eth/error.rs index 2d85cce..135b6b8 100644 --- a/crates/rpc/src/eth/error.rs +++ b/crates/rpc/src/eth/error.rs @@ -1,26 +1,61 @@ -use reth::{ - providers::ProviderError, - rpc::{eth::filter::EthFilterError, server_types::eth::EthApiError}, -}; +//! Error types for the storage-backed ETH RPC. -/// Errors that can occur when interacting with the `eth_` namespace. +use alloy::{eips::BlockId, primitives::Bytes}; +use serde::Serialize; + +/// Errors from the storage-backed ETH RPC. #[derive(Debug, thiserror::Error)] pub enum EthError { - /// Provider error: [`ProviderError`]. - #[error("Provider error: {0}")] - Provider(#[from] ProviderError), - /// Filter error [`EthFilterError`]. - #[error("Filter error: {0}")] - Filter(#[from] EthFilterError), - /// Eth API error: [`EthApiError`]. - #[error("Eth API error: {0}")] - Rpc(#[from] EthApiError), + /// Cold storage error. + #[error("cold storage: {0}")] + Cold(#[from] signet_cold::ColdStorageError), + /// Hot storage error. + #[error("hot storage: {0}")] + Hot(#[from] signet_storage::StorageError), + /// Block resolution error. + #[error("resolve: {0}")] + Resolve(#[from] crate::config::resolve::ResolveError), + /// Invalid transaction signature. + #[error("invalid transaction signature")] + InvalidSignature, + /// Block not found. + #[error("block not found: {0}")] + BlockNotFound(BlockId), + /// Receipt found but the corresponding transaction is missing. + #[error("receipt found but transaction missing")] + TransactionMissing, + /// EVM execution error. + #[error("evm: {0}")] + Evm(String), } impl EthError { - /// Turn into a string by value, allows for - /// `.map_err(EthError::into_string)` to be used. + /// Convert the error to a string for JSON-RPC responses. pub fn into_string(self) -> String { - ToString::to_string(&self) + self.to_string() + } +} + +/// Error data for `eth_call` and `eth_estimateGas` responses. +/// +/// Serialized as JSON in the error response `data` field. +#[derive(Debug, Clone, Serialize)] +#[serde(untagged)] +pub(crate) enum CallErrorData { + /// Revert data bytes. + Bytes(Bytes), + /// Error message string. + String(String), +} + +impl From for CallErrorData { + fn from(b: Bytes) -> Self { + Self::Bytes(b) + } +} + +impl From for CallErrorData { + fn from(s: String) -> Self { + Self::String(s) } } diff --git a/crates/rpc/src/eth/helpers.rs b/crates/rpc/src/eth/helpers.rs index 5462624..399a177 100644 --- a/crates/rpc/src/eth/helpers.rs +++ b/crates/rpc/src/eth/helpers.rs @@ -1,31 +1,239 @@ -use alloy::{eips::BlockId, primitives::Bytes}; - -/// Error output of `eth_call`. -#[derive(Debug, Clone, serde::Serialize)] -#[serde(untagged)] -pub enum CallErrorData { - /// Error output is a byte array, usually a revert message. - Bytes(Bytes), - /// Output is a block id. - BlockId(BlockId), - /// Error message. - String(String), -} - -impl From for CallErrorData { - fn from(bytes: Bytes) -> Self { - Self::Bytes(bytes) +//! Parameter types, macros, and utility helpers for ETH RPC endpoints. 
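The parameter tuples defined in helpers.rs below lean on serde's positional deserialization: a JSON-RPC params array maps onto a tuple struct by position, and `#[serde(default)]` lets trailing parameters be omitted. A standalone check of that behavior, using a hypothetical `AddrWithBlock` over plain strings:

```rust
// Positional JSON-RPC params as serde tuple structs: arrays map by
// position; #[serde(default)] makes trailing params optional.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct AddrWithBlock(String, #[serde(default)] Option<String>);

fn main() {
    let full: AddrWithBlock = serde_json::from_str(r#"["0xabc", "latest"]"#).unwrap();
    assert_eq!(full.1.as_deref(), Some("latest"));

    let short: AddrWithBlock = serde_json::from_str(r#"["0xabc"]"#).unwrap();
    assert!(short.1.is_none()); // second param omitted entirely
}
```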
+ +use super::types::{RpcReceipt, RpcTransaction}; +use crate::interest::InterestKind; +use alloy::{ + consensus::{ + ReceiptEnvelope, ReceiptWithBloom, Transaction, TxReceipt, transaction::Recovered, + }, + eips::BlockId, + primitives::{Address, TxKind, U256}, + rpc::types::{ + BlockOverrides, Log, TransactionReceipt, TransactionRequest, pubsub::SubscriptionKind, + state::StateOverride, + }, +}; +use serde::Deserialize; +use signet_cold::ColdReceipt; +use signet_storage_types::ConfirmationMeta; +use trevm::MIN_TRANSACTION_GAS; + +/// Args for `eth_call` and `eth_estimateGas`. +#[derive(Debug, Deserialize)] +pub(crate) struct TxParams( + pub TransactionRequest, + #[serde(default)] pub Option, + #[serde(default)] pub Option, + #[serde(default)] pub Option>, +); + +/// Args for `eth_getBlockByHash` and `eth_getBlockByNumber`. +#[derive(Debug, Deserialize)] +pub(crate) struct BlockParams(pub T, #[serde(default)] pub Option); + +/// Args for `eth_getStorageAt`. +#[derive(Debug, Deserialize)] +pub(crate) struct StorageAtArgs(pub Address, pub U256, #[serde(default)] pub Option); + +/// Args for `eth_getBalance`, `eth_getTransactionCount`, and `eth_getCode`. +#[derive(Debug, Deserialize)] +pub(crate) struct AddrWithBlock(pub Address, #[serde(default)] pub Option); + +/// Args for `eth_feeHistory`. +#[derive(Debug, Deserialize)] +pub(crate) struct FeeHistoryArgs( + pub alloy::primitives::U64, + pub alloy::eips::BlockNumberOrTag, + #[serde(default)] pub Option>, +); + +/// Args for `eth_subscribe`. +#[derive(Debug, Deserialize)] +pub(crate) struct SubscribeArgs( + pub alloy::rpc::types::pubsub::SubscriptionKind, + #[serde(default)] pub Option>, +); + +impl TryFrom for InterestKind { + type Error = String; + + fn try_from(args: SubscribeArgs) -> Result { + match args.0 { + SubscriptionKind::Logs => args + .1 + .map(InterestKind::Log) + .ok_or_else(|| "missing filter for Logs subscription".to_string()), + SubscriptionKind::NewHeads => { + if args.1.is_some() { + Err("filter not supported for NewHeads subscription".to_string()) + } else { + Ok(InterestKind::Block) + } + } + other => Err(format!("unsupported subscription kind: {other:?}")), + } } } -impl From for CallErrorData { - fn from(id: BlockId) -> Self { - Self::BlockId(id) +/// Normalize transaction request gas without making DB reads. +/// +/// - If the gas is below `MIN_TRANSACTION_GAS`, set it to `None` +/// - If the gas is above the `rpc_gas_cap`, set it to the `rpc_gas_cap` +pub(crate) const fn normalize_gas_stateless(request: &mut TransactionRequest, max_gas: u64) { + match request.gas { + Some(..MIN_TRANSACTION_GAS) => request.gas = None, + Some(val) if val > max_gas => request.gas = Some(max_gas), + _ => {} + } +} + +/// Await a handler task, returning an error string on panic/cancel. +macro_rules! await_handler { + ($h:expr) => { + match $h.await { + Ok(res) => res, + Err(_) => return Err("task panicked or cancelled".to_string()), + } + }; + + (@option $h:expr) => { + match $h.await { + Ok(Some(res)) => res, + _ => return Err("task panicked or cancelled".to_string()), + } + }; + + (@response_option $h:expr) => { + match $h.await { + Ok(Some(res)) => res, + _ => { + return ajj::ResponsePayload::internal_error_message(std::borrow::Cow::Borrowed( + "task panicked or cancelled", + )) + } + } + }; +} +pub(crate) use await_handler; + +/// Try-operator for `ResponsePayload`. +macro_rules! 
response_tri { + ($h:expr) => { + match $h { + Ok(res) => res, + Err(err) => return ajj::ResponsePayload::internal_error_message(err.to_string().into()), + } + }; +} +pub(crate) use response_tri; + +/// Resolve a block ID and open a hot storage reader at that height. +/// +/// Shared by account-state endpoints (`balance`, `storage_at`, +/// `addr_tx_count`, `code_at`) which all follow the same +/// resolve → open reader → query pattern. +pub(crate) fn hot_reader_at_block( + ctx: &crate::config::StorageRpcCtx, + id: BlockId, +) -> Result<(H::RoTx, u64), String> +where + H: signet_hot::HotKv, + ::Error: std::error::Error + Send + Sync + 'static, +{ + let height = ctx.resolve_block_id(id).map_err(|e| e.to_string())?; + let reader = ctx.hot_reader().map_err(|e| e.to_string())?; + Ok((reader, height)) +} + +/// Small wrapper implementing [`trevm::Cfg`] to set the chain ID. +pub(crate) struct CfgFiller(pub u64); + +impl trevm::Cfg for CfgFiller { + fn fill_cfg_env(&self, cfg: &mut trevm::revm::context::CfgEnv) { + cfg.chain_id = self.0; + } +} + +/// Build an [`alloy::rpc::types::Transaction`] from cold storage types. +pub(crate) fn build_rpc_transaction( + tx: &signet_storage_types::RecoveredTx, + meta: &ConfirmationMeta, + base_fee: Option, +) -> RpcTransaction { + let signer = tx.signer(); + let tx_envelope: alloy::consensus::TxEnvelope = tx.clone().into_inner().into(); + let inner = Recovered::new_unchecked(tx_envelope, signer); + + let egp = base_fee + .map(|bf| inner.effective_tip_per_gas(bf).unwrap_or_default() as u64 + bf) + .unwrap_or_else(|| inner.max_fee_per_gas() as u64); + + alloy::rpc::types::Transaction { + inner, + block_hash: Some(meta.block_hash()), + block_number: Some(meta.block_number()), + transaction_index: Some(meta.transaction_index()), + effective_gas_price: Some(egp as u128), + } +} + +/// Build a [`TransactionReceipt`] from a [`ColdReceipt`] and its transaction. +/// +/// The transaction is needed for `to`, `contract_address`, and +/// `effective_gas_price` which are not stored on the receipt. +pub(crate) fn build_receipt( + cr: &ColdReceipt, + tx: &signet_storage_types::RecoveredTx, + base_fee: Option, +) -> RpcReceipt { + let logs_bloom = cr.receipt.bloom(); + let status = cr.receipt.status; + let cumulative_gas_used = cr.receipt.cumulative_gas_used; + + let rpc_receipt = alloy::rpc::types::eth::Receipt { + status, + cumulative_gas_used, + logs: cr.receipt.logs.clone(), + }; + + let (contract_address, to) = match tx.kind() { + TxKind::Create => (Some(cr.from.create(tx.nonce())), None), + TxKind::Call(addr) => (None, Some(Address(*addr))), + }; + + let egp = base_fee + .map(|bf| tx.effective_tip_per_gas(bf).unwrap_or_default() as u64 + bf) + .unwrap_or_else(|| tx.max_fee_per_gas() as u64); + + TransactionReceipt { + inner: build_receipt_envelope( + ReceiptWithBloom { receipt: rpc_receipt, logs_bloom }, + cr.tx_type, + ), + transaction_hash: cr.tx_hash, + transaction_index: Some(cr.transaction_index), + block_hash: Some(cr.block_hash), + block_number: Some(cr.block_number), + from: cr.from, + to, + gas_used: cr.gas_used, + contract_address, + effective_gas_price: egp as u128, + blob_gas_price: None, + blob_gas_used: None, } } -impl From for CallErrorData { - fn from(s: String) -> Self { - Self::String(s) +/// Wrap a receipt in the appropriate [`ReceiptEnvelope`] variant. 
+const fn build_receipt_envelope( + receipt: ReceiptWithBloom>, + tx_type: alloy::consensus::TxType, +) -> ReceiptEnvelope { + match tx_type { + alloy::consensus::TxType::Legacy => ReceiptEnvelope::Legacy(receipt), + alloy::consensus::TxType::Eip2930 => ReceiptEnvelope::Eip2930(receipt), + alloy::consensus::TxType::Eip1559 => ReceiptEnvelope::Eip1559(receipt), + alloy::consensus::TxType::Eip4844 => ReceiptEnvelope::Eip4844(receipt), + alloy::consensus::TxType::Eip7702 => ReceiptEnvelope::Eip7702(receipt), } } diff --git a/crates/rpc/src/eth/mod.rs b/crates/rpc/src/eth/mod.rs index 0d8c131..4e1309a 100644 --- a/crates/rpc/src/eth/mod.rs +++ b/crates/rpc/src/eth/mod.rs @@ -1,90 +1,87 @@ +//! ETH namespace RPC router backed by storage. + mod endpoints; -use endpoints::*; +use endpoints::{ + addr_tx_count, balance, block, block_number, block_receipts, block_tx_count, call, chain_id, + code_at, create_access_list, estimate_gas, fee_history, gas_price, get_filter_changes, + get_logs, header_by, max_priority_fee_per_gas, new_block_filter, new_filter, + raw_transaction_by_block_and_index, raw_transaction_by_hash, send_raw_transaction, storage_at, + subscribe, syncing, transaction_by_block_and_index, transaction_by_hash, transaction_receipt, + uncle_block, uncle_count, uninstall_filter, unsubscribe, +}; mod error; pub use error::EthError; -mod helpers; -pub use helpers::CallErrorData; +pub(crate) mod helpers; +pub(crate) mod types; -use crate::ctx::RpcCtx; +use crate::config::StorageRpcCtx; use alloy::{eips::BlockNumberOrTag, primitives::B256}; -use reth_node_api::FullNodeComponents; -use signet_node_types::Pnt; +use signet_hot::{HotKv, model::HotKvRead}; +use trevm::revm::database::DBErrorMarker; -/// Instantiate the `eth` API router. -pub fn eth() -> ajj::Router> +/// Instantiate the `eth` API router backed by storage. 
+pub(crate) fn eth() -> ajj::Router> where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { ajj::Router::new() - .route("protocolVersion", protocol_version) - .route("syncing", syncing) - .route("blockNumber", block_number) - .route("chainId", chain_id) - .route("getBlockByHash", block::) - .route("getBlockByNumber", block::) - .route("getBlockTransactionCountByHash", block_tx_count::) - .route("getBlockTransactionCountByNumber", block_tx_count::) - .route("getBlockReceipts", block_receipts) - .route("getRawTransactionByHash", raw_transaction_by_hash) - .route("getTransactionByHash", transaction_by_hash) + .route("blockNumber", block_number::) + .route("chainId", chain_id::) + .route("getBlockByHash", block::) + .route("getBlockByNumber", block::) + .route("getBlockTransactionCountByHash", block_tx_count::) + .route("getBlockTransactionCountByNumber", block_tx_count::) + .route("getBlockReceipts", block_receipts::) + .route("getRawTransactionByHash", raw_transaction_by_hash::) + .route("getTransactionByHash", transaction_by_hash::) .route( "getRawTransactionByBlockHashAndIndex", - raw_transaction_by_block_and_index::, + raw_transaction_by_block_and_index::, ) .route( "getRawTransactionByBlockNumberAndIndex", - raw_transaction_by_block_and_index::, + raw_transaction_by_block_and_index::, ) - .route("getTransactionByBlockHashAndIndex", transaction_by_block_and_index::) + .route("getTransactionByBlockHashAndIndex", transaction_by_block_and_index::) .route( "getTransactionByBlockNumberAndIndex", - transaction_by_block_and_index::, + transaction_by_block_and_index::, ) - .route("getTransactionReceipt", transaction_receipt) - .route("getBalance", balance) - .route("getStorageAt", storage_at) - .route("getTransactionCount", addr_tx_count) - .route("getCode", code_at) - .route("getBlockHeaderByHash", header_by::) - .route("getBlockHeaderByNumber", header_by::) - .route("call", call) - .route("estimateGas", estimate_gas) - .route("gasPrice", gas_price) - .route("maxPriorityFeePerGas", max_priority_fee_per_gas) - .route("feeHistory", fee_history) - .route("sendRawTransaction", send_raw_transaction) - .route("getLogs", get_logs) - .route("newFilter", new_filter) - .route("newBlockFilter", new_block_filter) - .route("uninstallFilter", uninstall_filter) - .route("getFilterChanges", get_filter_changes) - .route("getFilterLogs", get_filter_changes) - .route("subscribe", subscribe) - .route("unsubscribe", unsubscribe) - // --------------- - // - // Unsupported methods: - // - .route("coinbase", not_supported) - .route("accounts", not_supported) - .route("blobBaseFee", not_supported) - .route("getUncleCountByBlockHash", not_supported) - .route("getUncleCountByBlockNumber", not_supported) - .route("getUncleByBlockHashAndIndex", not_supported) - .route("getUncleByBlockNumberAndIndex", not_supported) - .route("getWork", not_supported) - .route("hashrate", not_supported) - .route("mining", not_supported) - .route("submitHashrate", not_supported) - .route("submitWork", not_supported) - .route("sendTransaction", not_supported) - .route("sign", not_supported) - .route("signTransaction", not_supported) - .route("signTypedData", not_supported) - .route("getProof", not_supported) - .route("createAccessList", not_supported) - .route("newPendingTransactionFilter", not_supported) + .route("getTransactionReceipt", transaction_receipt::) + .route("getBlockHeaderByHash", header_by::) + .route("getBlockHeaderByNumber", header_by::) + .route("getBalance", balance::) 
+ .route("getStorageAt", storage_at::) + .route("getTransactionCount", addr_tx_count::) + .route("getCode", code_at::) + .route("call", call::) + .route("estimateGas", estimate_gas::) + .route("sendRawTransaction", send_raw_transaction::) + .route("getLogs", get_logs::) + .route("syncing", syncing::) + .route("gasPrice", gas_price::) + .route("maxPriorityFeePerGas", max_priority_fee_per_gas::) + .route("feeHistory", fee_history::) + .route("createAccessList", create_access_list::) + .route("newFilter", new_filter::) + .route("newBlockFilter", new_block_filter::) + .route("uninstallFilter", uninstall_filter::) + .route("getFilterChanges", get_filter_changes::) + .route("getFilterLogs", get_filter_changes::) + .route("subscribe", subscribe::) + .route("unsubscribe", unsubscribe::) + // Uncle queries return semantically correct values (0 / null) + // because Signet has no uncle blocks. + .route("getUncleCountByBlockHash", uncle_count) + .route("getUncleCountByBlockNumber", uncle_count) + .route("getUncleByBlockHashAndIndex", uncle_block) + .route("getUncleByBlockNumberAndIndex", uncle_block) + // Unsupported methods (return method_not_found by default): + // - protocolVersion, coinbase, accounts, blobBaseFee + // - getWork, hashrate, mining, submitHashrate, submitWork + // - sendTransaction, sign, signTransaction, signTypedData + // - getProof, newPendingTransactionFilter } diff --git a/crates/rpc/src/eth/types.rs b/crates/rpc/src/eth/types.rs new file mode 100644 index 0000000..0f43742 --- /dev/null +++ b/crates/rpc/src/eth/types.rs @@ -0,0 +1,98 @@ +//! Response and serialization types for ETH RPC endpoints. + +use super::helpers::{build_receipt, build_rpc_transaction}; +use alloy::{ + network::{Ethereum, Network}, + primitives::B256, +}; +use serde::{Serialize, Serializer, ser::SerializeSeq}; +use signet_cold::ColdReceipt; + +/// RPC header type for the Ethereum network. +pub(crate) type RpcHeader = ::HeaderResponse; +/// RPC transaction type for the Ethereum network. +pub(crate) type RpcTransaction = ::TransactionResponse; +/// RPC receipt type for the Ethereum network. +pub(crate) type RpcReceipt = ::ReceiptResponse; + +/// Serializes as an empty JSON array `[]` without allocating. +pub(crate) struct EmptyArray; + +impl Serialize for EmptyArray { + fn serialize(&self, serializer: S) -> Result { + serializer.serialize_seq(Some(0))?.end() + } +} + +/// Block transactions with lazy serialization. +/// +/// In both variants the raw `RecoveredTx` list is kept and transformed +/// during serialization — either to full RPC transaction objects or to bare +/// hashes — avoiding an intermediate `Vec` allocation. +pub(crate) enum BlockTransactions { + Full { + txs: Vec, + block_num: u64, + block_hash: B256, + base_fee: Option, + }, + Hashes(Vec), +} + +impl Serialize for BlockTransactions { + fn serialize(&self, serializer: S) -> Result { + match self { + Self::Full { txs, block_num, block_hash, base_fee } => { + let mut seq = serializer.serialize_seq(Some(txs.len()))?; + for (i, tx) in txs.iter().enumerate() { + let meta = signet_storage_types::ConfirmationMeta::new( + *block_num, + *block_hash, + i as u64, + ); + seq.serialize_element(&build_rpc_transaction(tx, &meta, *base_fee))?; + } + seq.end() + } + Self::Hashes(txs) => { + let mut seq = serializer.serialize_seq(Some(txs.len()))?; + for tx in txs { + seq.serialize_element(tx.tx_hash())?; + } + seq.end() + } + } + } +} + +/// RPC block response with lazy transaction serialization. 
+/// +/// Replaces the alloy `Block` type so that transactions are serialized +/// inline from raw storage data. Signet has no uncles or withdrawals, so +/// those are hardcoded as empty/absent to avoid allocations. +#[derive(Serialize)] +#[serde(rename_all = "camelCase")] +pub(crate) struct RpcBlock { + #[serde(flatten)] + pub(crate) header: alloy::rpc::types::Header, + pub(crate) transactions: BlockTransactions, + pub(crate) uncles: EmptyArray, +} + +/// Lazily serialized receipt list. Each receipt is built and serialized +/// inline without allocating an intermediate `Vec`. +pub(crate) struct LazyReceipts { + pub(crate) txs: Vec, + pub(crate) receipts: Vec, + pub(crate) base_fee: Option, +} + +impl Serialize for LazyReceipts { + fn serialize(&self, serializer: S) -> Result { + let mut seq = serializer.serialize_seq(Some(self.txs.len()))?; + for (tx, cr) in self.txs.iter().zip(&self.receipts) { + seq.serialize_element(&build_receipt(cr, tx, self.base_fee))?; + } + seq.end() + } +} diff --git a/crates/rpc/src/inspect/db.rs b/crates/rpc/src/inspect/db.rs deleted file mode 100644 index 73f2cde..0000000 --- a/crates/rpc/src/inspect/db.rs +++ /dev/null @@ -1,164 +0,0 @@ -use ajj::serde_json; -use eyre::WrapErr; -use reth::providers::{ProviderFactory, providers::ProviderNodeTypes}; -use reth_db::{Database, TableViewer, mdbx, table::Table}; -use reth_db_common::{DbTool, ListFilter}; -use signet_node_types::Pnt; -use std::sync::{Arc, OnceLock}; -use tracing::instrument; - -/// Modeled on the `Command` struct from `reth/crates/cli/commands/src/db/list.rs` -#[derive(Debug, serde::Deserialize)] -pub(crate) struct DbArgs( - /// The table name - String, // 0 - /// Skip first N entries - #[serde(default)] - usize, // 1 - /// How many items to take from the walker - #[serde(default)] - Option, // 2 - /// Search parameter for both keys and values. Prefix it with `0x` to search for binary data, - /// and text otherwise. - /// - /// ATTENTION! For compressed tables (`Transactions` and `Receipts`), there might be - /// missing results since the search uses the raw uncompressed value from the database. - #[serde(default)] - Option, // 3 -); - -impl DbArgs { - /// Get the table name. - pub(crate) fn table_name(&self) -> &str { - &self.0 - } - - /// Parse the table name into a [`reth_db::Tables`] enum. - pub(crate) fn table(&self) -> Result { - self.table_name().parse() - } - - /// Get the skip value. - pub(crate) const fn skip(&self) -> usize { - self.1 - } - - /// Get the length value. - pub(crate) fn len(&self) -> usize { - self.2.unwrap_or(5) - } - - /// Get the search value. - pub(crate) fn search(&self) -> Vec { - self.3 - .as_ref() - .map(|search| { - if let Some(search) = search.strip_prefix("0x") { - return alloy::primitives::hex::decode(search).unwrap(); - } - search.as_bytes().to_vec() - }) - .unwrap_or_default() - } - - /// Generate [`ListFilter`] from command. - pub(crate) fn list_filter(&self) -> ListFilter { - ListFilter { - skip: self.skip(), - len: self.len(), - search: self.search(), - min_row_size: 0, - min_key_size: 0, - min_value_size: 0, - reverse: false, - only_count: false, - } - } -} - -pub(crate) struct ListTableViewer<'a, 'b, N: Pnt> { - pub(crate) factory: &'b ProviderFactory, - pub(crate) args: &'a DbArgs, - - pub(crate) output: OnceLock>, -} - -impl<'a, 'b, N: Pnt> ListTableViewer<'a, 'b, N> { - /// Create a new `ListTableViewer`. 
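`BlockTransactions` and `LazyReceipts` above hand-implement `Serialize` so each element is built during serialization rather than collected into an intermediate `Vec` first. The trick in miniature:

```rust
// serde's SerializeSeq lets a wrapper emit elements on the fly,
// skipping the intermediate Vec a derived impl would require.
use serde::{Serialize, Serializer, ser::SerializeSeq};

struct Doubled<'a>(&'a [u32]);

impl Serialize for Doubled<'_> {
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        let mut seq = serializer.serialize_seq(Some(self.0.len()))?;
        for n in self.0 {
            seq.serialize_element(&(n * 2))?; // computed inline, never stored
        }
        seq.end()
    }
}

fn main() {
    assert_eq!(serde_json::to_string(&Doubled(&[1, 2, 3])).unwrap(), "[2,4,6]");
}
```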
- pub(crate) fn new(factory: &'b ProviderFactory, args: &'a DbArgs) -> Self { - Self { factory, args, output: Default::default() } - } - - /// Take the output if it has been initialized, otherwise return `None`. - pub(crate) fn take_output(self) -> Option> { - self.output.into_inner() - } -} - -impl>> TableViewer<()> - for ListTableViewer<'_, '_, N> -{ - type Error = eyre::Report; - - #[instrument(skip(self), err)] - fn view(&self) -> eyre::Result<()> { - let tool = DbTool { provider_factory: self.factory.clone() }; - - self.factory.db_ref().view(|tx| { - let table_db = - tx.inner.open_db(Some(self.args.table_name())).wrap_err("Could not open db.")?; - let stats = tx - .inner - .db_stat(table_db.dbi()) - .wrap_err(format!("Could not find table: {}", stringify!($table)))?; - let total_entries = stats.entries(); - let final_entry_idx = total_entries.saturating_sub(1); - eyre::ensure!( - self.args.skip() >= final_entry_idx, - "Skip value {} is greater than total entries {}", - self.args.skip(), - total_entries - ); - - let list_filter = self.args.list_filter(); - - let (list, _) = tool.list::(&list_filter)?; - - let json = - serde_json::value::to_raw_value(&list).wrap_err("Failed to serialize list")?; - - self.output.get_or_init(|| json); - - Ok(()) - })??; - - Ok(()) - } -} - -// Some code in this file is adapted from github.com/paradigmxyz/reth. -// -// Particularly the `reth/crates/cli/commands/src/db/list.rs` file. It is -// reproduced here under the terms of the MIT license, -// -// The MIT License (MIT) -// -// Copyright (c) 2022-2025 Reth Contributors -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. diff --git a/crates/rpc/src/inspect/endpoints.rs b/crates/rpc/src/inspect/endpoints.rs deleted file mode 100644 index e5155ff..0000000 --- a/crates/rpc/src/inspect/endpoints.rs +++ /dev/null @@ -1,37 +0,0 @@ -use crate::{ - inspect::db::{DbArgs, ListTableViewer}, - utils::{await_handler, response_tri}, -}; -use ajj::{HandlerCtx, ResponsePayload}; -use reth::providers::{ProviderFactory, providers::ProviderNodeTypes}; -use reth_db::mdbx; -use signet_node_types::Pnt; -use std::sync::Arc; - -/// Handler for the `db` endpoint in the `inspect` module. 
-pub(super) async fn db( - hctx: HandlerCtx, - args: DbArgs, - ctx: ProviderFactory, -) -> ResponsePayload, String> -where - Signet: Pnt + ProviderNodeTypes>, -{ - let task = async move { - let table: reth_db::Tables = response_tri!(args.table(), "invalid table name"); - - let viewer = ListTableViewer::new(&ctx, &args); - - response_tri!(table.view(&viewer), "Failed to view table"); - - let Some(output) = viewer.take_output() else { - return ResponsePayload::internal_error_message( - "No output generated. The task may have panicked or been cancelled. This is a bug, please report it.".into(), - ); - }; - - ResponsePayload::Success(output) - }; - - await_handler!(@response_option hctx.spawn_blocking(task)) -} diff --git a/crates/rpc/src/inspect/mod.rs b/crates/rpc/src/inspect/mod.rs deleted file mode 100644 index e0cb981..0000000 --- a/crates/rpc/src/inspect/mod.rs +++ /dev/null @@ -1,16 +0,0 @@ -pub(crate) mod db; - -mod endpoints; - -use reth::providers::{ProviderFactory, providers::ProviderNodeTypes}; -use reth_db::mdbx; -use signet_node_types::Pnt; -use std::sync::Arc; - -/// Instantiate the `inspect` API router. -pub fn inspect() -> ajj::Router> -where - Signet: Pnt + ProviderNodeTypes>, -{ - ajj::Router::new().route("db", endpoints::db::) -} diff --git a/crates/rpc/src/interest/buffer.rs b/crates/rpc/src/interest/buffer.rs new file mode 100644 index 0000000..0485714 --- /dev/null +++ b/crates/rpc/src/interest/buffer.rs @@ -0,0 +1,124 @@ +//! Unified event buffer for filters and subscriptions. +//! +//! [`EventBuffer`] is a generic buffer over the block representation. +//! Filters use `EventBuffer` (block hashes), while subscriptions +//! use `EventBuffer
<Header>` (full headers). Both variants share a
+//! common log-event arm.
+
+use alloy::{
+    primitives::B256,
+    rpc::types::{Header, Log},
+};
+use serde::Serialize;
+use std::collections::VecDeque;
+
+/// Buffer of chain events, parameterized by the block representation.
+///
+/// Filters use `EventBuffer<B256>` (block hashes), while subscriptions
+/// use `EventBuffer<Header>` (full headers).
+#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
+#[serde(untagged)]
+pub(crate) enum EventBuffer<B> {
+    /// Log entries.
+    Log(VecDeque<Log>),
+    /// Block events.
+    Block(VecDeque<B>),
+}
+
+impl<B> EventBuffer<B> {
+    /// True if the buffer contains no events.
+    pub(crate) fn is_empty(&self) -> bool {
+        self.len() == 0
+    }
+
+    /// Number of events in the buffer.
+    pub(crate) fn len(&self) -> usize {
+        match self {
+            Self::Log(logs) => logs.len(),
+            Self::Block(blocks) => blocks.len(),
+        }
+    }
+
+    /// Extend this buffer with events from another buffer of the same kind.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the buffers are of different kinds (log vs. block).
+    pub(crate) fn extend(&mut self, other: Self) {
+        match (self, other) {
+            (Self::Log(a), Self::Log(b)) => a.extend(b),
+            (Self::Block(a), Self::Block(b)) => a.extend(b),
+            _ => panic!("attempted to extend with mismatched buffer kinds"),
+        }
+    }
+
+    /// Pop the next event from the front of the buffer.
+    pub(crate) fn pop_front(&mut self) -> Option<EventItem<B>> {
+        match self {
+            Self::Log(logs) => logs.pop_front().map(|l| EventItem::Log(Box::new(l))),
+            Self::Block(blocks) => blocks.pop_front().map(EventItem::Block),
+        }
+    }
+}
+
+/// A single event popped from an [`EventBuffer`].
+#[derive(Debug, Clone, PartialEq, Eq, Serialize)]
+#[serde(untagged)]
+pub(crate) enum EventItem<B> {
+    /// A log entry.
+    Log(Box<Log>),
+    /// A block event.
+    Block(B),
+}
+
+// --- FilterOutput (EventBuffer<B256>) conversions ---
+
+impl From<Vec<B256>> for EventBuffer<B256> {
+    fn from(hashes: Vec<B256>) -> Self {
+        Self::Block(hashes.into())
+    }
+}
+
+impl From<Vec<Log>> for EventBuffer<B256> {
+    fn from(logs: Vec<Log>) -> Self {
+        Self::Log(logs.into())
+    }
+}
+
+impl FromIterator<Log> for EventBuffer<B256> {
+    fn from_iter<T: IntoIterator<Item = Log>>(iter: T) -> Self {
+        Self::Log(iter.into_iter().collect())
+    }
+}
+
+impl FromIterator<B256> for EventBuffer<B256> {
+    fn from_iter<T: IntoIterator<Item = B256>>(iter: T) -> Self {
+        Self::Block(iter.into_iter().collect())
+    }
+}
+
+// --- SubscriptionBuffer (EventBuffer<Header>) conversions ---
+
+impl From<Vec<Log>> for EventBuffer<Header> {
+    fn from(logs: Vec<Log>) -> Self {
+        Self::Log(logs.into())
+    }
+}
+
+impl From<Vec<Header>> for EventBuffer<Header> {
+    fn from(headers: Vec<Header>) -> Self {
+        Self::Block(headers.into())
+    }
+}
+
+impl FromIterator<Log> for EventBuffer<Header> {
+    fn from_iter<T: IntoIterator<Item = Log>>(iter: T) -> Self {
+        Self::Log(iter.into_iter().collect())
+    }
+}
+
+impl FromIterator<Header> for EventBuffer<Header>
{ + fn from_iter>(iter: T) -> Self { + Self::Block(iter.into_iter().collect()) + } +} diff --git a/crates/rpc/src/interest/filters.rs b/crates/rpc/src/interest/filters.rs index df5b867..ae09367 100644 --- a/crates/rpc/src/interest/filters.rs +++ b/crates/rpc/src/interest/filters.rs @@ -1,11 +1,12 @@ -use crate::interest::InterestKind; +//! Filter management for `eth_newFilter` / `eth_getFilterChanges`. + +use crate::interest::{InterestKind, buffer::EventBuffer}; use alloy::{ primitives::{B256, U64}, - rpc::types::{Filter, Log}, + rpc::types::Filter, }; use dashmap::{DashMap, mapref::one::RefMut}; use std::{ - collections::VecDeque, sync::{ Arc, Weak, atomic::{AtomicU64, Ordering}, @@ -16,117 +17,13 @@ use tracing::trace; type FilterId = U64; -/// Either type for filter outputs. -#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize)] -#[serde(untagged)] -pub enum Either { - /// Log - Log(Log), - /// Block hash - Block(B256), -} - -/// The output of a filter. -/// -/// This will be either a list of logs or a list of block hashes. Pending tx -/// filters are not supported by Signet. For convenience, there is a special -/// variant for empty results. -#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize)] -#[serde(untagged)] -pub enum FilterOutput { - /// Empty output. Holds a `[(); 0]` to make sure it serializes as an empty - /// array. - Empty([(); 0]), - /// Logs - Log(VecDeque), - /// Block hashes - Block(VecDeque), -} - -impl FilterOutput { - /// Create an empty filter output. - pub const fn empty() -> Self { - Self::Empty([]) - } - - /// True if this is an empty filter output. - pub fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// The length of this filter output. - pub fn len(&self) -> usize { - match self { - Self::Empty(_) => 0, - Self::Log(logs) => logs.len(), - Self::Block(blocks) => blocks.len(), - } - } - - /// Extend this filter output with another. - /// - /// # Panics - /// - /// If the two filter outputs are of different types. - pub fn extend(&mut self, other: Self) { - match (self, other) { - // If we're a log, we can extend with other logs - (Self::Log(logs), Self::Log(other_logs)) => logs.extend(other_logs), - // If we're a block, we can extend with other blocks - (Self::Block(blocks), Self::Block(other_blocks)) => blocks.extend(other_blocks), - // Extending with empty is a noop - (_, Self::Empty(_)) => (), - // If we're empty, just take the other value - (this @ Self::Empty(_), other) => *this = other, - // This will occur when trying to mix log and block outputs - _ => panic!("attempted to mix log and block outputs"), - } - } - - /// Pop a value from the front of the filter output. - pub fn pop_front(&mut self) -> Option { - match self { - Self::Log(logs) => logs.pop_front().map(Either::Log), - Self::Block(blocks) => blocks.pop_front().map(Either::Block), - Self::Empty(_) => None, - } - } -} - -impl From> for FilterOutput { - fn from(block_hashes: Vec) -> Self { - Self::Block(block_hashes.into()) - } -} - -impl From> for FilterOutput { - fn from(logs: Vec) -> Self { - Self::Log(logs.into()) - } -} - -impl FromIterator for FilterOutput { - fn from_iter>(iter: T) -> Self { - let inner: VecDeque<_> = iter.into_iter().collect(); - if inner.is_empty() { Self::empty() } else { Self::Log(inner) } - } -} - -impl FromIterator for FilterOutput { - fn from_iter>(iter: T) -> Self { - let inner: VecDeque<_> = iter.into_iter().collect(); - if inner.is_empty() { Self::empty() } else { Self::Block(inner) } - } -} +/// Output of a polled filter: log entries or block hashes. 
+pub(crate) type FilterOutput = EventBuffer; /// An active filter. /// -/// This struct records -/// - the filter details -/// - the [`Instant`] at which the filter was last polled -/// - the first block whose contents should be considered by the filter -/// -/// These are updated via the [`Self::mark_polled`] method. +/// Records the filter details, the [`Instant`] at which the filter was last +/// polled, and the first block whose contents should be considered. #[derive(Debug, Clone, PartialEq, Eq)] pub(crate) struct ActiveFilter { next_start_block: u64, @@ -147,11 +44,6 @@ impl core::fmt::Display for ActiveFilter { } impl ActiveFilter { - /// True if this is a log filter. - pub(crate) const fn is_filter(&self) -> bool { - self.kind.is_filter() - } - /// True if this is a block filter. pub(crate) const fn is_block(&self) -> bool { self.kind.is_block() @@ -168,7 +60,7 @@ impl ActiveFilter { self.last_poll_time = Instant::now(); } - /// Get the last block for which the filter was polled. + /// Get the next start block for the filter. pub(crate) const fn next_start_block(&self) -> u64 { self.next_start_block } @@ -177,6 +69,11 @@ impl ActiveFilter { pub(crate) fn time_since_last_poll(&self) -> Duration { self.last_poll_time.elapsed() } + + /// Return an empty output of the same kind as this filter. + pub(crate) const fn empty_output(&self) -> FilterOutput { + self.kind.empty_output() + } } /// Inner logic for [`FilterManager`]. @@ -206,7 +103,6 @@ impl FilterManagerInner { fn install(&self, current_block: u64, kind: InterestKind) -> FilterId { let id = self.next_id(); let next_start_block = current_block + 1; - // discard the result, as we'll not reuse ever. let _ = self .filters .insert(id, ActiveFilter { next_start_block, last_poll_time: Instant::now(), kind }); @@ -313,7 +209,7 @@ impl FilterCleanTask { // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: -//. +// // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // diff --git a/crates/rpc/src/interest/kind.rs b/crates/rpc/src/interest/kind.rs index 3d8ecf7..f883e5a 100644 --- a/crates/rpc/src/interest/kind.rs +++ b/crates/rpc/src/interest/kind.rs @@ -1,106 +1,86 @@ -use crate::interest::{filters::FilterOutput, subs::SubscriptionBuffer}; -use alloy::{ - consensus::BlockHeader, - rpc::types::{Filter, Header, Log}, -}; -use reth::{providers::CanonStateNotification, rpc::types::FilteredParams}; +//! Filter kinds for subscriptions and polling filters. + +use crate::interest::{NewBlockNotification, filters::FilterOutput, subs::SubscriptionBuffer}; +use alloy::rpc::types::{Filter, Header, Log}; use std::collections::VecDeque; /// The different kinds of filters that can be created. /// /// Pending tx filters are not supported by Signet. #[derive(Debug, Clone, PartialEq, Eq)] -pub enum InterestKind { +pub(crate) enum InterestKind { + /// Log filter with a user-supplied [`Filter`]. Log(Box), + /// New-block filter. Block, } impl InterestKind { - /// True if this is a log filter. - pub const fn is_filter(&self) -> bool { - matches!(self, Self::Log(_)) - } - /// True if this is a block filter. - pub const fn is_block(&self) -> bool { + pub(crate) const fn is_block(&self) -> bool { matches!(self, Self::Block) } /// Fallible cast to a filter. 
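+    /// Returns [`None`] for block filters.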
- pub const fn as_filter(&self) -> Option<&Filter> { + pub(crate) const fn as_filter(&self) -> Option<&Filter> { match self { Self::Log(f) => Some(f), _ => None, } } - fn apply_block(&self, notif: &CanonStateNotification) -> SubscriptionBuffer { - notif - .committed() - .blocks_iter() - .map(|b| Header { - hash: b.hash(), - inner: b.clone_header(), - total_difficulty: None, - size: None, - }) - .collect() + fn apply_block(notif: &NewBlockNotification) -> SubscriptionBuffer { + let header = Header { + hash: notif.header.hash_slow(), + inner: notif.header.clone(), + total_difficulty: None, + size: None, + }; + SubscriptionBuffer::Block(VecDeque::from([header])) } - fn apply_filter(&self, notif: &CanonStateNotification) -> SubscriptionBuffer { - // NB: borrowing OUTSIDE the top-level closure prevents this value from - // being moved into the closure, which would result in the inner - // closures violating borrowing rules. + fn apply_filter(&self, notif: &NewBlockNotification) -> SubscriptionBuffer { let filter = self.as_filter().unwrap(); + let block_hash = notif.header.hash_slow(); + let block_number = notif.header.number; + let block_timestamp = notif.header.timestamp; - let address_filter = FilteredParams::address_filter(&filter.address); - let topics_filter = FilteredParams::topics_filter(&filter.topics); - - notif - .committed() - .blocks_and_receipts() - .filter(|(block, _)| { - let bloom = block.header().logs_bloom(); - FilteredParams::matches_address(bloom, &address_filter) - && FilteredParams::matches_topics(bloom, &topics_filter) + let logs: VecDeque = notif + .receipts + .iter() + .enumerate() + .flat_map(|(tx_idx, receipt)| { + let tx_hash = *notif.transactions[tx_idx].tx_hash(); + receipt.inner.logs.iter().map(move |log| (tx_idx, tx_hash, log)) }) - .flat_map(move |(block, receipts)| { - let block_num_hash = block.num_hash(); - - receipts.iter().enumerate().flat_map(move |(transaction_index, receipt)| { - let transaction_hash = *block.body().transactions[transaction_index].hash(); - - receipt.logs.iter().enumerate().filter_map(move |(log_index, log)| { - if filter.matches(log) { - Some(Log { - inner: log.clone(), - block_hash: Some(block_num_hash.hash), - block_number: Some(block_num_hash.number), - block_timestamp: Some(block.header().timestamp()), - transaction_hash: Some(transaction_hash), - transaction_index: Some(transaction_index as u64), - log_index: Some(log_index as u64), - removed: false, - }) - } else { - None - } - }) - }) + .enumerate() + .filter(|(_, (_, _, log))| filter.matches(log)) + .map(|(log_idx, (tx_idx, tx_hash, log))| Log { + inner: log.clone(), + block_hash: Some(block_hash), + block_number: Some(block_number), + block_timestamp: Some(block_timestamp), + transaction_hash: Some(tx_hash), + transaction_index: Some(tx_idx as u64), + log_index: Some(log_idx as u64), + removed: false, }) - .collect() + .collect(); + + SubscriptionBuffer::Log(logs) } - /// Apply the filter to a [`CanonStateNotification`] - pub fn filter_notification_for_sub( + /// Apply the filter to a [`NewBlockNotification`], producing a + /// subscription buffer. + pub(crate) fn filter_notification_for_sub( &self, - notif: &CanonStateNotification, + notif: &NewBlockNotification, ) -> SubscriptionBuffer { - if self.is_block() { self.apply_block(notif) } else { self.apply_filter(notif) } + if self.is_block() { Self::apply_block(notif) } else { self.apply_filter(notif) } } /// Return an empty output of the same kind as this filter. 
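+    /// (`Log` for log filters, `Block` for block filters.)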
- pub const fn empty_output(&self) -> FilterOutput { + pub(crate) const fn empty_output(&self) -> FilterOutput { match self { Self::Log(_) => FilterOutput::Log(VecDeque::new()), Self::Block => FilterOutput::Block(VecDeque::new()), @@ -108,7 +88,7 @@ impl InterestKind { } /// Return an empty subscription buffer of the same kind as this filter. - pub const fn empty_sub_buffer(&self) -> SubscriptionBuffer { + pub(crate) const fn empty_sub_buffer(&self) -> SubscriptionBuffer { match self { Self::Log(_) => SubscriptionBuffer::Log(VecDeque::new()), Self::Block => SubscriptionBuffer::Block(VecDeque::new()), diff --git a/crates/rpc/src/interest/mod.rs b/crates/rpc/src/interest/mod.rs index f33044b..644d64b 100644 --- a/crates/rpc/src/interest/mod.rs +++ b/crates/rpc/src/interest/mod.rs @@ -1,8 +1,58 @@ -mod filters; -pub(crate) use filters::{ActiveFilter, FilterManager, FilterOutput}; +//! Filter and subscription management for storage-backed RPC. +//! +//! This module implements two managers that track client-registered +//! interests in chain events: +//! +//! - **[`FilterManager`]** — manages poll-based filters created via +//! `eth_newFilter` and `eth_newBlockFilter`. Clients poll with +//! `eth_getFilterChanges` to retrieve accumulated results. +//! +//! - **[`SubscriptionManager`]** — manages push-based subscriptions +//! created via `eth_subscribe`. Matching events are forwarded to +//! the client over the notification channel (WebSocket / SSE). +//! +//! # Architecture +//! +//! Both managers wrap a shared `Arc` containing a [`DashMap`] +//! that maps client-assigned IDs to their active state. This makes +//! both types cheaply clonable — cloning just increments an `Arc` +//! reference count. +//! +//! # Resource lifecycle +//! +//! Each manager spawns a **background OS thread** that periodically +//! cleans up stale entries. The cleanup threads hold a [`Weak`] +//! reference to the `Arc`, so they self-terminate once all +//! strong references are dropped. +//! +//! OS threads are used (rather than tokio tasks) because +//! [`DashMap::retain`] can deadlock if called from an async context +//! that also holds a `DashMap` read guard on the same shard. Running +//! cleanup on a dedicated OS thread ensures the retain lock is never +//! contended with an in-flight async handler. +//! +//! [`Weak`]: std::sync::Weak +//! [`DashMap`]: dashmap::DashMap +//! [`DashMap::retain`]: dashmap::DashMap::retain +mod buffer; +mod filters; +pub(crate) use filters::{FilterManager, FilterOutput}; mod kind; pub(crate) use kind::InterestKind; - mod subs; pub(crate) use subs::SubscriptionManager; + +/// Notification sent when a new block is available in storage. +/// +/// The caller constructs and sends these via a +/// [`tokio::sync::broadcast::Sender`]. +#[derive(Debug, Clone)] +pub struct NewBlockNotification { + /// The block header. + pub header: alloy::consensus::Header, + /// Transactions in the block. + pub transactions: Vec, + /// Receipts for the block. + pub receipts: Vec, +} diff --git a/crates/rpc/src/interest/subs.rs b/crates/rpc/src/interest/subs.rs index 36583d9..9c1bada 100644 --- a/crates/rpc/src/interest/subs.rs +++ b/crates/rpc/src/interest/subs.rs @@ -1,15 +1,13 @@ -use crate::interest::InterestKind; -use ajj::{HandlerCtx, serde_json}; -use alloy::{primitives::U64, rpc::types::Log}; -use dashmap::DashMap; -use reth::{ - providers::{CanonStateNotifications, CanonStateSubscriptions, providers::BlockchainProvider}, - rpc::types::Header, +//! 
Subscription management for `eth_subscribe` / `eth_unsubscribe`.
+
+use crate::interest::{
+    InterestKind, NewBlockNotification,
+    buffer::{EventBuffer, EventItem},
+};
-use signet_node_types::Pnt;
+use ajj::HandlerCtx;
+use alloy::primitives::U64;
+use dashmap::DashMap;
 use std::{
-    cmp::min,
-    collections::VecDeque,
     future::pending,
     sync::{
         Arc, Weak,
@@ -17,85 +15,41 @@ use std::{
     },
     time::Duration,
 };
-use tokio::sync::broadcast::error::RecvError;
+use tokio::sync::broadcast::{self, error::RecvError};
 use tokio_util::sync::{CancellationToken, WaitForCancellationFutureOwned};
 use tracing::{Instrument, debug, debug_span, enabled, trace};
 
-/// Either type for subscription outputs.
-#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize)]
-#[serde(untagged)]
-pub enum Either {
-    Log(Box<Log>),
-    Block(Box<Header>),
-}
-
-/// Buffer for subscription outputs.
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub enum SubscriptionBuffer {
-    Log(VecDeque<Log>),
-    Block(VecDeque<Header>),
-}
-
-impl SubscriptionBuffer {
-    /// True if the buffer is empty.
-    pub fn is_empty(&self) -> bool {
-        self.len() == 0
-    }
-
-    /// Get the number of items in the buffer.
-    pub fn len(&self) -> usize {
-        match self {
-            Self::Log(buf) => buf.len(),
-            Self::Block(buf) => buf.len(),
-        }
-    }
-
-    /// Extend this buffer with another buffer.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the buffers are of different types.
-    pub fn extend(&mut self, other: Self) {
-        match (self, other) {
-            (Self::Log(buf), Self::Log(other)) => buf.extend(other),
-            (Self::Block(buf), Self::Block(other)) => buf.extend(other),
-            _ => panic!("mismatched buffer types"),
-        }
-    }
-
-    /// Pop the front of the buffer.
-    pub fn pop_front(&mut self) -> Option<Either> {
-        match self {
-            Self::Log(buf) => buf.pop_front().map(|log| Either::Log(Box::new(log))),
-            Self::Block(buf) => buf.pop_front().map(|header| Either::Block(Box::new(header))),
-        }
-    }
-}
-
-impl From<Vec<Log>> for SubscriptionBuffer {
-    fn from(logs: Vec<Log>) -> Self {
-        Self::Log(logs.into())
-    }
-}
+/// Buffer for subscription outputs: log entries or block headers.
+pub(crate) type SubscriptionBuffer = EventBuffer;
-impl FromIterator<Log> for SubscriptionBuffer {
-    fn from_iter<T: IntoIterator<Item = Log>>(iter: T) -> Self {
-        let inner: VecDeque<_> = iter.into_iter().collect();
-        Self::Log(inner)
-    }
+/// JSON-RPC subscription notification envelope.
+///
+/// The `jsonrpc` and `method` fields are always `"2.0"` and
+/// `"eth_subscription"` respectively, so they are hardcoded in the
+/// [`serde::Serialize`] impl rather than stored as struct fields.
+struct SubscriptionNotification<'a> {
+    params: SubscriptionParams<'a>,
 }
 
-impl From<Vec<Header>> for SubscriptionBuffer {
-    fn from(headers: Vec<Header>) -> Self {
-        Self::Block(headers.into())
+impl serde::Serialize for SubscriptionNotification<'_> {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        use serde::ser::SerializeStruct;
+        let mut s = serializer.serialize_struct("SubscriptionNotification", 3)?;
+        s.serialize_field("jsonrpc", "2.0")?;
+        s.serialize_field("method", "eth_subscription")?;
+        s.serialize_field("params", &self.params)?;
+        s.end()
+    }
 }
 
-impl FromIterator<Header> for SubscriptionBuffer {
-    fn from_iter<T: IntoIterator<Item = Header>>(iter: T) -> Self {
-        let inner: VecDeque<_> = iter.into_iter().collect();
-        Self::Block(inner)
-    }
+/// Params field of a subscription notification.
+#[derive(serde::Serialize)]
+struct SubscriptionParams<'a> {
+    result: &'a EventItem,
+    subscription: U64,
 }
 
 /// Tracks ongoing subscription tasks.
@@ -109,30 +63,33 @@ impl FromIterator<Header>
for SubscriptionBuffer { /// This task runs on a separate thread to avoid [`DashMap::retain`] deadlock. /// See [`DashMap`] documentation for more information. #[derive(Clone)] -pub struct SubscriptionManager { - inner: Arc>, +pub(crate) struct SubscriptionManager { + inner: Arc, } -impl SubscriptionManager { +impl SubscriptionManager { /// Instantiate a new subscription manager, start a task to clean up - /// subscriptions cancelled by user disconnection - pub fn new(provider: BlockchainProvider, clean_interval: Duration) -> Self { - let inner = Arc::new(SubscriptionManagerInner::new(provider)); + /// subscriptions cancelled by user disconnection. + pub(crate) fn new( + notif_sender: broadcast::Sender, + clean_interval: Duration, + ) -> Self { + let inner = Arc::new(SubscriptionManagerInner::new(notif_sender)); let task = SubCleanerTask::new(Arc::downgrade(&inner), clean_interval); task.spawn(); Self { inner } } } -impl core::ops::Deref for SubscriptionManager { - type Target = SubscriptionManagerInner; +impl core::ops::Deref for SubscriptionManager { + type Target = SubscriptionManagerInner; fn deref(&self) -> &Self::Target { &self.inner } } -impl core::fmt::Debug for SubscriptionManager { +impl core::fmt::Debug for SubscriptionManager { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("SubscriptionManager").finish_non_exhaustive() } @@ -140,19 +97,16 @@ impl core::fmt::Debug for SubscriptionManager { /// Inner logic for [`SubscriptionManager`]. #[derive(Debug)] -pub struct SubscriptionManagerInner -where - N: Pnt, -{ +pub(crate) struct SubscriptionManagerInner { next_id: AtomicU64, tasks: DashMap, - provider: BlockchainProvider, + notif_sender: broadcast::Sender, } -impl SubscriptionManagerInner { +impl SubscriptionManagerInner { /// Create a new subscription manager. - pub fn new(provider: BlockchainProvider) -> Self { - Self { next_id: AtomicU64::new(1), tasks: DashMap::new(), provider } + fn new(notif_sender: broadcast::Sender) -> Self { + Self { next_id: AtomicU64::new(1), tasks: DashMap::new(), notif_sender } } /// Assign a new subscription ID. @@ -161,7 +115,7 @@ impl SubscriptionManagerInner { } /// Cancel a subscription task. - pub fn unsubscribe(&self, id: U64) -> bool { + pub(crate) fn unsubscribe(&self, id: U64) -> bool { if let Some(task) = self.tasks.remove(&id) { task.1.cancel(); true @@ -172,18 +126,19 @@ impl SubscriptionManagerInner { /// Subscribe to notifications. Returns `None` if notifications are /// disabled. - pub fn subscribe(&self, ajj_ctx: &HandlerCtx, filter: InterestKind) -> Option { + pub(crate) fn subscribe(&self, ajj_ctx: &HandlerCtx, filter: InterestKind) -> Option { if !ajj_ctx.notifications_enabled() { return None; } let id = self.next_id(); let token = CancellationToken::new(); + self.tasks.insert(id, token.clone()); let task = SubscriptionTask { id, filter, token: token.clone(), - notifs: self.provider.subscribe_to_canonical_state(), + notifs: self.notif_sender.subscribe(), }; task.spawn(ajj_ctx); @@ -199,21 +154,18 @@ struct SubscriptionTask { id: U64, filter: InterestKind, token: CancellationToken, - notifs: CanonStateNotifications, + notifs: broadcast::Receiver, } impl SubscriptionTask { /// Create the task future. 
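+    ///
+    /// The body is a biased `select!` loop: cancellation (client
+    /// disconnect, then `eth_unsubscribe`) is checked first, buffered
+    /// items are then flushed to the client as send permits become
+    /// available, and only after that are new notifications pulled from
+    /// the broadcast channel, filtered, and appended to the buffer.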
- pub(crate) async fn task_future( - self, - ajj_ctx: HandlerCtx, - ajj_cancel: WaitForCancellationFutureOwned, - ) { + async fn task_future(self, ajj_ctx: HandlerCtx, ajj_cancel: WaitForCancellationFutureOwned) { let SubscriptionTask { id, filter, token, mut notifs } = self; - let Some(sender) = ajj_ctx.notifications() else { return }; + if !ajj_ctx.notifications_enabled() { + return; + } - // Buffer for notifications to be sent to the client let mut notif_buffer = filter.empty_sub_buffer(); tokio::pin!(ajj_cancel); @@ -223,99 +175,65 @@ impl SubscriptionTask { span.record("filter", format!("{filter:?}")); } + // NB: reserve half the capacity to avoid blocking other + // usage. This is a heuristic and can be adjusted as needed. let guard = span.enter(); - // This future checks if the notification buffer is non-empty and - // waits for the sender to have some capacity before sending. let permit_fut = async { if !notif_buffer.is_empty() { - // NB: we reserve half the capacity to avoid blocking other - // usage. This is a heuristic and can be adjusted as needed. - sender.reserve_many(min(sender.max_capacity() / 2, notif_buffer.len())).await + ajj_ctx + .permit_many((ajj_ctx.notification_capacity() / 2).min(notif_buffer.len())) + .await } else { - // If the notification buffer is empty, just never return pending().await } } .in_current_span(); drop(guard); - // NB: this select is biased, this ensures that the outbound - // buffer is either drained, or blocked on permits before checking - // the inbound buffer + // NB: biased select ensures we check cancellation before + // processing new notifications. + let _guard = span.enter(); tokio::select! { biased; _ = &mut ajj_cancel => { - let _guard = span.enter(); - // if AJJ cancelled us via client disconnect, we should - // cancel the token so that we can be reaped by the - // subscription cleaner task. trace!("subscription cancelled by client disconnect"); token.cancel(); break; } _ = token.cancelled() => { - // If the token is cancelled, this subscription has been - // cancelled by eth_unsubscribe - let _guard = span.enter(); trace!("subscription cancelled by user"); break; } permits = permit_fut => { - let _guard = span.enter(); - // channel closed - let Ok(permits) = permits else { + let Some(permits) = permits else { trace!("channel to client closed"); break }; for permit in permits { - // Send notification to the client for each permit. - let Some(item) = notif_buffer.pop_front() else { - // if we run out of notifications, we should break - // This would be weird, as we only allocated - // permits for notifications we had. Let's handle it anyway. - break; - }; - let notification = ajj::serde_json::json!{ - { - "jsonrpc": "2.0", - "method": "eth_subscription", - "params": { - "result": &item, - "subscription": id - }, - } - }; - // Serialize and send. 
- let Ok(brv) = serde_json::value::to_raw_value(¬ification) else { - trace!(?item, "failed to serialize notification"); - continue + let Some(item) = notif_buffer.pop_front() else { break }; + let notification = SubscriptionNotification { + params: SubscriptionParams { result: &item, subscription: id }, }; - permit.send(brv); + let _ = permit.send(¬ification); } } notif_res = notifs.recv() => { - let _guard = span.enter(); - let notif = match notif_res { Ok(notif) => notif, Err(RecvError::Lagged(skipped)) => { trace!(skipped, "missed notifications"); continue; - }, - Err(e) =>{ - trace!(?e, "CanonStateNotifications stream closed"); + } + Err(e) => { + trace!(?e, "notification stream closed"); break; } }; let output = filter.filter_notification_for_sub(¬if); - trace!(count = output.len(), "Filter applied to notification"); if !output.is_empty() { - // NB: this will panic if the filter type changes - // mid-task. But that should never happen as it would - // break API guarantees anyway notif_buffer.extend(output); } } @@ -324,7 +242,7 @@ impl SubscriptionTask { } /// Spawn on the ajj [`HandlerCtx`]. - pub(crate) fn spawn(self, ctx: &HandlerCtx) { + fn spawn(self, ctx: &HandlerCtx) { ctx.spawn_graceful_with_ctx(|ctx, ajj_cancel| self.task_future(ctx, ajj_cancel)); } } @@ -333,29 +251,27 @@ impl SubscriptionTask { /// /// This task runs on a separate thread to avoid [`DashMap::retain`] deadlocks. #[derive(Debug)] -pub(super) struct SubCleanerTask { - inner: Weak>, - interval: std::time::Duration, +struct SubCleanerTask { + inner: Weak, + interval: Duration, } -impl SubCleanerTask { +impl SubCleanerTask { /// Create a new subscription cleaner task. - pub(super) const fn new( - inner: Weak>, - interval: std::time::Duration, - ) -> Self { + const fn new(inner: Weak, interval: Duration) -> Self { Self { inner, interval } } /// Run the task. This task runs on a separate thread, which ensures that /// [`DashMap::retain`]'s deadlock condition is not met. See [`DashMap`] /// documentation for more information. - pub(super) fn spawn(self) { + fn spawn(self) { std::thread::spawn(move || { loop { std::thread::sleep(self.interval); - if let Some(inner) = self.inner.upgrade() { - inner.tasks.retain(|_, task| !task.is_cancelled()); + match self.inner.upgrade() { + Some(inner) => inner.tasks.retain(|_, task| !task.is_cancelled()), + None => break, } } }); diff --git a/crates/rpc/src/lib.rs b/crates/rpc/src/lib.rs index 0a5ff8a..3214b8d 100644 --- a/crates/rpc/src/lib.rs +++ b/crates/rpc/src/lib.rs @@ -1,42 +1,4 @@ -//! Signet RPC. -//! -//! This crate provides RPC endpoint definitions for the Signet node, as well -//! as the glue between the node and the RPC server. This RPC server is deeply -//! integrated with `reth`, and expects a variety of `reth`-specific types to be -//! passed in. As such, it is mostly useful within the context of a `signet` -//! node. -//! -//! ## Usage Example -//! -//! ```rust -//! # use signet_rpc::{RpcCtx}; -//! # use signet_node_types::Pnt; -//! # use reth_node_api::FullNodeComponents; -//! # use reth::tasks::TaskExecutor; -//! use signet_rpc::{router, ServeConfig}; -//! -//! # pub async fn f(ctx: RpcCtx, tasks: &TaskExecutor) -> eyre::Result<()> -//! # where -//! # Host: FullNodeComponents, -//! # Signet: Pnt, -//! # { -//! let router = signet_rpc::router().with_state(ctx); -//! -//! let cfg = ServeConfig { -//! http: vec!["localhost:8080".parse()?], -//! http_cors: None, -//! ws: vec![], -//! ws_cors: None, -//! ipc: None, -//! }; -//! -//! 
// Spawn the server on the given addresses, the shutdown guard -//! // will shutdown the server(s) when dropped. -//! let shutdown_guard = cfg.serve(tasks, router).await?; -//! # Ok(()) -//! # } -//! ``` - +#![doc = include_str!("../README.md")] #![warn( missing_copy_implementations, missing_debug_implementations, @@ -49,58 +11,30 @@ #![deny(unused_must_use, rust_2018_idioms)] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -mod config; - -pub use config::{RpcServerGuard, ServeConfig}; - -mod ctx; -pub use ctx::{LoadState, RpcCtx, SignetCtx}; - -mod debug; -pub use debug::{DebugError, debug}; +pub(crate) mod config; +pub use config::{BlockTags, ChainNotifier, StorageRpcConfig, StorageRpcCtx, SyncStatus}; mod eth; -pub use eth::{CallErrorData, EthError, eth}; - -mod signet; -pub use signet::{error::SignetError, signet}; - -mod inspect; -pub use inspect::inspect; +pub use eth::EthError; mod interest; +pub use interest::NewBlockNotification; -pub mod receipts; - -/// Utils and simple serve functions. -pub mod utils; - -/// Re-exported for convenience -pub use ::ajj; +mod debug; +pub use debug::DebugError; -use ajj::Router; -use reth::providers::{ProviderFactory, providers::ProviderNodeTypes}; -use reth_db::mdbx::DatabaseEnv; -use reth_node_api::FullNodeComponents; -use signet_node_types::Pnt; -use std::sync::Arc; +mod signet; +pub use signet::error::SignetError; -/// Create a new router with the given host and signet types. -pub fn router() -> Router> +/// Instantiate a combined router with `eth`, `debug`, and `signet` +/// namespaces. +pub fn router() -> ajj::Router> where - Host: FullNodeComponents, - Signet: Pnt, + H: signet_hot::HotKv + Send + Sync + 'static, + ::Error: trevm::revm::database::DBErrorMarker, { ajj::Router::new() - .nest("eth", eth::()) - .nest("signet", signet::()) - .nest("debug", debug::()) -} - -/// Create a new hazmat router that exposes the `inspect` API. -pub fn hazmat_router() -> Router> -where - Signet: Pnt + ProviderNodeTypes>, -{ - ajj::Router::new().nest("inspect", inspect::inspect::()) + .nest("eth", eth::eth()) + .nest("debug", debug::debug()) + .nest("signet", signet::signet()) } diff --git a/crates/rpc/src/receipts.rs b/crates/rpc/src/receipts.rs deleted file mode 100644 index 6e45e95..0000000 --- a/crates/rpc/src/receipts.rs +++ /dev/null @@ -1,134 +0,0 @@ -//! Signet RPC receipt response builder. - -use alloy::{ - consensus::{ReceiptEnvelope, Transaction, TxReceipt, transaction::TransactionMeta}, - primitives::{Address, TxKind}, - rpc::types::eth::{Log, ReceiptWithBloom, TransactionReceipt}, -}; -use reth::primitives::{Receipt, TransactionSigned}; -use reth::rpc::server_types::eth::{EthApiError, EthResult}; -use reth::{core::primitives::SignerRecoverable, primitives::TxType}; -use signet_types::MagicSig; - -/// Builds an [`TransactionReceipt`] obtaining the inner receipt envelope from the given closure. -pub fn build_signet_receipt( - transaction: TransactionSigned, - meta: TransactionMeta, - receipt: Receipt, - all_receipts: Vec, -) -> EthResult>> { - // Recover the transaction sender. - // Some transactions are emitted by Signet itself in behalf of the sender, - // in which case they'll use [`MagicSig`]s to preserve the sender with additional metadata. - // Therefore, in case recovering the signer fails, we try to parse the signature as a magic signature. 
- let from = MagicSig::try_from_signature(transaction.signature()) - .map(|magic_sig| magic_sig.rollup_sender()) - .or_else(|| transaction.recover_signer_unchecked().ok()) - .ok_or_else(|| EthApiError::InvalidTransactionSignature)?; - - // get the previous transaction cumulative gas used - let prev_cumulative = meta - .index - .checked_sub(1) - .and_then(|i| all_receipts.get(i as usize)) - .map(|r| r.cumulative_gas_used()) - .unwrap_or_default(); - - let gas_used = receipt.cumulative_gas_used() - prev_cumulative; - - let logs_bloom = receipt.bloom(); - let receipt_status = receipt.status_or_post_state(); - let receipt_cumulative_gas_used = receipt.cumulative_gas_used(); - - // get number of logs in the block - let num_logs: u64 = - all_receipts.iter().take(meta.index as usize).map(|r| r.logs().len() as u64).sum(); - - // Retrieve all corresponding logs for the receipt. - let logs: Vec = receipt - .logs - .into_iter() - .enumerate() - .map(|(tx_log_idx, log)| Log { - inner: log, - block_hash: Some(meta.block_hash), - block_number: Some(meta.block_number), - block_timestamp: Some(meta.timestamp), - transaction_hash: Some(meta.tx_hash), - transaction_index: Some(meta.index), - log_index: Some(num_logs + tx_log_idx as u64), - removed: false, - }) - .collect(); - - let rpc_receipt = alloy::rpc::types::eth::Receipt { - status: receipt_status, - cumulative_gas_used: receipt_cumulative_gas_used, - logs, - }; - - let (contract_address, to) = match transaction.kind() { - TxKind::Create => (Some(from.create(transaction.nonce())), None), - TxKind::Call(addr) => (None, Some(Address(*addr))), - }; - - Ok(TransactionReceipt { - inner: build_envelope( - ReceiptWithBloom { receipt: rpc_receipt, logs_bloom }, - transaction.tx_type(), - ), - transaction_hash: meta.tx_hash, - transaction_index: Some(meta.index), - block_hash: Some(meta.block_hash), - block_number: Some(meta.block_number), - from, - to, - gas_used, - contract_address, - effective_gas_price: transaction.effective_gas_price(meta.base_fee), - // Signet does not support EIP-4844, so these fields are always None. - blob_gas_price: None, - blob_gas_used: None, - }) -} - -fn build_envelope( - receipt_with_bloom: ReceiptWithBloom>, - tx_type: TxType, -) -> ReceiptEnvelope { - match tx_type { - TxType::Legacy => ReceiptEnvelope::Legacy(receipt_with_bloom), - TxType::Eip2930 => ReceiptEnvelope::Eip2930(receipt_with_bloom), - TxType::Eip1559 => ReceiptEnvelope::Eip1559(receipt_with_bloom), - TxType::Eip4844 => ReceiptEnvelope::Eip4844(receipt_with_bloom), - TxType::Eip7702 => ReceiptEnvelope::Eip7702(receipt_with_bloom), - #[allow(unreachable_patterns)] - _ => unreachable!(), - } -} - -// Some code in this file has been copied and modified from reth -// -// The original license is included below: -// -// The MIT License (MIT) -// -// Copyright (c) 2022-2025 Reth Contributors -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -//. -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. diff --git a/crates/rpc/src/signet/endpoints.rs b/crates/rpc/src/signet/endpoints.rs index 4abda48..b82280d 100644 --- a/crates/rpc/src/signet/endpoints.rs +++ b/crates/rpc/src/signet/endpoints.rs @@ -1,75 +1,91 @@ +//! Signet namespace RPC endpoint implementations. + use crate::{ - ctx::RpcCtx, + config::{EvmBlockContext, StorageRpcCtx}, + eth::helpers::{CfgFiller, await_handler, response_tri}, signet::error::SignetError, - utils::{await_handler, response_tri}, }; use ajj::{HandlerCtx, ResponsePayload}; -use reth_node_api::FullNodeComponents; +use alloy::eips::BlockId; use signet_bundle::{SignetBundleDriver, SignetCallBundle, SignetCallBundleResponse}; -use signet_node_types::Pnt; +use signet_hot::{HotKv, model::HotKvRead}; use signet_types::SignedOrder; use std::time::Duration; use tokio::select; +use trevm::revm::database::DBErrorMarker; -pub(super) async fn send_order( +/// `signet_sendOrder` handler. +/// +/// Forwards the order to the transaction cache asynchronously. The +/// response is returned immediately — forwarding errors are logged +/// but not propagated to the caller (fire-and-forget). +pub(super) async fn send_order( hctx: HandlerCtx, order: SignedOrder, - ctx: RpcCtx, -) -> Result<(), String> + ctx: StorageRpcCtx, +) -> ResponsePayload<(), SignetError> where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let task = |hctx: HandlerCtx| async move { - let Some(tx_cache) = ctx.signet().tx_cache() else { - return Err(SignetError::TxCacheUrlNotProvided.into_string()); - }; - - hctx.spawn(async move { tx_cache.forward_order(order).await.map_err(|e| e.to_string()) }); + let Some(tx_cache) = ctx.tx_cache().cloned() else { + return ResponsePayload(Err(SignetError::TxCacheNotProvided.into())); + }; - Ok(()) + let task = |hctx: HandlerCtx| async move { + hctx.spawn(async move { + if let Err(e) = tx_cache.forward_order(order).await { + tracing::warn!(error = %e, "failed to forward order"); + } + }); + ResponsePayload(Ok(())) }; - await_handler!(@option hctx.spawn_blocking_with_ctx(task)) + await_handler!(@response_option hctx.spawn_blocking_with_ctx(task)) } -pub(super) async fn call_bundle( +/// `signet_callBundle` handler. 
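+///
+/// Simulates the bundle against EVM state at the bundle's
+/// `state_block_number`. Simulation is raced against a timeout taken
+/// from the bundle's `timeout` field (milliseconds), falling back to
+/// the configured `default_bundle_timeout_ms`.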
+pub(super) async fn call_bundle( hctx: HandlerCtx, bundle: SignetCallBundle, - ctx: RpcCtx, -) -> ResponsePayload + ctx: StorageRpcCtx, +) -> ResponsePayload where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - let timeout = bundle.bundle.timeout.unwrap_or(1000); + let timeout = bundle.bundle.timeout.unwrap_or(ctx.config().default_bundle_timeout_ms); let task = async move { let id = bundle.state_block_number(); - let block_cfg = match ctx.signet().block_cfg(id.into()).await { - Ok(block_cfg) => block_cfg, - Err(e) => { - return ResponsePayload::internal_error_with_message_and_obj( - "error while loading block cfg".into(), - e.to_string(), - ); - } - }; + let block_id: BlockId = id.into(); + + let EvmBlockContext { header, db } = + response_tri!(ctx.resolve_evm_block(block_id).map_err(|e| { + tracing::warn!(error = %e, ?block_id, "block resolution failed for bundle"); + SignetError::Resolve(e.to_string()) + })); let mut driver = SignetBundleDriver::from(&bundle); - let trevm = response_tri!(ctx.trevm(id.into(), &block_cfg)); + let trevm = signet_evm::signet_evm(db, ctx.constants().clone()) + .fill_cfg(&CfgFiller(ctx.chain_id())) + .fill_block(&header); - response_tri!(trevm.drive_bundle(&mut driver).map_err(|e| e.into_error())); + response_tri!(trevm.drive_bundle(&mut driver).map_err(|e| { + let e = e.into_error(); + tracing::warn!(error = %e, "evm error during bundle simulation"); + SignetError::Evm(e.to_string()) + })); - ResponsePayload::Success(driver.into_response()) + ResponsePayload(Ok(driver.into_response())) }; let task = async move { select! { _ = tokio::time::sleep(Duration::from_millis(timeout)) => { ResponsePayload::internal_error_message( - "timeout during bundle simulation".into(), + SignetError::Timeout.to_string().into(), ) } result = task => { @@ -78,5 +94,5 @@ where } }; - await_handler!(@response_option hctx.spawn_blocking(task)) + await_handler!(@response_option hctx.spawn(task)) } diff --git a/crates/rpc/src/signet/error.rs b/crates/rpc/src/signet/error.rs index 2225c1d..83570ab 100644 --- a/crates/rpc/src/signet/error.rs +++ b/crates/rpc/src/signet/error.rs @@ -1,23 +1,27 @@ -//! Signet RPC errors. +//! Error types for the signet namespace. -use reth::rpc::server_types::eth::EthApiError; - -/// Errors that can occur when interacting with the `signet` namespace. -#[derive(Debug, thiserror::Error)] +/// Errors that can occur in the `signet` namespace. +#[derive(Debug, Clone, thiserror::Error)] pub enum SignetError { - /// The transaction cache URL was not provided. - #[error("transaction cache URL not provided")] - TxCacheUrlNotProvided, - /// An error coming from interacting with components - /// that could emit `EthApiError`s, such as the tx-cache. - #[error(transparent)] - EthApiError(#[from] EthApiError), + /// The transaction cache was not provided. + #[error("transaction cache not provided")] + TxCacheNotProvided, + /// Block resolution failed. + #[error("block resolution error")] + Resolve(String), + /// EVM execution error. + #[error("evm execution error")] + Evm(String), + /// Bundle simulation timed out. + #[error("timeout during bundle simulation")] + Timeout, } -impl SignetError { - /// Turn into a string by value, allows for `.map_err(SignetError::to_string)` - /// to be used. 
- pub fn into_string(self) -> String { - ToString::to_string(&self) +impl serde::Serialize for SignetError { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.serialize_str(&self.to_string()) } } diff --git a/crates/rpc/src/signet/mod.rs b/crates/rpc/src/signet/mod.rs index c882000..ad3fdd7 100644 --- a/crates/rpc/src/signet/mod.rs +++ b/crates/rpc/src/signet/mod.rs @@ -1,19 +1,18 @@ //! Signet RPC methods and related code. mod endpoints; -use endpoints::*; - +use endpoints::{call_bundle, send_order}; pub(crate) mod error; -use crate::ctx::RpcCtx; -use reth_node_api::FullNodeComponents; -use signet_node_types::Pnt; +use crate::config::StorageRpcCtx; +use signet_hot::{HotKv, model::HotKvRead}; +use trevm::revm::database::DBErrorMarker; -/// Instantiate a `signet` API router. -pub fn signet() -> ajj::Router> +/// Instantiate a `signet` API router backed by storage. +pub(crate) fn signet() -> ajj::Router> where - Host: FullNodeComponents, - Signet: Pnt, + H: HotKv + Send + Sync + 'static, + ::Error: DBErrorMarker, { - ajj::Router::new().route("sendOrder", send_order).route("callBundle", call_bundle) + ajj::Router::new().route("sendOrder", send_order::).route("callBundle", call_bundle::) } diff --git a/crates/rpc/tests/eth_rpc.rs b/crates/rpc/tests/eth_rpc.rs new file mode 100644 index 0000000..4c7253a --- /dev/null +++ b/crates/rpc/tests/eth_rpc.rs @@ -0,0 +1,864 @@ +//! Integration tests for the `signet-rpc` ETH RPC endpoints. +//! +//! Tests exercise the public router API via the axum service layer, using +//! in-memory storage backends (`MemKv` + `MemColdBackend`). + +use alloy::{ + consensus::{ + EthereumTxEnvelope, Header, Receipt as AlloyReceipt, SignableTransaction, Signed, TxLegacy, + TxType, transaction::Recovered, + }, + primitives::{Address, B256, Log as PrimitiveLog, LogData, TxKind, U256, address, logs_bloom}, + signers::{SignerSync, local::PrivateKeySigner}, +}; +use axum::body::Body; +use http::Request; +use serde_json::{Value, json}; +use signet_cold::{BlockData, ColdStorageHandle, ColdStorageTask, mem::MemColdBackend}; +use signet_constants::SignetSystemConstants; +use signet_hot::{HotKv, db::UnsafeDbWrite, mem::MemKv}; +use signet_rpc::{ChainNotifier, StorageRpcConfig, StorageRpcCtx}; +use signet_storage::UnifiedStorage; +use signet_storage_types::{Account, Receipt, SealedHeader}; +use std::sync::Arc; +use tokio_util::sync::CancellationToken; +use tower::ServiceExt; +use trevm::revm::bytecode::Bytecode; + +// --------------------------------------------------------------------------- +// Test helpers +// --------------------------------------------------------------------------- + +/// Everything needed to make RPC calls against the storage-backed router. +struct TestHarness { + app: axum::Router, + cold: ColdStorageHandle, + hot: MemKv, + chain: ChainNotifier, + #[allow(dead_code)] + ctx: StorageRpcCtx, + _cancel: CancellationToken, +} + +impl TestHarness { + /// Create a minimal harness with empty storage. 
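+    ///
+    /// Illustrative usage (a sketch built from the helpers in this
+    /// file):
+    ///
+    /// ```ignore
+    /// let h = TestHarness::new(5).await;
+    /// let result = rpc_call(&h.app, "eth_blockNumber", json!([])).await;
+    /// assert_eq!(result, json!("0x5"));
+    /// ```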
+ async fn new(latest: u64) -> Self { + let cancel = CancellationToken::new(); + let hot = MemKv::new(); + let cold = ColdStorageTask::spawn(MemColdBackend::new(), cancel.clone()); + let storage = UnifiedStorage::new(hot.clone(), cold.clone()); + let constants = SignetSystemConstants::test(); + let chain = ChainNotifier::new(16); + chain.tags().update_all(latest, latest.saturating_sub(2), 0); + let ctx = StorageRpcCtx::new( + Arc::new(storage), + constants, + chain.clone(), + None, + StorageRpcConfig::default(), + ); + let app = signet_rpc::router::().into_axum("/").with_state(ctx.clone()); + + Self { app, cold, hot, chain, ctx, _cancel: cancel } + } + + /// Append a block to both hot and cold storage. + /// + /// Writes the header to hot so hash→number and header lookups work, + /// then writes the full block to cold. + async fn append_block(&self, block: BlockData) { + let writer = self.hot.writer().unwrap(); + writer.put_header(&block.header).unwrap(); + writer.commit().unwrap(); + self.cold.append_block(block).await.unwrap(); + } +} + +/// Make a JSON-RPC call and return the `"result"` field. +/// +/// The `method` parameter is the fully-qualified name (e.g. +/// `"eth_blockNumber"`). The router nests each namespace under its prefix. +/// +/// Panics if the response contains an `"error"` field. +async fn rpc_call(app: &axum::Router, method: &str, params: Value) -> Value { + let resp = rpc_call_raw(app, method, params).await; + if let Some(error) = resp.get("error") { + panic!("RPC error for {method}: {error}"); + } + resp["result"].clone() +} + +/// Make a JSON-RPC call and return the full response (including any error). +async fn rpc_call_raw(app: &axum::Router, method: &str, params: Value) -> Value { + let body = json!({ + "jsonrpc": "2.0", + "id": 1, + "method": method, + "params": params, + }); + + let req = Request::builder() + .method("POST") + .uri("/") + .header("content-type", "application/json") + .body(Body::from(serde_json::to_vec(&body).unwrap())) + .unwrap(); + + let response = app.clone().oneshot(req).await.unwrap(); + let bytes = axum::body::to_bytes(response.into_body(), 1024 * 1024).await.unwrap(); + serde_json::from_slice(&bytes).unwrap() +} + +// --------------------------------------------------------------------------- +// Test data builders +// --------------------------------------------------------------------------- + +/// Test address used for account state queries. +const TEST_ADDR: Address = address!("0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"); + +/// Test log-emitting contract address. +const LOG_ADDR: Address = address!("0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"); + +/// Test log topic. +const LOG_TOPIC: B256 = B256::repeat_byte(0xcc); + +/// Create a legacy transaction signed with a deterministic key. +/// +/// Returns a [`RecoveredTx`] with the sender pre-recovered, plus the sender +/// address for use in test assertions. +fn make_signed_tx(nonce: u64) -> (signet_storage_types::RecoveredTx, Address) { + make_signed_tx_with_gas_price(nonce, 1_000_000_000) +} + +/// Create a legacy transaction with a custom gas price. 
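+///
+/// The signing key is derived deterministically from the nonce (a
+/// 32-byte key of `nonce + 1` repeated), so each nonce maps to a stable,
+/// distinct sender address across test runs.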
+fn make_signed_tx_with_gas_price( + nonce: u64, + gas_price: u128, +) -> (signet_storage_types::RecoveredTx, Address) { + let signer = PrivateKeySigner::from_signing_key( + alloy::signers::k256::ecdsa::SigningKey::from_slice( + &B256::repeat_byte((nonce as u8).wrapping_add(1)).0, + ) + .unwrap(), + ); + let sender = signer.address(); + + let tx = TxLegacy { + nonce, + gas_price, + gas_limit: 21_000, + to: TxKind::Call(Address::ZERO), + value: U256::from(1000), + ..Default::default() + }; + + let sig_hash = tx.signature_hash(); + let sig = signer.sign_hash_sync(&sig_hash).unwrap(); + let signed: signet_storage_types::TransactionSigned = + EthereumTxEnvelope::Legacy(Signed::new_unhashed(tx, sig)); + + (Recovered::new_unchecked(signed, sender), sender) +} + +/// Build a [`BlockData`] from pre-signed transactions. +/// +/// Creates receipts with incrementing `cumulative_gas_used` and optionally +/// attaches logs to each receipt. +fn make_block( + block_num: u64, + txs: Vec, + logs_per_receipt: usize, +) -> BlockData { + let receipts: Vec = txs + .iter() + .enumerate() + .map(|(i, _)| { + let logs: Vec = (0..logs_per_receipt) + .map(|l| PrimitiveLog { + address: LOG_ADDR, + data: LogData::new_unchecked( + vec![LOG_TOPIC], + alloy::primitives::Bytes::from(vec![l as u8]), + ), + }) + .collect(); + + Receipt { + tx_type: TxType::Legacy, + inner: AlloyReceipt { + status: true.into(), + cumulative_gas_used: 21_000 * (i as u64 + 1), + logs, + }, + } + }) + .collect(); + + // Compute the logs bloom from all receipt logs so getLogs bloom check passes. + let all_logs: Vec<_> = receipts.iter().flat_map(|r| r.inner.logs.iter()).collect(); + let bloom = logs_bloom(all_logs); + + let gas_used = receipts.last().map(|r| r.inner.cumulative_gas_used).unwrap_or_default(); + + let header = Header { + number: block_num, + timestamp: 1_700_000_000 + block_num, + base_fee_per_gas: Some(1_000_000_000), + gas_limit: 30_000_000, + gas_used, + logs_bloom: bloom, + ..Default::default() + }; + + BlockData::new(SealedHeader::new(header), txs, receipts, vec![], None) +} + +// --------------------------------------------------------------------------- +// Group 1: Simple queries +// --------------------------------------------------------------------------- + +#[tokio::test] +async fn test_block_number() { + let h = TestHarness::new(42).await; + let result = rpc_call(&h.app, "eth_blockNumber", json!([])).await; + assert_eq!(result, json!("0x2a")); +} + +#[tokio::test] +async fn test_chain_id() { + let h = TestHarness::new(0).await; + let result = rpc_call(&h.app, "eth_chainId", json!([])).await; + let expected = format!("0x{:x}", SignetSystemConstants::test().ru_chain_id()); + assert_eq!(result, json!(expected)); +} + +// --------------------------------------------------------------------------- +// Group 2: Cold storage — block queries +// --------------------------------------------------------------------------- + +/// Shared setup: append a block with 2 signed transactions to both hot and +/// cold storage. +async fn setup_cold_block(h: &TestHarness) -> (Vec, Vec
) { + let (tx0, sender0) = make_signed_tx(0); + let (tx1, sender1) = make_signed_tx(1); + + let hash0 = *tx0.tx_hash(); + let hash1 = *tx1.tx_hash(); + + let block = make_block(1, vec![tx0, tx1], 1); + h.append_block(block).await; + h.chain.tags().set_latest(1); + + (vec![hash0, hash1], vec![sender0, sender1]) +} + +#[tokio::test] +async fn test_get_block_by_number_hashes() { + let h = TestHarness::new(0).await; + let (tx_hashes, _) = setup_cold_block(&h).await; + + let result = rpc_call(&h.app, "eth_getBlockByNumber", json!(["0x1", false])).await; + + assert_eq!(result["number"], json!("0x1")); + let txs = result["transactions"].as_array().unwrap(); + assert_eq!(txs.len(), 2); + // When full=false, transactions are hashes (strings) + assert!(txs[0].is_string()); + assert_eq!(txs[0].as_str().unwrap(), format!("{:?}", tx_hashes[0])); +} + +#[tokio::test] +async fn test_get_block_by_number_full() { + let h = TestHarness::new(0).await; + let (tx_hashes, senders) = setup_cold_block(&h).await; + + let result = rpc_call(&h.app, "eth_getBlockByNumber", json!(["0x1", true])).await; + + assert_eq!(result["number"], json!("0x1")); + let txs = result["transactions"].as_array().unwrap(); + assert_eq!(txs.len(), 2); + // When full=true, transactions are objects + assert!(txs[0].is_object()); + assert_eq!(txs[0]["hash"], json!(format!("{:?}", tx_hashes[0]))); + assert_eq!(txs[0]["from"], json!(format!("{:?}", senders[0]))); + assert_eq!(txs[0]["blockNumber"], json!("0x1")); + assert_eq!(txs[0]["transactionIndex"], json!("0x0")); + assert_eq!(txs[1]["transactionIndex"], json!("0x1")); +} + +#[tokio::test] +async fn test_get_block_by_hash() { + let h = TestHarness::new(0).await; + setup_cold_block(&h).await; + + // Get the block to learn its hash + let block = rpc_call(&h.app, "eth_getBlockByNumber", json!(["0x1", false])).await; + let block_hash = block["hash"].as_str().unwrap().to_string(); + + let result = rpc_call(&h.app, "eth_getBlockByHash", json!([block_hash, false])).await; + assert_eq!(result["number"], json!("0x1")); + assert_eq!(result["hash"], json!(block_hash)); +} + +#[tokio::test] +async fn test_get_block_tx_count() { + let h = TestHarness::new(0).await; + setup_cold_block(&h).await; + + let result = rpc_call(&h.app, "eth_getBlockTransactionCountByNumber", json!(["0x1"])).await; + assert_eq!(result, json!("0x2")); +} + +#[tokio::test] +async fn test_get_block_header() { + let h = TestHarness::new(0).await; + setup_cold_block(&h).await; + + let result = rpc_call(&h.app, "eth_getBlockHeaderByNumber", json!(["0x1"])).await; + assert_eq!(result["number"], json!("0x1")); + assert!(result["baseFeePerGas"].is_string()); +} + +#[tokio::test] +async fn test_get_block_not_found() { + let h = TestHarness::new(255).await; + let result = rpc_call(&h.app, "eth_getBlockByNumber", json!(["0xff", false])).await; + assert!(result.is_null()); +} + +// --------------------------------------------------------------------------- +// Group 3: Cold storage — transaction queries +// --------------------------------------------------------------------------- + +#[tokio::test] +async fn test_get_transaction_by_hash() { + let h = TestHarness::new(0).await; + let (tx_hashes, senders) = setup_cold_block(&h).await; + + let result = + rpc_call(&h.app, "eth_getTransactionByHash", json!([format!("{:?}", tx_hashes[0])])).await; + + assert_eq!(result["hash"], json!(format!("{:?}", tx_hashes[0]))); + assert_eq!(result["from"], json!(format!("{:?}", senders[0]))); + assert_eq!(result["blockNumber"], json!("0x1")); + 
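+    // JSON-RPC quantities are hex-encoded without zero padding, so
+    // index 0 serializes as "0x0".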
assert_eq!(result["transactionIndex"], json!("0x0")); +} + +#[tokio::test] +async fn test_get_raw_transaction_by_hash() { + let h = TestHarness::new(0).await; + let (tx_hashes, _) = setup_cold_block(&h).await; + + let result = + rpc_call(&h.app, "eth_getRawTransactionByHash", json!([format!("{:?}", tx_hashes[0])])) + .await; + + // Raw transaction is a hex string + let hex = result.as_str().unwrap(); + assert!(hex.starts_with("0x")); + assert!(hex.len() > 4); +} + +#[tokio::test] +async fn test_get_tx_by_block_and_index() { + let h = TestHarness::new(0).await; + let (tx_hashes, senders) = setup_cold_block(&h).await; + + let result = + rpc_call(&h.app, "eth_getTransactionByBlockNumberAndIndex", json!(["0x1", "0x0"])).await; + + assert_eq!(result["hash"], json!(format!("{:?}", tx_hashes[0]))); + assert_eq!(result["from"], json!(format!("{:?}", senders[0]))); +} + +#[tokio::test] +async fn test_get_transaction_receipt() { + let h = TestHarness::new(0).await; + let (tx_hashes, senders) = setup_cold_block(&h).await; + + let result = + rpc_call(&h.app, "eth_getTransactionReceipt", json!([format!("{:?}", tx_hashes[0])])).await; + + assert_eq!(result["transactionHash"], json!(format!("{:?}", tx_hashes[0]))); + assert_eq!(result["from"], json!(format!("{:?}", senders[0]))); + assert_eq!(result["blockNumber"], json!("0x1")); + assert_eq!(result["status"], json!("0x1")); + assert_eq!(result["gasUsed"], json!("0x5208")); // 21000 +} + +#[tokio::test] +async fn test_get_block_receipts() { + let h = TestHarness::new(0).await; + setup_cold_block(&h).await; + + let result = rpc_call(&h.app, "eth_getBlockReceipts", json!(["0x1"])).await; + + let receipts = result.as_array().unwrap(); + assert_eq!(receipts.len(), 2); + assert_eq!(receipts[0]["transactionIndex"], json!("0x0")); + assert_eq!(receipts[1]["transactionIndex"], json!("0x1")); + assert_eq!(receipts[0]["status"], json!("0x1")); + assert_eq!(receipts[1]["status"], json!("0x1")); +} + +// --------------------------------------------------------------------------- +// Group 4: Hot storage — account state +// --------------------------------------------------------------------------- + +/// Populate hot storage with a test account. 
+fn setup_hot_account(hot: &MemKv) { + let writer = hot.writer().unwrap(); + + let code = alloy::primitives::Bytes::from_static(&[0x60, 0x00, 0x60, 0x00, 0xf3]); + let bytecode = Bytecode::new_raw(code); + let code_hash = bytecode.hash_slow(); + + writer + .put_account( + &TEST_ADDR, + &Account { + nonce: 5, + balance: U256::from(1_000_000_000_000_000_000u128), + bytecode_hash: Some(code_hash), + }, + ) + .unwrap(); + + writer.put_storage(&TEST_ADDR, &U256::from(42), &U256::from(999)).unwrap(); + + writer.put_bytecode(&code_hash, &bytecode).unwrap(); + + writer.commit().unwrap(); +} + +#[tokio::test] +async fn test_get_balance() { + let h = TestHarness::new(1).await; + setup_hot_account(&h.hot); + + // Append a dummy block so tag resolution succeeds + let block = make_block(1, vec![], 0); + h.append_block(block).await; + + let result = + rpc_call(&h.app, "eth_getBalance", json!([format!("{:?}", TEST_ADDR), "latest"])).await; + + // 1 ETH = 10^18 + assert_eq!(result, json!("0xde0b6b3a7640000")); +} + +#[tokio::test] +async fn test_get_transaction_count() { + let h = TestHarness::new(1).await; + setup_hot_account(&h.hot); + + let block = make_block(1, vec![], 0); + h.append_block(block).await; + + let result = + rpc_call(&h.app, "eth_getTransactionCount", json!([format!("{:?}", TEST_ADDR), "latest"])) + .await; + + assert_eq!(result, json!("0x5")); +} + +#[tokio::test] +async fn test_get_storage_at() { + let h = TestHarness::new(1).await; + setup_hot_account(&h.hot); + + let block = make_block(1, vec![], 0); + h.append_block(block).await; + + let slot = format!("{:#066x}", 42u64); + let result = + rpc_call(&h.app, "eth_getStorageAt", json!([format!("{:?}", TEST_ADDR), slot, "latest"])) + .await; + + // 999 = 0x3e7, padded to 32 bytes + let expected = format!("{:#066x}", 999u64); + assert_eq!(result, json!(expected)); +} + +#[tokio::test] +async fn test_get_code() { + let h = TestHarness::new(1).await; + setup_hot_account(&h.hot); + + let block = make_block(1, vec![], 0); + h.append_block(block).await; + + let result = + rpc_call(&h.app, "eth_getCode", json!([format!("{:?}", TEST_ADDR), "latest"])).await; + + assert_eq!(result, json!("0x60006000f3")); +} + +#[tokio::test] +async fn test_get_balance_unknown_account() { + let h = TestHarness::new(1).await; + + let block = make_block(1, vec![], 0); + h.append_block(block).await; + + let unknown = Address::repeat_byte(0xff); + let result = + rpc_call(&h.app, "eth_getBalance", json!([format!("{:?}", unknown), "latest"])).await; + + assert_eq!(result, json!("0x0")); +} + +// --------------------------------------------------------------------------- +// Group 5: Logs +// --------------------------------------------------------------------------- + +#[tokio::test] +async fn test_get_logs_by_block_hash() { + let h = TestHarness::new(0).await; + + // Create block with transactions that have logs + let (tx0, _) = make_signed_tx(0); + let block = make_block(1, vec![tx0], 2); // 2 logs per receipt + h.append_block(block).await; + h.chain.tags().set_latest(1); + + // Get the block hash + let block_result = rpc_call(&h.app, "eth_getBlockByNumber", json!(["0x1", false])).await; + let block_hash = block_result["hash"].as_str().unwrap().to_string(); + + let result = rpc_call( + &h.app, + "eth_getLogs", + json!([{ + "blockHash": block_hash, + "address": format!("{:?}", LOG_ADDR), + }]), + ) + .await; + + let logs = result.as_array().unwrap(); + assert_eq!(logs.len(), 2); + assert_eq!(logs[0]["address"], json!(format!("{:?}", LOG_ADDR))); + 
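+    // Log indices are block-scoped: the first log in the block is 0x0.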
assert_eq!(logs[0]["blockNumber"], json!("0x1")); + assert_eq!(logs[0]["logIndex"], json!("0x0")); + assert_eq!(logs[1]["logIndex"], json!("0x1")); +} + +#[tokio::test] +async fn test_get_logs_by_range() { + let h = TestHarness::new(0).await; + + let (tx0, _) = make_signed_tx(0); + let block = make_block(1, vec![tx0], 1); + h.append_block(block).await; + h.chain.tags().set_latest(1); + + let result = rpc_call( + &h.app, + "eth_getLogs", + json!([{ + "fromBlock": "0x1", + "toBlock": "0x1", + "topics": [format!("{:?}", LOG_TOPIC)], + }]), + ) + .await; + + let logs = result.as_array().unwrap(); + assert_eq!(logs.len(), 1); + assert!(logs[0]["topics"].as_array().unwrap().contains(&json!(format!("{:?}", LOG_TOPIC)))); +} + +#[tokio::test] +async fn test_get_logs_empty() { + let h = TestHarness::new(0).await; + + let (tx0, _) = make_signed_tx(0); + let block = make_block(1, vec![tx0], 0); // no logs + h.append_block(block).await; + h.chain.tags().set_latest(1); + + let result = rpc_call( + &h.app, + "eth_getLogs", + json!([{ + "fromBlock": "0x1", + "toBlock": "0x1", + "address": format!("{:?}", LOG_ADDR), + }]), + ) + .await; + + assert_eq!(result.as_array().unwrap().len(), 0); +} + +// --------------------------------------------------------------------------- +// Group 6: Edge cases & errors +// --------------------------------------------------------------------------- + +#[tokio::test] +async fn test_not_supported() { + let h = TestHarness::new(0).await; + let resp = rpc_call_raw(&h.app, "eth_protocolVersion", json!([])).await; + assert!(resp.get("error").is_some()); + let msg = resp["error"]["message"].as_str().unwrap(); + assert!(msg.contains("not found"), "unexpected error: {msg}"); +} + +#[tokio::test] +async fn test_syncing_not_syncing() { + let h = TestHarness::new(0).await; + let result = rpc_call(&h.app, "eth_syncing", json!([])).await; + assert_eq!(result, json!(false)); +} + +#[tokio::test] +async fn test_syncing_in_progress() { + let h = TestHarness::new(0).await; + h.chain.tags().set_sync_status(signet_rpc::SyncStatus { + starting_block: 0, + current_block: 50, + highest_block: 100, + }); + let result = rpc_call(&h.app, "eth_syncing", json!([])).await; + assert_eq!(result["starting_block"], json!("0x0")); + assert_eq!(result["current_block"], json!("0x32")); + assert_eq!(result["highest_block"], json!("0x64")); +} + +#[tokio::test] +async fn test_send_raw_tx_no_cache() { + let h = TestHarness::new(0).await; + let resp = rpc_call_raw(&h.app, "eth_sendRawTransaction", json!(["0x00"])).await; + assert!(resp.get("error").is_some()); +} + +// --------------------------------------------------------------------------- +// Group 7: Gas & Fee Queries +// --------------------------------------------------------------------------- + +#[tokio::test] +async fn test_gas_price() { + let h = TestHarness::new(0).await; + + // Create a block with txs that have gas_price (2 gwei) > base_fee (1 gwei) + let (tx0, _) = make_signed_tx_with_gas_price(0, 2_000_000_000); + let block = make_block(1, vec![tx0], 0); + h.append_block(block).await; + h.chain.tags().set_latest(1); + + let result = rpc_call(&h.app, "eth_gasPrice", json!([])).await; + + // tip = gas_price - base_fee = 2e9 - 1e9 = 1e9 + // gasPrice = tip + base_fee = 1e9 + 1e9 = 2e9 = 0x77359400 + assert_eq!(result, json!("0x77359400")); +} + +#[tokio::test] +async fn test_max_priority_fee_per_gas() { + let h = TestHarness::new(0).await; + + let (tx0, _) = make_signed_tx_with_gas_price(0, 2_000_000_000); + let block = make_block(1, vec![tx0], 
+    let (tx0, _) = make_signed_tx_with_gas_price(0, 2_000_000_000);
+    let block = make_block(1, vec![tx0], 0);
+    h.append_block(block).await;
+    h.chain.tags().set_latest(1);
+
+    let result = rpc_call(&h.app, "eth_maxPriorityFeePerGas", json!([])).await;
+
+    // tip only = gas_price - base_fee = 1e9 = 0x3b9aca00
+    assert_eq!(result, json!("0x3b9aca00"));
+}
+
+#[tokio::test]
+async fn test_gas_price_empty_blocks() {
+    let h = TestHarness::new(0).await;
+
+    let block = make_block(1, vec![], 0);
+    h.append_block(block).await;
+    h.chain.tags().set_latest(1);
+
+    let result = rpc_call(&h.app, "eth_gasPrice", json!([])).await;
+
+    // No txs → tip defaults to 1 gwei, so gasPrice = base_fee + tip = 2 gwei
+    assert_eq!(result, json!("0x77359400"));
+}
+
+#[tokio::test]
+async fn test_fee_history_basic() {
+    let h = TestHarness::new(0).await;
+
+    for i in 1u64..=3 {
+        let (tx, _) = make_signed_tx_with_gas_price(i - 1, 2_000_000_000);
+        let block = make_block(i, vec![tx], 0);
+        h.append_block(block).await;
+    }
+    h.chain.tags().set_latest(3);
+
+    // Request 2 blocks of fee history ending at block 3
+    let result = rpc_call(&h.app, "eth_feeHistory", json!(["0x2", "0x3", null])).await;
+
+    // oldest_block = end_block + 1 - block_count = 3 + 1 - 2 = 2
+    assert_eq!(result["oldestBlock"], json!("0x2"));
+    // base_fee_per_gas has block_count + 1 entries (includes the next-block prediction)
+    let base_fees = result["baseFeePerGas"].as_array().unwrap();
+    assert_eq!(base_fees.len(), 3);
+    // gas_used_ratio has block_count entries
+    let gas_ratios = result["gasUsedRatio"].as_array().unwrap();
+    assert_eq!(gas_ratios.len(), 2);
+    // No reward field when no percentiles are requested
+    assert!(result["reward"].is_null());
+}
+
+#[tokio::test]
+async fn test_fee_history_with_rewards() {
+    let h = TestHarness::new(0).await;
+
+    let (tx0, _) = make_signed_tx_with_gas_price(0, 2_000_000_000);
+    let block = make_block(1, vec![tx0], 0);
+    h.append_block(block).await;
+    h.chain.tags().set_latest(1);
+
+    let result = rpc_call(&h.app, "eth_feeHistory", json!(["0x1", "0x1", [25.0, 75.0]])).await;
+
+    assert_eq!(result["oldestBlock"], json!("0x1"));
+    let rewards = result["reward"].as_array().unwrap();
+    assert_eq!(rewards.len(), 1);
+    let block_rewards = rewards[0].as_array().unwrap();
+    assert_eq!(block_rewards.len(), 2);
+}
+
+// ---------------------------------------------------------------------------
+// Group 8: Filters
+// ---------------------------------------------------------------------------
+
+#[tokio::test]
+async fn test_new_block_filter_and_changes() {
+    let h = TestHarness::new(0).await;
+
+    // Install a block filter at block 0
+    let filter_id = rpc_call(&h.app, "eth_newBlockFilter", json!([])).await;
+    let filter_id_str = filter_id.as_str().unwrap().to_string();
+
+    // Append a block
+    let (tx0, _) = make_signed_tx(0);
+    let block = make_block(1, vec![tx0], 0);
+    h.append_block(block).await;
+    h.chain.tags().set_latest(1);
+
+    // Poll for changes; we should get the hash of block 1
+    let changes = rpc_call(&h.app, "eth_getFilterChanges", json!([filter_id_str])).await;
+    let hashes = changes.as_array().unwrap();
+    assert_eq!(hashes.len(), 1);
+    assert!(hashes[0].is_string());
+
+    // Poll again with no new blocks; the result should be empty
+    let changes = rpc_call(&h.app, "eth_getFilterChanges", json!([filter_id_str])).await;
+    let hashes = changes.as_array().unwrap();
+    assert!(hashes.is_empty());
+}
+
+#[tokio::test]
+async fn test_new_log_filter_and_changes() {
+    let h = TestHarness::new(0).await;
+
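+    // Filter criteria are ANDed: a log must match both the address and the topic.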
"address": format!("{:?}", LOG_ADDR), + "topics": [format!("{:?}", LOG_TOPIC)], + }]), + ) + .await; + let filter_id_str = filter_id.as_str().unwrap().to_string(); + + // Append a block with matching logs + let (tx0, _) = make_signed_tx(0); + let block = make_block(1, vec![tx0], 2); + h.append_block(block).await; + h.chain.tags().set_latest(1); + + // Poll for changes — should get matching logs + let changes = rpc_call(&h.app, "eth_getFilterChanges", json!([filter_id_str])).await; + let logs = changes.as_array().unwrap(); + assert_eq!(logs.len(), 2); + assert_eq!(logs[0]["address"], json!(format!("{:?}", LOG_ADDR))); +} + +#[tokio::test] +async fn test_uninstall_filter() { + let h = TestHarness::new(0).await; + + let filter_id = rpc_call(&h.app, "eth_newBlockFilter", json!([])).await; + let filter_id_str = filter_id.as_str().unwrap().to_string(); + + // Uninstall + let result = rpc_call(&h.app, "eth_uninstallFilter", json!([filter_id_str])).await; + assert_eq!(result, json!(true)); + + // Uninstall again — should return false + let result = rpc_call(&h.app, "eth_uninstallFilter", json!([filter_id_str])).await; + assert_eq!(result, json!(false)); +} + +// --------------------------------------------------------------------------- +// Group 9: Debug namespace +// --------------------------------------------------------------------------- + +/// Set up hot storage with a genesis header and fund an address. +/// +/// The genesis header at block 0 is required so `revm_reader_at_height` +/// can validate height bounds. Without it, MemKv returns `NoBlocks`. +fn setup_hot_for_evm(hot: &MemKv, addr: Address, balance: U256) { + let writer = hot.writer().unwrap(); + + // Write a genesis header so the hot storage tracks block 0. + let genesis = SealedHeader::new(Header::default()); + writer.put_header(&genesis).unwrap(); + + writer.put_account(&addr, &Account { nonce: 0, balance, bytecode_hash: None }).unwrap(); + writer.commit().unwrap(); +} + +#[tokio::test] +async fn test_trace_block_by_number_noop() { + let h = TestHarness::new(0).await; + + let (tx0, sender) = make_signed_tx(0); + setup_hot_for_evm(&h.hot, sender, U256::from(1_000_000_000_000_000_000u128)); + + let block = make_block(1, vec![tx0], 0); + h.append_block(block).await; + h.chain.tags().set_latest(1); + + let result = + rpc_call(&h.app, "debug_traceBlockByNumber", json!(["0x1", {"tracer": "noopTracer"}])) + .await; + + let traces = result.as_array().unwrap(); + assert_eq!(traces.len(), 1); +} + +#[tokio::test] +async fn test_trace_transaction_noop() { + let h = TestHarness::new(0).await; + + let (tx0, sender) = make_signed_tx(0); + let tx_hash = *tx0.tx_hash(); + setup_hot_for_evm(&h.hot, sender, U256::from(1_000_000_000_000_000_000u128)); + + let block = make_block(1, vec![tx0], 0); + h.append_block(block).await; + h.chain.tags().set_latest(1); + + let result = rpc_call( + &h.app, + "debug_traceTransaction", + json!([format!("{:?}", tx_hash), {"tracer": "noopTracer"}]), + ) + .await; + + // NoopFrame result is not null + assert!(!result.is_null()); +}