From 59f511b8faad23315c2b90640dc828c1e10e742d Mon Sep 17 00:00:00 2001 From: frisitano Date: Wed, 14 Jan 2026 17:53:49 +0000 Subject: [PATCH 1/9] debug toolkit --- Cargo.lock | 35 +- Cargo.toml | 1 + book/src/SUMMARY.md | 1 + book/src/debug-toolkit.md | 605 ++++++++++++ crates/node/Cargo.toml | 25 + crates/node/src/args.rs | 2 +- crates/node/src/bin/scroll_debug.rs | 44 + crates/node/src/debug_toolkit/actions.rs | 370 ++++++++ crates/node/src/debug_toolkit/cli.rs | 114 +++ crates/node/src/debug_toolkit/commands.rs | 404 ++++++++ crates/node/src/debug_toolkit/event_stream.rs | 298 ++++++ crates/node/src/debug_toolkit/mod.rs | 74 ++ crates/node/src/debug_toolkit/repl.rs | 859 ++++++++++++++++++ crates/node/src/lib.rs | 3 + crates/node/src/test_utils/fixture.rs | 57 +- crates/node/src/test_utils/mod.rs | 21 +- crates/node/tests/e2e.rs | 32 +- crates/node/tests/sync.rs | 9 +- hackathon.json | 168 ++++ 19 files changed, 3096 insertions(+), 26 deletions(-) create mode 100644 book/src/debug-toolkit.md create mode 100644 crates/node/src/bin/scroll_debug.rs create mode 100644 crates/node/src/debug_toolkit/actions.rs create mode 100644 crates/node/src/debug_toolkit/cli.rs create mode 100644 crates/node/src/debug_toolkit/commands.rs create mode 100644 crates/node/src/debug_toolkit/event_stream.rs create mode 100644 crates/node/src/debug_toolkit/mod.rs create mode 100644 crates/node/src/debug_toolkit/repl.rs create mode 100644 hackathon.json diff --git a/Cargo.lock b/Cargo.lock index c6b29bad..2ec339b9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2677,7 +2677,7 @@ checksum = "c3b847e05a34be5c38f3f2a5052178a3bd32e6b5702f3ea775efde95c483a539" dependencies = [ "anyhow", "cc", - "colored", + "colored 2.2.0", "getrandom 0.2.16", "glob", "libc", @@ -2696,7 +2696,7 @@ dependencies = [ "clap", "codspeed", "codspeed-criterion-compat-walltime", - "colored", + "colored 2.2.0", "futures", "regex", "tokio", @@ -2825,6 +2825,15 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name 
= "colored" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fde0e0ec90c9dfb3b4b1a0891a7dcd0e2bffde2f7efed5fe7c9bb00e5bfb915e" +dependencies = [ + "windows-sys 0.59.0", +] + [[package]] name = "combine" version = "4.6.7" @@ -11092,7 +11101,10 @@ name = "rollup-node" version = "1.0.5" dependencies = [ "alloy-chains", + "alloy-consensus", "alloy-eips", + "alloy-genesis", + "alloy-network", "alloy-primitives", "alloy-provider", "alloy-rpc-client", @@ -11108,15 +11120,19 @@ dependencies = [ "aws-sdk-kms", "clap", "color-eyre", + "colored 3.0.0", "console-subscriber", + "crossterm 0.28.1", "eyre", "futures", + "glob", "http-body-util", "hyper 1.8.1", "hyper-util", "jsonrpsee", "pprof", "rayon", + "regex-lite", "reqwest", "reth-chainspec", "reth-cli-util", @@ -11127,6 +11143,7 @@ dependencies = [ "reth-network", "reth-network-api", "reth-network-p2p", + "reth-network-peers", "reth-node-api", "reth-node-builder", "reth-node-core", @@ -11176,6 +11193,7 @@ dependencies = [ "serde_json", "tokio", "tracing", + "tracing-subscriber 0.3.20", ] [[package]] @@ -13688,6 +13706,17 @@ dependencies = [ "tracing-subscriber 0.3.20", ] +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + [[package]] name = "tracing-logfmt" version = "0.3.5" @@ -13732,9 +13761,11 @@ dependencies = [ "serde", "serde_json", "sharded-slab", + "smallvec", "thread_local", "tracing", "tracing-core", + "tracing-log", "tracing-serde", ] diff --git a/Cargo.toml b/Cargo.toml index 492af659..f46d87c9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -127,6 +127,7 @@ strip = "none" alloy-chains = { version = "0.2.5", default-features = false } alloy-consensus = { version = "1.0.37", default-features = false } alloy-eips = { version = "1.0.37", default-features = false 
} +alloy-genesis = { version = "1.0.37", default-features = false } alloy-json-rpc = { version = "1.0.37", default-features = false } alloy-network = { version = "1.0.37", default-features = false } alloy-primitives = { version = "1.4.1", default-features = false } diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md index e63b526d..cc712023 100644 --- a/book/src/SUMMARY.md +++ b/book/src/SUMMARY.md @@ -4,3 +4,4 @@ - [Running a Node](./running-a-node.md) - [Running a Sequencer](./running-a-sequencer.md) - [Running with Docker Compose](./docker-operations.md) +- [Debug Toolkit](./debug-toolkit.md) diff --git a/book/src/debug-toolkit.md b/book/src/debug-toolkit.md new file mode 100644 index 00000000..50f17f61 --- /dev/null +++ b/book/src/debug-toolkit.md @@ -0,0 +1,605 @@ +# Debug Toolkit + +The Debug Toolkit is an interactive REPL (Read-Eval-Print Loop) for debugging, development, and hackathon scenarios. It provides a command-line interface to interact with rollup nodes, inspect chain state, inject transactions, and run custom actions. 
+ +## Getting Started + +### Building + +The debug toolkit is built with the `debug-toolkit` feature flag: + +```bash +cargo build -p rollup-node --features debug-toolkit --release +``` + +### Running + +Launch the debug REPL using the `scroll-debug` binary: + +```bash +# Basic usage with dev chain and sequencer mode +cargo run --features debug-toolkit --bin scroll-debug -- --chain dev --sequencer + +# With multiple follower nodes +cargo run --features debug-toolkit --bin scroll-debug -- --chain dev --sequencer --followers 2 + +# With custom block time (auto-build every 1000ms) +cargo run --features debug-toolkit --bin scroll-debug -- --chain dev --sequencer --block-time 1000 + +# With a real L1 endpoint +cargo run --features debug-toolkit --bin scroll-debug -- --chain dev --sequencer --l1-url https://eth.llamarpc.com +``` + +### CLI Options + +| Option | Description | +|--------|-------------| +| `--chain ` | Chain to use: `dev`, `scroll-sepolia`, `scroll-mainnet`, or path to genesis JSON file (default: `dev`) | +| `--sequencer` | Enable sequencer mode | +| `--followers ` | Number of follower nodes to create (default: 0) | +| `--block-time ` | Block time in milliseconds (default: 0 = manual block building only) | +| `--allow-empty-blocks` | Allow building empty blocks (default: true) | +| `--l1-message-delay ` | L1 message inclusion delay in blocks (default: 0 = immediate) | +| `--l1-url ` | L1 RPC endpoint URL (optional, uses mock L1 if not specified) | + +Run `cargo run --features debug-toolkit --bin scroll-debug -- --help` to see all available options. + +## Quick Start: Multi-Node Environment with Mock L1 + +This walkthrough demonstrates how to spin up a complete local environment with a mock L1, one sequencer, and two follower nodes. 
+ +### Starting the Environment + +```bash +cargo run --features debug-toolkit --bin scroll-debug -- \ + --chain dev \ + --sequencer \ + --followers 2 +``` + +This creates: +- **Node 0**: Sequencer (produces blocks) +- **Node 1**: Follower (receives blocks via P2P) +- **Node 2**: Follower (receives blocks via P2P) + +### Understanding Mock L1 + +When no `--l1-url` is specified, the toolkit uses a **mock L1**. The mock L1 starts in an "unsynced" state, which means the sequencer won't produce blocks until you explicitly sync it. + +If you try to build a block before syncing L1: + +``` +scroll-debug [seq:0]> build +Error: L1 is not synced +Hint: Run 'l1 sync' to mark the mock L1 as synced before building blocks +``` + +### Step-by-Step Walkthrough + +**1. Check initial status:** + +``` +scroll-debug [seq:0]> status +=== Node 0 (Sequencer) === +Node: + Database: /tmp/.tmpXYZ/db/scroll.db + HTTP RPC: http://127.0.0.1:62491 +L2: + Head: #0 (0x1234abcd...) + Safe: #0 (0x1234abcd...) + Finalized: #0 (0x1234abcd...) + Synced: false +L1: + Head: #0 + Finalized: #0 + Processed: #0 + Synced: false +``` + +Note that L1 `Synced` is `false`. + +**2. Sync the mock L1:** + +``` +scroll-debug [seq:0]> l1 sync +L1 synced event sent to all nodes +``` + +**3. Build your first block:** + +``` +scroll-debug [seq:0]> build +Block build triggered! + [EVENT] BlockSequenced { block: 1, hash: 0xabcd1234... } +``` + +**4. Send a transaction and build another block:** + +``` +scroll-debug [seq:0]> tx send 0x742d35Cc6634C0532925a3b844Bc9e7595f0bEb2 1000000000000000000 +Transaction sent! + Hash: 0x5678... + From: 0x1234... + To: 0x742d... + Value: 1000000000000000000 wei +Note: Run 'build' to include in a block + +scroll-debug [seq:0]> build +Block build triggered! + [EVENT] BlockSequenced { block: 2, hash: 0xefgh5678... } +``` + +**5. 
Check that followers received the blocks:** + +``` +scroll-debug [seq:0]> node 1 +Switched to node 1 (Follower) + +scroll-debug [fol:1]> status +=== Node 1 (Follower) === +Node: + Database: /tmp/.tmpABC/db/scroll.db + HTTP RPC: http://127.0.0.1:62502 +L2: + Head: #2 (0xefgh5678...) + Safe: #0 (0x1234abcd...) + Finalized: #0 (0x1234abcd...) + Synced: false +... +``` + +The follower's head is at block #2, showing it received the blocks via P2P. + +**6. Build multiple blocks quickly:** + +``` +scroll-debug [seq:0]> run build-blocks 5 +Building 5 blocks (timeout: 5000ms per block)... + Block 1 triggered, waiting... sequenced at #3 + Block 2 triggered, waiting... sequenced at #4 + Block 3 triggered, waiting... sequenced at #5 + Block 4 triggered, waiting... sequenced at #6 + Block 5 triggered, waiting... sequenced at #7 +Done! Head is now at block #7 +``` + +**7. View all nodes:** + +``` +scroll-debug [seq:0]> nodes +Nodes: + [0] Sequencer * + [1] Follower + [2] Follower +``` + +## Commands + +### Status & Inspection + +| Command | Description | +|---------|-------------| +| `status` | Show node status (L2 head/safe/finalized, L1 state, sync status) | +| `block [n\|latest]` | Display block details | +| `blocks ` | List blocks in range | +| `fcs` | Show forkchoice state | + +**Example:** + +``` +scroll-debug [seq:0]> status +=== Node 0 (Sequencer) === +L2: + Head: #42 (0x1234abcd...) + Safe: #40 (0x5678efgh...) + Finalized: #35 (0x9abc1234...) 
+ Synced: true +L1: + Head: #18923456 + Finalized: #18923400 + Processed: #18923450 + Synced: true +``` + +### L1 Commands + +These commands allow you to simulate L1 events: + +| Command | Description | +|---------|-------------| +| `l1 status` | Show L1 sync state | +| `l1 sync` | Inject L1 synced event | +| `l1 block ` | Inject new L1 block notification | +| `l1 message ` | Inject an L1 message | +| `l1 commit ` | Inject batch commit | +| `l1 finalize ` | Inject batch finalization | +| `l1 reorg ` | Inject L1 reorg | + +### Block & Transaction + +| Command | Description | +|---------|-------------| +| `build` | Build a new block (sequencer mode) | +| `tx send [idx]` | Send ETH transfer (value in wei, idx = wallet index) | +| `tx pending` | List pending transactions | +| `tx inject ` | Inject raw transaction | + +**Example:** + +``` +scroll-debug [seq:0]> tx send 0x742d35Cc6634C0532925a3b844Bc9e7595f0bEb2 1000000000000000000 +Transaction sent! + Hash: 0xabcd... + From: 0x1234... + To: 0x742d... + Value: 1000000000000000000 wei +Note: Run 'build' to include in a block (sequencer mode) + +scroll-debug [seq:0]> build +Block build triggered! +``` + +**Viewing pending transactions:** + +``` +scroll-debug [seq:0]> tx pending +Pending Transactions (2): + [0] hash=0x1234abcd5678... from=0x742d35Cc... nonce=5 gas_price=1000000000 + [1] hash=0xabcdef123456... from=0x742d35Cc... nonce=6 gas_price=1000000000 +``` + +### Wallet + +| Command | Description | +|---------|-------------| +| `wallet` | Show wallet address, balance, and nonce | +| `wallet gen` | Generate and list all available wallets | + +The toolkit includes pre-funded test wallets. Use `wallet gen` to see all available wallets, then reference them by index in `tx send`: + +``` +scroll-debug [seq:0]> wallet gen +Generated Wallets (10): + Chain ID: 222222 + + [0] 0x1234567890abcdef... + Balance: 1000000000000000000000 wei (1000.000000 ETH) + + [1] 0xabcdef1234567890... 
+ Balance: 1000000000000000000000 wei (1000.000000 ETH) + ... + +scroll-debug [seq:0]> tx send 0x742d... 1000000 2 +# Sends from wallet index 2 +``` + +### Network + +| Command | Description | +|---------|-------------| +| `peers` | List connected peers and show local enode | +| `peers connect ` | Connect to a peer (enode://...) | + +### Events + +The REPL streams chain events in real-time as they occur: + +| Command | Description | +|---------|-------------| +| `events on` | Enable background event stream | +| `events off` | Disable background event stream | +| `events [count]` | Stream next N events (default: 10) | +| `events filter ` | Filter events by type (e.g., `Block*`, `L1*`) | +| `events history [n]` | Show last N events (default: 20) | + +**Example with events enabled:** + +``` +scroll-debug [seq:0]> build + [EVENT] BlockSequenced { block: 1, hash: 0xabcd... } + [EVENT] ChainExtended { block: 1 } +Block build triggered! +``` + +### Custom Actions + +Run pre-built or custom actions with full access to the test fixture: + +| Command | Description | +|---------|-------------| +| `run list` | List available custom actions | +| `run [args]` | Execute a custom action | + +**Built-in Actions:** + +| Action | Description | +|--------|-------------| +| `build-blocks [delay_ms]` | Build multiple blocks in sequence | +| `stress-test [build_every]` | Send multiple transactions and build blocks | +| `sync-all` | Send L1 sync event to all nodes | + +**Example:** + +``` +scroll-debug [seq:0]> run build-blocks 10 100 +Running action: build-blocks + +Building 10 blocks with 100ms delay... + Block 1 triggered + Block 2 triggered + ... +Done! 
Head is now at block #10 +``` + +### Node Management + +When running with multiple nodes (e.g., `--followers 2`): + +| Command | Description | +|---------|-------------| +| `node ` | Switch active node context | +| `nodes` | List all nodes in fixture | + +``` +scroll-debug [seq:0]> nodes +Nodes: + [0] Sequencer * + [1] Follower + [2] Follower + +scroll-debug [seq:0]> node 1 +Switched to node 1 (Follower) + +scroll-debug [fol:1]> status +... +``` + +### Database + +| Command | Description | +|---------|-------------| +| `db` | Show database path and access command | + +The `db` command shows the SQLite database path and provides a command to access it from another terminal: + +``` +scroll-debug [seq:0]> db +Database Info: + Path: /path/to/datadir/db/scroll.db + +Access from another terminal: + sqlite3 /path/to/datadir/db/scroll.db + +Useful queries: + .tables -- List all tables + .schema -- Show table schema + SELECT * FROM metadata; -- View metadata + SELECT * FROM l2_block ORDER BY number DESC LIMIT 10; +``` + +The database path is also shown in the `status` command output. + +### Other + +| Command | Description | +|---------|-------------| +| `help` | Show available commands | +| `exit` | Exit the REPL | + +## Creating Custom Actions + +You can create custom actions by implementing the `Action` trait. 
Actions have full access to the `TestFixture`, allowing you to: + +- Access all nodes and their RPC interfaces +- Send transactions from test wallets +- Trigger block building +- Inject L1 events +- Query chain state + +### Implementing an Action + +```rust +use rollup_node::debug_toolkit::actions::{Action, ActionRegistry}; +use rollup_node::test_utils::TestFixture; +use async_trait::async_trait; + +struct MyCustomAction; + +#[async_trait] +impl Action for MyCustomAction { + fn name(&self) -> &'static str { + "my-action" + } + + fn description(&self) -> &'static str { + "Does something cool with the fixture" + } + + fn usage(&self) -> Option<&'static str> { + Some("run my-action [arg1] [arg2]") + } + + async fn execute( + &self, + fixture: &mut TestFixture, + args: &[String], + ) -> eyre::Result<()> { + // Access nodes + println!("Fixture has {} nodes", fixture.nodes.len()); + + // Access wallet + let wallet = fixture.wallet.lock().await; + println!("Wallet address: {:?}", wallet.inner.address()); + drop(wallet); + + // Access specific node + let node = &fixture.nodes[0]; + let status = node.rollup_manager_handle.status().await?; + println!("Head block: {}", status.l2.fcs.head_block_info().number); + + // Trigger block building (sequencer only) + if node.is_sequencer() { + node.rollup_manager_handle.build_block(); + } + + // Inject L1 events + fixture.l1().sync().await?; + + Ok(()) + } +} +``` + +### Registering Actions + +Add your action to the registry in `crates/node/src/debug_toolkit/actions.rs`: + +```rust +impl ActionRegistry { + pub fn new() -> Self { + let mut registry = Self { actions: Vec::new() }; + + // Built-in actions + registry.register(Box::new(BuildBlocksAction)); + registry.register(Box::new(StressTestAction)); + registry.register(Box::new(SyncAllAction)); + + // Add your custom action here: + registry.register(Box::new(MyCustomAction)); + + registry + } +} +``` + +Or register programmatically before running the REPL: + +```rust +let fixture = 
TestFixture::builder() + .with_chain("dev") + .sequencer() + .build() + .await?; + +let mut repl = DebugRepl::new(fixture); +repl.action_registry_mut().register(Box::new(MyCustomAction)); +repl.run().await?; +``` + +## Use Cases + +### Hackathon Development + +The debug toolkit is ideal for hackathons where you need to: + +- Quickly spin up a local Scroll environment +- Test smart contract interactions +- Debug transaction flows +- Simulate L1-L2 message passing + +### Integration Testing + +Use custom actions to create reproducible test scenarios: + +```rust +struct IntegrationTestAction; + +#[async_trait] +impl Action for IntegrationTestAction { + fn name(&self) -> &'static str { "integration-test" } + fn description(&self) -> &'static str { "Run integration test suite" } + + async fn execute(&self, fixture: &mut TestFixture, _args: &[String]) -> eyre::Result<()> { + // 1. Sync L1 + fixture.l1().sync().await?; + + // 2. Send some transactions + // ... + + // 3. Build blocks + let sequencer = fixture.nodes.iter().find(|n| n.is_sequencer()).unwrap(); + sequencer.rollup_manager_handle.build_block(); + + // 4. Verify state + // ... + + Ok(()) + } +} +``` + +### Debugging + +Inspect chain state interactively: + +``` +scroll-debug [seq:0]> block 42 +Block #42 + Hash: 0xabcd... + Parent: 0x1234... + Timestamp: 1705123456 + Gas Used: 21000 + Gas Limit: 20000000 + Txs: 3 + [0] hash=0x1111... + [1] hash=0x2222... + [2] hash=0x3333... + +scroll-debug [seq:0]> fcs +Forkchoice State: + Head: + Number: 42 + Hash: 0xabcd... + Safe: + Number: 40 + Hash: 0x5678... + Finalized: + Number: 35 + Hash: 0x9abc... +``` + +## External Tools + +### Using Cast + +The `status` command shows the HTTP RPC endpoint for each node. You can use [Foundry's `cast`](https://book.getfoundry.sh/cast/) to interact with the node from another terminal: + +``` +scroll-debug [seq:0]> status +=== Node 0 (Sequencer) === +Node: + Database: /tmp/.tmpXYZ/db/scroll.db + HTTP RPC: http://127.0.0.1:62491 +... 
+``` + +Then in another terminal, use `cast` with the HTTP RPC URL: + +```bash +# Get the current block number +cast block-number --rpc-url http://127.0.0.1:62491 + +# Get block details +cast block latest --rpc-url http://127.0.0.1:62491 + +# Get an account balance +cast balance 0x742d35Cc6634C0532925a3b844Bc9e7595f0bEb2 --rpc-url http://127.0.0.1:62491 + +# Send a transaction (using a test private key) +cast send 0x742d35Cc6634C0532925a3b844Bc9e7595f0bEb2 \ + --value 1ether \ + --private-key 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 \ + --rpc-url http://127.0.0.1:62491 + +# Call a contract +cast call 0xContractAddress "balanceOf(address)" 0xUserAddress --rpc-url http://127.0.0.1:62491 + +# Get transaction receipt +cast receipt 0xTransactionHash --rpc-url http://127.0.0.1:62491 +``` + +This is useful for: +- Testing smart contract deployments and interactions +- Debugging transaction behavior +- Scripting complex test scenarios +- Using familiar Ethereum tooling with your local rollup node diff --git a/crates/node/Cargo.toml b/crates/node/Cargo.toml index b90e609b..7b771921 100644 --- a/crates/node/Cargo.toml +++ b/crates/node/Cargo.toml @@ -10,6 +10,11 @@ exclude.workspace = true name = "rollup-node" path = "src/main.rs" +[[bin]] +name = "scroll-debug" +path = "src/bin/scroll_debug.rs" +required-features = ["debug-toolkit"] + [lints] workspace = true @@ -19,6 +24,9 @@ async-trait.workspace = true # alloy alloy-chains.workspace = true +alloy-consensus.workspace = true +alloy-genesis.workspace = true +alloy-network.workspace = true alloy-primitives.workspace = true alloy-provider.workspace = true alloy-rpc-client.workspace = true @@ -54,6 +62,7 @@ reth-node-types.workspace = true reth-network.workspace = true reth-network-api.workspace = true reth-network-p2p.workspace = true +reth-network-peers.workspace = true reth-revm.workspace = true reth-rpc-api.workspace = true reth-rpc-eth-api.workspace = true @@ -89,6 +98,13 @@ reth-tokio-util = { 
workspace = true, optional = true } scroll-alloy-rpc-types-engine = { workspace = true, optional = true } scroll-alloy-rpc-types.workspace = true +# debug-toolkit dependencies +colored = { version = "3.0", optional = true } +crossterm = { version = "0.28", optional = true } +glob = { version = "0.3", optional = true } +regex-lite = { version = "0.1", optional = true } +tracing-subscriber = { version = "0.3", optional = true } + scroll-db.workspace = true scroll-engine.workspace = true scroll-migration.workspace = true @@ -99,6 +115,7 @@ auto_impl.workspace = true clap = { workspace = true, features = ["derive", "env"] } eyre.workspace = true futures.workspace = true +serde_json.workspace = true jsonrpsee = { version = "0.26.0", features = ["server", "client", "macros"] } rayon.workspace = true reqwest.workspace = true @@ -135,6 +152,14 @@ alloy-rpc-types-eth = { workspace = true } [features] js-tracer = ["reth-scroll-node/js-tracer", "reth-scroll-rpc/js-tracer"] +debug-toolkit = [ + "test-utils", + "dep:colored", + "dep:crossterm", + "dep:glob", + "dep:regex-lite", + "dep:tracing-subscriber", +] test-utils = [ "reth-engine-local", "reth-trie-db/test-utils", diff --git a/crates/node/src/args.rs b/crates/node/src/args.rs index a6f006bc..0bfb5cd9 100644 --- a/crates/node/src/args.rs +++ b/crates/node/src/args.rs @@ -5,6 +5,7 @@ use crate::{ pprof::PprofConfig, }; use alloy_chains::NamedChain; +use alloy_consensus::BlockHeader; use alloy_primitives::{hex, Address, U128}; use alloy_provider::{layers::CacheLayer, Provider, ProviderBuilder}; use alloy_rpc_client::RpcClient; @@ -19,7 +20,6 @@ use reth_network::NetworkProtocols; use reth_network_api::FullNetwork; use reth_network_p2p::FullBlockClient; use reth_node_builder::{rpc::RethRpcServerHandles, NodeConfig as RethNodeConfig}; -use reth_node_core::primitives::BlockHeader; use reth_scroll_chainspec::{ ChainConfig, ScrollChainConfig, ScrollChainSpec, SCROLL_FEE_VAULT_ADDRESS, }; diff --git 
a/crates/node/src/bin/scroll_debug.rs b/crates/node/src/bin/scroll_debug.rs new file mode 100644 index 00000000..085afd7a --- /dev/null +++ b/crates/node/src/bin/scroll_debug.rs @@ -0,0 +1,44 @@ +//! Scroll Debug Toolkit - Interactive REPL for debugging rollup nodes. +//! +//! Usage: +//! ```bash +//! # Start REPL with dev chain and sequencer mode +//! cargo run --features debug-toolkit --bin scroll-debug -- --chain dev --sequencer +//! +//! # Start with persistent storage +//! cargo run --features debug-toolkit --bin scroll-debug -- --chain dev --sequencer --datadir ./data +//! +//! # Start with followers +//! cargo run --features debug-toolkit --bin scroll-debug -- --chain dev --sequencer --followers 2 +//! +//! # Start with a real L1 endpoint +//! cargo run --features debug-toolkit --bin scroll-debug -- --chain dev --sequencer --l1-url https://eth.llamarpc.com +//! +//! # See all available options +//! cargo run --features debug-toolkit --bin scroll-debug -- --help +//! ``` + +#[cfg(feature = "debug-toolkit")] +fn main() -> eyre::Result<()> { + use clap::Parser; + use rollup_node::debug_toolkit::DebugArgs; + + // Initialize tracing + if std::env::var("RUST_LOG").is_err() { + std::env::set_var("RUST_LOG", "info"); + } + tracing_subscriber::fmt::init(); + + // Create tokio runtime and run + tokio::runtime::Builder::new_multi_thread().enable_all().build()?.block_on(async { + let args = DebugArgs::parse(); + args.run().await + }) +} + +#[cfg(not(feature = "debug-toolkit"))] +fn main() { + eprintln!("Error: scroll-debug requires the 'debug-toolkit' feature."); + eprintln!("Run with: cargo run --features debug-toolkit --bin scroll-debug"); + std::process::exit(1); +} diff --git a/crates/node/src/debug_toolkit/actions.rs b/crates/node/src/debug_toolkit/actions.rs new file mode 100644 index 00000000..b84ad7a1 --- /dev/null +++ b/crates/node/src/debug_toolkit/actions.rs @@ -0,0 +1,370 @@ +//! Custom action framework for the debug toolkit. +//! +//! 
Users can implement the [`Action`] trait to create custom commands +//! that have full access to the [`TestFixture`]. +//! +//! # Example +//! +//! ```rust,ignore +//! use rollup_node::debug_toolkit::actions::{Action, ActionRegistry}; +//! use rollup_node::test_utils::TestFixture; +//! use async_trait::async_trait; +//! +//! struct MyAction; +//! +//! #[async_trait] +//! impl Action for MyAction { +//! fn name(&self) -> &'static str { +//! "my-action" +//! } +//! +//! fn description(&self) -> &'static str { +//! "Does something cool with the fixture" +//! } +//! +//! async fn execute( +//! &self, +//! fixture: &mut TestFixture, +//! args: &[String], +//! ) -> eyre::Result<()> { +//! // Your custom logic here +//! println!("Running my action with {} nodes!", fixture.nodes.len()); +//! Ok(()) +//! } +//! } +//! +//! // Register in ActionRegistry::new() +//! ``` + +use crate::test_utils::TestFixture; +use async_trait::async_trait; +use colored::Colorize; +use futures::StreamExt; +use rollup_node_chain_orchestrator::ChainOrchestratorEvent; + +/// Trait for custom debug actions. +/// +/// Implement this trait to create actions that can be invoked via `run `. +#[async_trait] +pub trait Action: Send + Sync { + /// Name of the action (used in `run `). + fn name(&self) -> &'static str; + + /// Short description shown in `run list`. + fn description(&self) -> &'static str; + + /// Optional usage string for help. + fn usage(&self) -> Option<&'static str> { + None + } + + /// Execute the action with full access to the fixture. + /// + /// # Arguments + /// * `fixture` - Mutable reference to the test fixture + /// * `args` - Arguments passed after the action name + async fn execute(&self, fixture: &mut TestFixture, args: &[String]) -> eyre::Result<()>; +} + +/// Registry of available actions. 
+pub struct ActionRegistry { + actions: Vec>, +} + +impl std::fmt::Debug for ActionRegistry { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("ActionRegistry").field("action_count", &self.actions.len()).finish() + } +} + +impl Default for ActionRegistry { + fn default() -> Self { + Self::new() + } +} + +impl ActionRegistry { + /// Create a new registry with built-in actions. + pub fn new() -> Self { + let mut registry = Self { actions: Vec::new() }; + + // Register built-in example actions + registry.register(Box::new(BuildBlocksAction)); + registry.register(Box::new(StressTestAction)); + registry.register(Box::new(SyncAllAction)); + + // ======================================== + // ADD YOUR CUSTOM ACTIONS HERE: + // registry.register(Box::new(MyCustomAction)); + // ======================================== + + registry + } + + /// Register a new action. + pub fn register(&mut self, action: Box) { + self.actions.push(action); + } + + /// Get an action by name. + pub fn get(&self, name: &str) -> Option<&dyn Action> { + self.actions.iter().find(|a| a.name() == name).map(|a| a.as_ref()) + } + + /// List all registered actions. + pub fn list(&self) -> impl Iterator { + self.actions.iter().map(|a| a.as_ref()) + } +} + +// ============================================================================ +// Built-in Example Actions +// ============================================================================ + +/// Build multiple blocks in sequence. 
+struct BuildBlocksAction; + +#[async_trait] +impl Action for BuildBlocksAction { + fn name(&self) -> &'static str { + "build-blocks" + } + + fn description(&self) -> &'static str { + "Build multiple blocks in sequence" + } + + fn usage(&self) -> Option<&'static str> { + Some("run build-blocks [timeout_ms]") + } + + async fn execute(&self, fixture: &mut TestFixture, args: &[String]) -> eyre::Result<()> { + let count: usize = args.first().and_then(|s| s.parse().ok()).unwrap_or(5); + let timeout_ms: u64 = args.get(1).and_then(|s| s.parse().ok()).unwrap_or(5000); + + println!("Building {} blocks (timeout: {}ms per block)...", count, timeout_ms); + + let sequencer_idx = fixture + .nodes + .iter() + .position(|n| n.is_sequencer()) + .ok_or_else(|| eyre::eyre!("No sequencer node found"))?; + + // Get an event listener for the sequencer + let mut event_rx = fixture.nodes[sequencer_idx] + .rollup_manager_handle + .get_event_listener() + .await + .map_err(|e| eyre::eyre!("Failed to get event listener: {}", e))?; + + for i in 1..=count { + fixture.nodes[sequencer_idx].rollup_manager_handle.build_block(); + print!(" Block {} triggered, waiting...", i); + let _ = std::io::Write::flush(&mut std::io::stdout()); + + // Wait for BlockSequenced event + let timeout = tokio::time::sleep(std::time::Duration::from_millis(timeout_ms)); + tokio::pin!(timeout); + + loop { + tokio::select! { + event = event_rx.next() => { + if let Some(ChainOrchestratorEvent::BlockSequenced(block)) = event { + println!(" sequenced at #{}", block.header.number); + break; + } + // Continue waiting for BlockSequenced event + } + _ = &mut timeout => { + println!(" timeout!"); + return Err(eyre::eyre!("Timeout waiting for block {} to be sequenced", i)); + } + } + } + } + + let status = fixture.nodes[sequencer_idx].rollup_manager_handle.status().await?; + println!( + "{}", + format!("Done! 
Head is now at block #{}", status.l2.fcs.head_block_info().number) + .green() + ); + + Ok(()) + } +} + +/// Stress test by sending many transactions. +struct StressTestAction; + +#[async_trait] +impl Action for StressTestAction { + fn name(&self) -> &'static str { + "stress-test" + } + + fn description(&self) -> &'static str { + "Send multiple transactions and build blocks" + } + + fn usage(&self) -> Option<&'static str> { + Some("run stress-test [build_every_n]") + } + + async fn execute(&self, fixture: &mut TestFixture, args: &[String]) -> eyre::Result<()> { + use alloy_consensus::{SignableTransaction, TxEip1559}; + use alloy_eips::eip2718::Encodable2718; + use alloy_network::TxSignerSync; + use alloy_primitives::{TxKind, U256}; + + let tx_count: usize = args.first().and_then(|s| s.parse().ok()).unwrap_or(10); + let build_every: usize = args.get(1).and_then(|s| s.parse().ok()).unwrap_or(5); + + println!("Stress test: {} txs, build every {} txs", tx_count, build_every); + + let sequencer_idx = fixture + .nodes + .iter() + .position(|n| n.is_sequencer()) + .ok_or_else(|| eyre::eyre!("No sequencer node found"))?; + + let mut wallet = fixture.wallet.lock().await; + let chain_id = wallet.chain_id; + let signer = wallet.inner.clone(); + let to_address = signer.address(); // Send to self + + for i in 0..tx_count { + let nonce = wallet.inner_nonce; + wallet.inner_nonce += 1; + + let mut tx = TxEip1559 { + chain_id, + nonce, + gas_limit: 21000, + max_fee_per_gas: 1_000_000_000, + max_priority_fee_per_gas: 1_000_000_000, + to: TxKind::Call(to_address), + value: U256::from(1), + access_list: Default::default(), + input: Default::default(), + }; + + let signature = signer.sign_transaction_sync(&mut tx)?; + let signed = tx.into_signed(signature); + let raw_tx = alloy_primitives::Bytes::from(signed.encoded_2718()); + + // Need to drop wallet lock to inject + drop(wallet); + + fixture.inject_tx_on(sequencer_idx, raw_tx).await?; + print!("."); + + // Re-acquire wallet lock + 
wallet = fixture.wallet.lock().await; + + // Build block periodically + if (i + 1) % build_every == 0 { + drop(wallet); + fixture.nodes[sequencer_idx].rollup_manager_handle.build_block(); + tokio::time::sleep(std::time::Duration::from_millis(200)).await; + wallet = fixture.wallet.lock().await; + print!("B"); + } + } + + drop(wallet); + + // Final build + fixture.nodes[sequencer_idx].rollup_manager_handle.build_block(); + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + + println!(); + let status = fixture.nodes[sequencer_idx].rollup_manager_handle.status().await?; + println!( + "{}", + format!( + "Done! Sent {} txs, head at block #{}", + tx_count, + status.l2.fcs.head_block_info().number + ) + .green() + ); + + Ok(()) + } +} + +/// Ensure L1 is synced on all nodes. +struct SyncAllAction; + +#[async_trait] +impl Action for SyncAllAction { + fn name(&self) -> &'static str { + "sync-all" + } + + fn description(&self) -> &'static str { + "Send L1 sync event to all nodes" + } + + fn usage(&self) -> Option<&'static str> { + None + } + + async fn execute(&self, fixture: &mut TestFixture, _args: &[String]) -> eyre::Result<()> { + println!("Syncing L1 on all {} nodes...", fixture.nodes.len()); + + fixture.l1().sync().await?; + + println!("{}", "All nodes synced!".green()); + Ok(()) + } +} + +// ============================================================================ +// Template for custom actions - copy this to create your own! +// ============================================================================ + +#[allow(dead_code)] +struct TemplateAction; + +#[allow(dead_code)] +#[async_trait] +impl Action for TemplateAction { + fn name(&self) -> &'static str { + "template" + } + + fn description(&self) -> &'static str { + "Template action - copy and modify!" 
+ } + + fn usage(&self) -> Option<&'static str> { + Some("run template [arg1] [arg2]") + } + + async fn execute(&self, fixture: &mut TestFixture, args: &[String]) -> eyre::Result<()> { + // Access nodes + println!("Fixture has {} nodes", fixture.nodes.len()); + + // Access wallet + let wallet = fixture.wallet.lock().await; + println!("Wallet address: {:?}", wallet.inner.address()); + drop(wallet); + + // Access L1 provider + // fixture.l1().sync().await?; + + // Access specific node + let node = &fixture.nodes[0]; + let status = node.rollup_manager_handle.status().await?; + println!("Head block: {}", status.l2.fcs.head_block_info().number); + + // Process arguments + for (i, arg) in args.iter().enumerate() { + println!("Arg {}: {}", i, arg); + } + + Ok(()) + } +} diff --git a/crates/node/src/debug_toolkit/cli.rs b/crates/node/src/debug_toolkit/cli.rs new file mode 100644 index 00000000..1ccfc04e --- /dev/null +++ b/crates/node/src/debug_toolkit/cli.rs @@ -0,0 +1,114 @@ +//! CLI subcommand for the debug toolkit. + +use crate::test_utils::TestFixtureBuilder; +use clap::Parser; +use reth_network_peers::TrustedPeer; +use std::{path::PathBuf, str::FromStr}; + +/// Debug toolkit CLI arguments. +#[derive(Debug, Parser)] +#[command(name = "scroll-debug", about = "Scroll Debug Toolkit - Interactive REPL for debugging")] +pub struct DebugArgs { + /// Chain to use (dev, scroll-sepolia, scroll-mainnet) or path to genesis file. + #[arg(long, default_value = "dev")] + pub chain: String, + + /// Enable sequencer mode. + #[arg(long)] + pub sequencer: bool, + + /// Number of follower nodes. + #[arg(long, default_value = "0")] + pub followers: usize, + + /// Persistent data directory (uses temp dir if not specified). + #[arg(long)] + pub datadir: Option, + + /// Block time in milliseconds (0 = manual block building only). + #[arg(long, default_value = "0")] + pub block_time: u64, + + /// Allow building empty blocks (default: true when sequencer is enabled). 
+ #[arg(long, default_value = "true")] + pub allow_empty_blocks: bool, + + /// L1 message inclusion delay in blocks (0 = immediate). + #[arg(long, default_value = "0")] + pub l1_message_delay: u64, + + /// L1 RPC endpoint URL (optional, uses mock L1 if not specified). + #[arg(long)] + pub l1_url: Option, + + /// Comma-separated list of bootnode enode URLs to connect to. + #[arg(long, value_delimiter = ',')] + pub bootnodes: Option>, +} + +impl DebugArgs { + /// Run the debug toolkit with these arguments. + pub async fn run(self) -> eyre::Result<()> { + use super::DebugRepl; + + // Build the fixture + let mut builder = TestFixtureBuilder::new().with_chain(&self.chain); + + if self.sequencer { + builder = builder.sequencer(); + } + + if self.followers > 0 { + builder = builder.followers(self.followers); + } + + // Apply sequencer settings + builder = builder + .block_time(self.block_time) + .allow_empty_blocks(self.allow_empty_blocks) + .with_l1_message_delay(self.l1_message_delay); + + // Apply L1 URL if provided + if let Some(l1_url) = self.l1_url { + let config = builder.config_mut(); + config.l1_provider_args.url = Some(l1_url); + } + + // Parse and apply bootnodes if provided + if let Some(bootnode_strs) = self.bootnodes { + let mut bootnodes = Vec::with_capacity(bootnode_strs.len()); + for enode in bootnode_strs { + match TrustedPeer::from_str(&enode) { + Ok(peer) => bootnodes.push(peer), + Err(e) => { + return Err(eyre::eyre!("Failed to parse bootnode '{}': {}", enode, e)); + } + } + } + if !bootnodes.is_empty() { + builder = builder.bootnodes(bootnodes); + } + } + + let fixture = builder.build().await?; + + // Create and run REPL + let mut repl = DebugRepl::new(fixture); + repl.run().await + } +} + +/// Entry point for the debug toolkit. 
+/// +/// Usage: +/// ```bash +/// cargo run --features debug-toolkit --bin scroll-debug -- --chain dev --sequencer +/// ``` +pub async fn main() -> eyre::Result<()> { + // Initialize tracing + tracing_subscriber::fmt::init(); + + // Parse arguments and run + let args = DebugArgs::parse(); + args.run().await +} diff --git a/crates/node/src/debug_toolkit/commands.rs b/crates/node/src/debug_toolkit/commands.rs new file mode 100644 index 00000000..7125159e --- /dev/null +++ b/crates/node/src/debug_toolkit/commands.rs @@ -0,0 +1,404 @@ +//! Command parsing and execution for the debug REPL. + +use alloy_primitives::{Address, Bytes, U256}; +use colored::Colorize; +use std::str::FromStr; + +/// A parsed REPL command. +#[derive(Debug, Clone)] +pub enum Command { + /// Show node status. + Status, + /// Show block details. + Block(BlockArg), + /// List blocks in range. + Blocks { + /// Starting block number. + from: u64, + /// Ending block number. + to: u64, + }, + /// Show forkchoice state. + Fcs, + /// L1 commands. + L1(L1Command), + /// Build a new block. + Build, + /// Transaction commands. + Tx(TxCommand), + /// Wallet commands. + Wallet(WalletCommand), + /// Peer commands. + Peers(PeersCommand), + /// Event commands. + Events(EventsCommand), + /// Run a custom action. + Run(RunCommand), + /// Switch to a different node. + Node(usize), + /// List all nodes. + Nodes, + /// Show database path and access command. + Db, + /// Show help. + Help, + /// Exit the REPL. + Exit, + /// Unknown command. + Unknown(String), +} + +/// Run command variants. +#[derive(Debug, Clone)] +pub enum RunCommand { + /// List available actions. + List, + /// Execute an action by name. + Execute { + /// Action name. + name: String, + /// Arguments to pass to the action. + args: Vec, + }, +} + +/// Block argument: either a number or "latest". +#[derive(Debug, Clone)] +pub enum BlockArg { + /// Latest block. + Latest, + /// Block by number. + Number(u64), +} + +/// L1-related commands. 
+#[derive(Debug, Clone)] +pub enum L1Command { + /// Show L1 status. + Status, + /// Inject L1 synced event. + Sync, + /// Inject new L1 block. + Block(u64), + /// Inject L1 message (JSON). + Message(String), + /// Inject batch commit (JSON). + Commit(String), + /// Inject batch finalization. + Finalize(u64), + /// Inject L1 reorg. + Reorg(u64), +} + +/// Transaction-related commands. +#[derive(Debug, Clone)] +pub enum TxCommand { + /// Send a transfer. + Send { + /// Recipient address. + to: Address, + /// Transfer value. + value: U256, + /// Wallet index to send from (from `wallet gen` list). + from: Option, + }, + /// List pending transactions. + Pending, + /// Inject raw transaction. + Inject(Bytes), +} + +/// Peer-related commands. +#[derive(Debug, Clone)] +pub enum PeersCommand { + /// List connected peers. + List, + /// Connect to a peer. + Connect(String), +} + +/// Wallet-related commands. +#[derive(Debug, Clone)] +pub enum WalletCommand { + /// Show wallet info (address, balance, nonce). + Info, + /// Generate and list available wallets. + Gen, +} + +/// Event-related commands. +#[derive(Debug, Clone)] +pub enum EventsCommand { + /// Enable background event stream. + On, + /// Disable background event stream. + Off, + /// Stream next N events. + Stream(usize), + /// Set event filter. + Filter(Option), + /// Show event history. + History(usize), +} + +impl Command { + /// Parse a command from input string. 
+ pub fn parse(input: &str) -> Self { + let input = input.trim(); + if input.is_empty() { + return Self::Unknown(String::new()); + } + + let parts: Vec<&str> = input.split_whitespace().collect(); + let cmd = parts[0].to_lowercase(); + let args = &parts[1..]; + + match cmd.as_str() { + "status" => Self::Status, + "block" => Self::parse_block(args), + "blocks" => Self::parse_blocks(args), + "fcs" | "forkchoice" => Self::Fcs, + "l1" => Self::parse_l1(args), + "build" => Self::Build, + "tx" => Self::parse_tx(args), + "wallet" => Self::parse_wallet(args), + "peers" | "peer" => Self::parse_peers(args), + "events" | "event" => Self::parse_events(args), + "run" => Self::parse_run(args), + "node" => Self::parse_node(args), + "nodes" => Self::Nodes, + "db" | "database" => Self::Db, + "help" | "?" => Self::Help, + "exit" | "quit" | "q" => Self::Exit, + _ => Self::Unknown(cmd), + } + } + + fn parse_block(args: &[&str]) -> Self { + let arg = args.first().copied().unwrap_or("latest"); + if arg == "latest" { + Self::Block(BlockArg::Latest) + } else { + match arg.parse::() { + Ok(n) => Self::Block(BlockArg::Number(n)), + Err(_) => Self::Unknown(format!("block {}", arg)), + } + } + } + + fn parse_blocks(args: &[&str]) -> Self { + if args.len() < 2 { + return Self::Unknown("blocks requires arguments".to_string()); + } + match (args[0].parse::(), args[1].parse::()) { + (Ok(from), Ok(to)) => Self::Blocks { from, to }, + _ => Self::Unknown("blocks requires numeric arguments".to_string()), + } + } + + fn parse_l1(args: &[&str]) -> Self { + let subcmd = args.first().copied().unwrap_or("status"); + let subargs = if args.len() > 1 { &args[1..] 
} else { &[] }; + + match subcmd { + "status" => Self::L1(L1Command::Status), + "sync" | "synced" => Self::L1(L1Command::Sync), + "block" => { + if let Some(n) = subargs.first().and_then(|s| s.parse::().ok()) { + Self::L1(L1Command::Block(n)) + } else { + Self::Unknown("l1 block requires a block number".to_string()) + } + } + "message" | "msg" => { + if subargs.is_empty() { + Self::Unknown("l1 message requires JSON data".to_string()) + } else { + Self::L1(L1Command::Message(subargs.join(" "))) + } + } + "commit" => { + if subargs.is_empty() { + Self::Unknown("l1 commit requires JSON data".to_string()) + } else { + Self::L1(L1Command::Commit(subargs.join(" "))) + } + } + "finalize" => { + if let Some(n) = subargs.first().and_then(|s| s.parse::().ok()) { + Self::L1(L1Command::Finalize(n)) + } else { + Self::Unknown("l1 finalize requires a batch index".to_string()) + } + } + "reorg" => { + if let Some(n) = subargs.first().and_then(|s| s.parse::().ok()) { + Self::L1(L1Command::Reorg(n)) + } else { + Self::Unknown("l1 reorg requires a block number".to_string()) + } + } + _ => Self::Unknown(format!("l1 {}", subcmd)), + } + } + + fn parse_tx(args: &[&str]) -> Self { + let subcmd = args.first().copied().unwrap_or("pending"); + let subargs = if args.len() > 1 { &args[1..] 
} else { &[] }; + + match subcmd { + "pending" => Self::Tx(TxCommand::Pending), + "send" => { + if subargs.len() < 2 { + return Self::Unknown( + "tx send requires [wallet_index]".to_string(), + ); + } + match (Address::from_str(subargs[0]), U256::from_str(subargs[1])) { + (Ok(to), Ok(value)) => { + let from = subargs.get(2).and_then(|s| s.parse::().ok()); + Self::Tx(TxCommand::Send { to, value, from }) + } + _ => Self::Unknown("tx send: invalid address or value".to_string()), + } + } + "inject" => { + if let Some(hex) = subargs.first() { + match Bytes::from_str(hex) { + Ok(bytes) => Self::Tx(TxCommand::Inject(bytes)), + Err(_) => Self::Unknown("tx inject: invalid hex data".to_string()), + } + } else { + Self::Unknown("tx inject requires hex data".to_string()) + } + } + _ => Self::Unknown(format!("tx {}", subcmd)), + } + } + + fn parse_wallet(args: &[&str]) -> Self { + let subcmd = args.first().copied().unwrap_or("info"); + match subcmd { + "info" | "" => Self::Wallet(WalletCommand::Info), + "gen" | "generate" => Self::Wallet(WalletCommand::Gen), + _ => Self::Unknown("wallet command not recognized".to_string()), + } + } + + fn parse_peers(args: &[&str]) -> Self { + let subcmd = args.first().copied().unwrap_or("list"); + let subargs = if args.len() > 1 { &args[1..] } else { &[] }; + + match subcmd { + "list" | "" => Self::Peers(PeersCommand::List), + "connect" => { + if let Some(enode) = subargs.first() { + Self::Peers(PeersCommand::Connect(enode.to_string())) + } else { + Self::Unknown("peers connect requires enode URL".to_string()) + } + } + _ => Self::Unknown("peers command not recognized".to_string()), + } + } + + fn parse_events(args: &[&str]) -> Self { + let subcmd = args.first().copied().unwrap_or("stream"); + let subargs = if args.len() > 1 { &args[1..] 
} else { &[] }; + + match subcmd { + "on" => Self::Events(EventsCommand::On), + "off" => Self::Events(EventsCommand::Off), + "filter" => { + let pattern = subargs.first().map(|s| s.to_string()); + Self::Events(EventsCommand::Filter(pattern)) + } + "history" => { + let count = subargs.first().and_then(|s| s.parse().ok()).unwrap_or(20); + Self::Events(EventsCommand::History(count)) + } + _ => { + // Try to parse as a number for stream count + if let Ok(count) = subcmd.parse::() { + Self::Events(EventsCommand::Stream(count)) + } else { + Self::Events(EventsCommand::Stream(10)) + } + } + } + } + + fn parse_node(args: &[&str]) -> Self { + if let Some(n) = args.first().and_then(|s| s.parse::().ok()) { + Self::Node(n) + } else { + Self::Unknown("node requires an index".to_string()) + } + } + + fn parse_run(args: &[&str]) -> Self { + if args.is_empty() || args[0] == "list" { + Self::Run(RunCommand::List) + } else { + let name = args[0].to_string(); + let action_args: Vec = args.iter().skip(1).map(|s| s.to_string()).collect(); + Self::Run(RunCommand::Execute { name, args: action_args }) + } + } +} + +/// Print the help message. 
+pub fn print_help() { + println!("{}", "Scroll Debug Toolkit - Commands".bold()); + println!(); + println!("{}", "Status & Inspection:".underline()); + println!(" status Show node status (head, safe, finalized, L1 state)"); + println!(" block [n|latest] Display block details"); + println!(" blocks List blocks in range"); + println!(" fcs Show forkchoice state"); + println!(); + println!("{}", "L1 Commands:".underline()); + println!(" l1 status Show L1 sync state"); + println!(" l1 sync Inject L1 synced event"); + println!(" l1 block Inject new L1 block notification"); + println!(" l1 message Inject an L1 message"); + println!(" l1 commit Inject batch commit"); + println!(" l1 finalize Inject batch finalization"); + println!(" l1 reorg Inject L1 reorg"); + println!(); + println!("{}", "Block & Transaction:".underline()); + println!(" build Build a new block (sequencer mode)"); + println!(" tx send [idx] Send ETH transfer (idx = wallet index from gen)"); + println!(" tx pending List pending transactions"); + println!(" tx inject Inject raw transaction"); + println!(); + println!("{}", "Wallet:".underline()); + println!(" wallet Show wallet address, balance, and nonce"); + println!(" wallet gen Generate and list all available wallets"); + println!(); + println!("{}", "Network:".underline()); + println!(" peers List connected peers and show local enode"); + println!(" peers connect Connect to a peer (enode://...)"); + println!(); + println!("{}", "Events:".underline()); + println!(" events on Enable background event stream"); + println!(" events off Disable background event stream"); + println!(" events [count] Stream next N events (default: 10)"); + println!(" events filter Filter events by type (e.g., Block*, L1*)"); + println!(" events history [n] Show last N events (default: 20)"); + println!(); + println!("{}", "Custom Actions:".underline()); + println!(" run list List available custom actions"); + println!(" run [args] Execute a custom action"); + println!(); + 
println!("{}", "Node Management:".underline()); + println!(" node Switch active node context"); + println!(" nodes List all nodes in fixture"); + println!(); + println!("{}", "Database:".underline()); + println!(" db Show database path and access command"); + println!(); + println!("{}", "Other:".underline()); + println!(" help Show this help message"); + println!(" exit Exit the REPL"); +} diff --git a/crates/node/src/debug_toolkit/event_stream.rs b/crates/node/src/debug_toolkit/event_stream.rs new file mode 100644 index 00000000..b15713a1 --- /dev/null +++ b/crates/node/src/debug_toolkit/event_stream.rs @@ -0,0 +1,298 @@ +//! Background event streaming for the debug REPL. + +use colored::Colorize; +use rollup_node_chain_orchestrator::ChainOrchestratorEvent; +use std::{ + collections::VecDeque, + time::{Duration, Instant}, +}; + +/// Maximum number of events to keep in history. +const DEFAULT_HISTORY_CAPACITY: usize = 100; + +/// State for background event streaming. +#[derive(Debug)] +pub struct EventStreamState { + /// Whether background streaming is enabled. + enabled: bool, + /// Event type filter (glob pattern). + filter: Option, + /// Ring buffer of recent events for `events history`. + history: VecDeque<(Instant, ChainOrchestratorEvent)>, + /// Max history size. + history_capacity: usize, + /// Counter for event numbering. + event_counter: usize, +} + +impl Default for EventStreamState { + fn default() -> Self { + Self::new() + } +} + +impl EventStreamState { + /// Create a new event stream state. + pub fn new() -> Self { + Self { + enabled: false, + filter: None, + history: VecDeque::with_capacity(DEFAULT_HISTORY_CAPACITY), + history_capacity: DEFAULT_HISTORY_CAPACITY, + event_counter: 0, + } + } + + /// Enable background event streaming. + pub const fn enable(&mut self) { + self.enabled = true; + } + + /// Disable background event streaming. + pub const fn disable(&mut self) { + self.enabled = false; + } + + /// Check if streaming is enabled. 
+ pub const fn is_enabled(&self) -> bool { + self.enabled + } + + /// Set the event filter pattern. + pub fn set_filter(&mut self, pattern: Option) { + self.filter = pattern; + } + + /// Get the current filter pattern. + pub fn filter(&self) -> Option<&str> { + self.filter.as_deref() + } + + /// Record an event in history and optionally display it. + pub fn record_event(&mut self, event: ChainOrchestratorEvent) -> Option { + let now = Instant::now(); + + // Add to history + if self.history.len() >= self.history_capacity { + self.history.pop_front(); + } + self.history.push_back((now, event.clone())); + self.event_counter += 1; + + // Check if we should display this event + if !self.enabled { + return None; + } + + let event_name = event_type_name(&event); + if !self.matches_filter(&event_name) { + return None; + } + + Some(self.format_event(&event)) + } + + /// Check if an event name matches the filter. + fn matches_filter(&self, event_name: &str) -> bool { + match &self.filter { + None => true, + Some(pattern) => { + // Simple glob matching: * matches any sequence of characters + let pattern = pattern.replace('*', ".*"); + regex_lite::Regex::new(&format!("^{}$", pattern)) + .map(|re| re.is_match(event_name)) + .unwrap_or(true) + } + } + } + + /// Format an event for display. + pub fn format_event(&self, event: &ChainOrchestratorEvent) -> String { + let prefix = " [EVENT]".cyan(); + let event_str = format_event_short(event); + format!("{} {}", prefix, event_str) + } + + /// Get recent events from history. + pub fn get_history(&self, count: usize) -> Vec<(Duration, &ChainOrchestratorEvent)> { + let start = Instant::now(); + self.history + .iter() + .rev() + .take(count) + .map(|(t, e)| (start.duration_since(*t), e)) + .collect::>() + .into_iter() + .rev() + .collect() + } + + /// Get the total number of events recorded. + pub const fn total_events(&self) -> usize { + self.event_counter + } +} + +/// Get the type name of an event for filtering. 
+pub fn event_type_name(event: &ChainOrchestratorEvent) -> String { + match event { + ChainOrchestratorEvent::BlockSequenced(_) => "BlockSequenced".to_string(), + ChainOrchestratorEvent::ChainConsolidated { .. } => "ChainConsolidated".to_string(), + ChainOrchestratorEvent::ChainExtended(_) => "ChainExtended".to_string(), + ChainOrchestratorEvent::ChainReorged(_) => "ChainReorged".to_string(), + ChainOrchestratorEvent::L1Synced => "L1Synced".to_string(), + ChainOrchestratorEvent::OptimisticSync(_) => "OptimisticSync".to_string(), + ChainOrchestratorEvent::NewL1Block(_) => "NewL1Block".to_string(), + ChainOrchestratorEvent::L1MessageCommitted(_) => "L1MessageCommitted".to_string(), + ChainOrchestratorEvent::L1Reorg { .. } => "L1Reorg".to_string(), + ChainOrchestratorEvent::BatchConsolidated(_) => "BatchConsolidated".to_string(), + ChainOrchestratorEvent::UnwoundToL1Block(_) => "UnwoundToL1Block".to_string(), + ChainOrchestratorEvent::BlockConsolidated(_) => "BlockConsolidated".to_string(), + ChainOrchestratorEvent::BatchReverted { .. } => "BatchReverted".to_string(), + ChainOrchestratorEvent::L1BlockFinalized(_, _) => "L1BlockFinalized".to_string(), + ChainOrchestratorEvent::NewBlockReceived(_) => "NewBlockReceived".to_string(), + ChainOrchestratorEvent::L1MessageNotFoundInDatabase(_) => { + "L1MessageNotFoundInDatabase".to_string() + } + ChainOrchestratorEvent::BlockFailedConsensusChecks(_, _) => { + "BlockFailedConsensusChecks".to_string() + } + ChainOrchestratorEvent::InsufficientDataForReceivedBlock(_) => { + "InsufficientDataForReceivedBlock".to_string() + } + ChainOrchestratorEvent::BlockAlreadyKnown(_, _) => "BlockAlreadyKnown".to_string(), + ChainOrchestratorEvent::OldForkReceived { .. } => "OldForkReceived".to_string(), + ChainOrchestratorEvent::BatchCommitIndexed { .. } => "BatchCommitIndexed".to_string(), + ChainOrchestratorEvent::BatchFinalized { .. 
} => "BatchFinalized".to_string(), + ChainOrchestratorEvent::L2ChainCommitted(_, _, _) => "L2ChainCommitted".to_string(), + ChainOrchestratorEvent::L2ConsolidatedBlockCommitted(_) => { + "L2ConsolidatedBlockCommitted".to_string() + } + ChainOrchestratorEvent::SignedBlock { .. } => "SignedBlock".to_string(), + ChainOrchestratorEvent::L1MessageMismatch { .. } => "L1MessageMismatch".to_string(), + ChainOrchestratorEvent::FcsHeadUpdated(_) => "FcsHeadUpdated".to_string(), + } +} + +/// Format an event for short display. +pub fn format_event_short(event: &ChainOrchestratorEvent) -> String { + match event { + ChainOrchestratorEvent::BlockSequenced(block) => { + format!( + "BlockSequenced {{ block: {}, hash: {:.8}... }}", + block.header.number, + format!("{:?}", block.header.hash_slow()) + ) + } + ChainOrchestratorEvent::ChainConsolidated { from, to } => { + format!("ChainConsolidated {{ from: {}, to: {} }}", from, to) + } + ChainOrchestratorEvent::ChainExtended(import) => { + format!("ChainExtended {{ blocks: {} }}", import.chain.len()) + } + ChainOrchestratorEvent::ChainReorged(import) => { + format!("ChainReorged {{ blocks: {} }}", import.chain.len()) + } + ChainOrchestratorEvent::L1Synced => "L1Synced".to_string(), + ChainOrchestratorEvent::OptimisticSync(info) => { + format!("OptimisticSync {{ block: {} }}", info.number) + } + ChainOrchestratorEvent::NewL1Block(num) => format!("NewL1Block {{ block: {} }}", num), + ChainOrchestratorEvent::L1MessageCommitted(queue_index) => { + format!("L1MessageCommitted {{ queue_index: {} }}", queue_index) + } + ChainOrchestratorEvent::L1Reorg { l1_block_number, .. 
} => { + format!("L1Reorg {{ l1_block: {} }}", l1_block_number) + } + ChainOrchestratorEvent::BatchConsolidated(outcome) => { + format!("BatchConsolidated {{ blocks: {} }}", outcome.blocks.len()) + } + ChainOrchestratorEvent::UnwoundToL1Block(num) => { + format!("UnwoundToL1Block {{ block: {} }}", num) + } + ChainOrchestratorEvent::BlockConsolidated(outcome) => { + format!("BlockConsolidated {{ block: {} }}", outcome.block_info().block_info.number) + } + ChainOrchestratorEvent::BatchReverted { batch_info, safe_head } => { + format!("BatchReverted {{ index: {}, safe: {} }}", batch_info.index, safe_head.number) + } + ChainOrchestratorEvent::L1BlockFinalized(num, batches) => { + format!("L1BlockFinalized {{ block: {}, batches: {} }}", num, batches.len()) + } + ChainOrchestratorEvent::NewBlockReceived(nbwp) => { + format!( + "NewBlockReceived {{ block: {}, peer: {:.8}... }}", + nbwp.block.header.number, + format!("{:?}", nbwp.peer_id) + ) + } + ChainOrchestratorEvent::L1MessageNotFoundInDatabase(key) => { + format!("L1MessageNotFoundInDatabase {{ key: {:?} }}", key) + } + ChainOrchestratorEvent::BlockFailedConsensusChecks(hash, peer) => { + format!( + "BlockFailedConsensusChecks {{ hash: {:.8}..., peer: {:.8}... }}", + format!("{:?}", hash), + format!("{:?}", peer) + ) + } + ChainOrchestratorEvent::InsufficientDataForReceivedBlock(hash) => { + format!("InsufficientDataForReceivedBlock {{ hash: {:.8}... }}", format!("{:?}", hash)) + } + ChainOrchestratorEvent::BlockAlreadyKnown(hash, peer) => { + format!( + "BlockAlreadyKnown {{ hash: {:.8}..., peer: {:.8}... }}", + format!("{:?}", hash), + format!("{:?}", peer) + ) + } + ChainOrchestratorEvent::OldForkReceived { headers, peer_id, .. } => { + format!( + "OldForkReceived {{ headers: {}, peer: {:.8}... 
}}", + headers.len(), + format!("{:?}", peer_id) + ) + } + ChainOrchestratorEvent::BatchCommitIndexed { batch_info, l1_block_number } => { + format!( + "BatchCommitIndexed {{ index: {}, l1_block: {} }}", + batch_info.index, l1_block_number + ) + } + ChainOrchestratorEvent::BatchFinalized { l1_block_info, triggered_batches } => { + format!( + "BatchFinalized {{ l1_block: {}, batches: {} }}", + l1_block_info.number, + triggered_batches.len() + ) + } + ChainOrchestratorEvent::L2ChainCommitted(info, batch, is_consolidated) => { + format!( + "L2ChainCommitted {{ block: {}, batch: {:?}, consolidated: {} }}", + info.block_info.number, + batch.as_ref().map(|b| b.index), + is_consolidated + ) + } + ChainOrchestratorEvent::L2ConsolidatedBlockCommitted(info) => { + format!("L2ConsolidatedBlockCommitted {{ block: {} }}", info.block_info.number) + } + ChainOrchestratorEvent::SignedBlock { block, .. } => { + format!( + "SignedBlock {{ block: {}, hash: {:.8}... }}", + block.header.number, + format!("{:?}", block.header.hash_slow()) + ) + } + ChainOrchestratorEvent::L1MessageMismatch { expected, actual } => { + format!( + "L1MessageMismatch {{ expected: {:.8}..., actual: {:.8}... }}", + format!("{:?}", expected), + format!("{:?}", actual) + ) + } + ChainOrchestratorEvent::FcsHeadUpdated(info) => { + format!("FcsHeadUpdated {{ block: {} }}", info.number) + } + } +} diff --git a/crates/node/src/debug_toolkit/mod.rs b/crates/node/src/debug_toolkit/mod.rs new file mode 100644 index 00000000..492c4a43 --- /dev/null +++ b/crates/node/src/debug_toolkit/mod.rs @@ -0,0 +1,74 @@ +//! Debug Toolkit for Scroll Rollup Node +//! +//! This module provides an interactive REPL and debugging utilities for +//! hackathons, development, and debugging scenarios. +//! +//! # Quick Start +//! +//! ```rust,ignore +//! use rollup_node::debug_toolkit::prelude::*; +//! use rollup_node::test_utils::TestFixture; +//! +//! #[tokio::main] +//! async fn main() -> eyre::Result<()> { +//! 
// Create a test fixture +//! let fixture = TestFixture::builder() +//! .with_chain("dev") +//! .sequencer() +//! .with_noop_consensus() +//! .build() +//! .await?; +//! +//! // Start the REPL +//! let mut repl = DebugRepl::new(fixture); +//! repl.run().await?; +//! +//! Ok(()) +//! } +//! ``` +//! +//! # Custom Actions +//! +//! You can create custom actions by implementing the [`actions::Action`] trait: +//! +//! ```rust,ignore +//! use rollup_node::debug_toolkit::actions::{Action, ActionRegistry}; +//! use rollup_node::test_utils::TestFixture; +//! use async_trait::async_trait; +//! +//! struct MyAction; +//! +//! #[async_trait] +//! impl Action for MyAction { +//! fn name(&self) -> &'static str { "my-action" } +//! fn description(&self) -> &'static str { "Does something cool" } +//! +//! async fn execute( +//! &self, +//! fixture: &mut TestFixture, +//! args: &[String], +//! ) -> eyre::Result<()> { +//! // Your logic here with full fixture access +//! Ok(()) +//! } +//! } +//! ``` + +pub mod actions; +pub mod cli; +mod commands; +mod event_stream; +mod repl; + +pub use cli::DebugArgs; +pub use commands::*; +pub use event_stream::*; +pub use repl::*; + +/// Prelude for convenient imports. +pub mod prelude { + pub use super::{ + actions::{Action, ActionRegistry}, + DebugRepl, EventStreamState, + }; +} diff --git a/crates/node/src/debug_toolkit/repl.rs b/crates/node/src/debug_toolkit/repl.rs new file mode 100644 index 00000000..e8dfda06 --- /dev/null +++ b/crates/node/src/debug_toolkit/repl.rs @@ -0,0 +1,859 @@ +//! Interactive REPL for debugging rollup nodes. 
+ +use super::{ + actions::ActionRegistry, + commands::{ + print_help, BlockArg, Command, EventsCommand, L1Command, PeersCommand, RunCommand, + TxCommand, WalletCommand, + }, + event_stream::EventStreamState, +}; +use crate::test_utils::{fixture::NodeType, TestFixture}; +use alloy_consensus::{SignableTransaction, TxEip1559}; +use alloy_eips::{eip2718::Encodable2718, BlockNumberOrTag}; +use alloy_network::{TransactionResponse, TxSignerSync}; +use alloy_primitives::TxKind; +use colored::Colorize; +use crossterm::event::{self, Event, KeyCode, KeyModifiers}; +use futures::StreamExt; +use reth_network::PeersInfo; +use reth_network_api::Peers; +use reth_network_peers::NodeRecord; +use reth_rpc_api::EthApiServer; +use reth_transaction_pool::TransactionPool; +use std::{io::Write, str::FromStr, time::Duration}; + +/// Interactive REPL for debugging rollup nodes. +pub struct DebugRepl { + /// The test fixture containing nodes. + fixture: TestFixture, + /// Whether the REPL is running. + running: bool, + /// Current active node index. + active_node: usize, + /// Event stream state per node. + event_streams: Vec, + /// Registry of custom actions. + action_registry: ActionRegistry, +} + +impl std::fmt::Debug for DebugRepl { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("DebugRepl") + .field("running", &self.running) + .field("active_node", &self.active_node) + .field("event_streams", &self.event_streams) + .field("action_registry", &"ActionRegistry { ... }") + .finish_non_exhaustive() + } +} + +impl DebugRepl { + /// Create a new REPL with the given fixture. 
+ pub fn new(fixture: TestFixture) -> Self { + // Create one event stream per node, enabled by default + let event_streams = (0..fixture.nodes.len()) + .map(|_| { + let mut es = EventStreamState::new(); + es.enable(); + es + }) + .collect(); + + Self { + fixture, + running: false, + active_node: 0, + event_streams, + action_registry: ActionRegistry::new(), + } + } + + /// Create a new REPL with a custom action registry. + pub fn with_action_registry(fixture: TestFixture, action_registry: ActionRegistry) -> Self { + let event_streams = (0..fixture.nodes.len()) + .map(|_| { + let mut es = EventStreamState::new(); + es.enable(); + es + }) + .collect(); + + Self { fixture, running: false, active_node: 0, event_streams, action_registry } + } + + /// Get mutable access to the action registry to register custom actions. + pub const fn action_registry_mut(&mut self) -> &mut ActionRegistry { + &mut self.action_registry + } + + /// Run the REPL loop. + pub async fn run(&mut self) -> eyre::Result<()> { + self.running = true; + + // Print welcome message + println!(); + println!("{}", "Scroll Debug Toolkit".bold().cyan()); + println!("Type 'help' for available commands, 'exit' to quit."); + println!(); + + // Show initial status + self.cmd_status().await?; + + // Current input line buffer + let mut input_buffer = String::new(); + let mut stdout = std::io::stdout(); + + // Print initial prompt + print!("{}", self.get_prompt()); + let _ = stdout.flush(); + + while self.running { + // Poll for events and input + tokio::select! 
{ + biased; + + // Check for events from the node + Some(event) = self.fixture.nodes[self.active_node].chain_orchestrator_rx.next() => { + // Display if streaming is enabled + if let Some(formatted) = self.event_streams[self.active_node].record_event(event) { + // Clear current line, print event, reprint prompt + print!("\r\x1b[K{}\n{}{}", formatted, self.get_prompt(), input_buffer); + let _ = stdout.flush(); + } + } + + // Check for keyboard input (non-blocking) + _ = tokio::time::sleep(Duration::from_millis(50)) => { + // Poll for keyboard events + while event::poll(Duration::from_millis(0))? { + if let Event::Key(key_event) = event::read()? { + match key_event.code { + KeyCode::Enter => { + println!(); + let line = input_buffer.trim().to_string(); + input_buffer.clear(); + + if !line.is_empty() { + if let Err(e) = self.execute_command(&line).await { + println!("{}: {}", "Error".red(), e); + } + } + + if self.running { + print!("{}", self.get_prompt()); + let _ = stdout.flush(); + } + } + KeyCode::Backspace => { + if !input_buffer.is_empty() { + input_buffer.pop(); + print!("\x08 \x08"); // Move back, overwrite, move back + let _ = stdout.flush(); + } + } + KeyCode::Char(c) => { + if key_event.modifiers.contains(KeyModifiers::CONTROL) && c == 'c' { + println!("\nUse 'exit' to quit"); + print!("{}{}", self.get_prompt(), input_buffer); + let _ = stdout.flush(); + } else if key_event.modifiers.contains(KeyModifiers::CONTROL) && c == 'd' { + println!(); + self.running = false; + } else { + input_buffer.push(c); + print!("{}", c); + let _ = stdout.flush(); + } + } + KeyCode::Esc => { + // Clear current input + print!("\r\x1b[K{}", self.get_prompt()); + let _ = stdout.flush(); + input_buffer.clear(); + } + _ => {} + } + } + } + } + } + } + + println!("Goodbye!"); + Ok(()) + } + + /// Get the REPL prompt. 
+ fn get_prompt(&self) -> String { + let node_type = match self.fixture.nodes[self.active_node].typ { + NodeType::Sequencer => "seq", + NodeType::Follower => "fol", + }; + format!("{} [{}:{}]> ", "scroll-debug".cyan(), node_type, self.active_node) + } + + /// Execute a command. + async fn execute_command(&mut self, input: &str) -> eyre::Result<()> { + let cmd = Command::parse(input); + + match cmd { + Command::Status => self.cmd_status().await, + Command::Block(arg) => self.cmd_block(arg).await, + Command::Blocks { from, to } => self.cmd_blocks(from, to).await, + Command::Fcs => self.cmd_fcs().await, + Command::L1(l1_cmd) => self.cmd_l1(l1_cmd).await, + Command::Build => self.cmd_build().await, + Command::Tx(tx_cmd) => self.cmd_tx(tx_cmd).await, + Command::Wallet(wallet_cmd) => self.cmd_wallet(wallet_cmd).await, + Command::Peers(peers_cmd) => self.cmd_peers(peers_cmd).await, + Command::Events(events_cmd) => self.cmd_events(events_cmd).await, + Command::Run(run_cmd) => self.cmd_run(run_cmd).await, + Command::Node(idx) => self.cmd_switch_node(idx), + Command::Nodes => self.cmd_list_nodes(), + Command::Db => self.cmd_db(), + Command::Help => { + print_help(); + Ok(()) + } + Command::Exit => { + self.running = false; + Ok(()) + } + Command::Unknown(s) => { + if !s.is_empty() { + println!("Unknown command: {}. Type 'help' for available commands.", s); + } + Ok(()) + } + } + } + + /// Show node status. 
+ async fn cmd_status(&self) -> eyre::Result<()> { + let node = &self.fixture.nodes[self.active_node]; + let node_type = match node.typ { + NodeType::Sequencer => "Sequencer", + NodeType::Follower => "Follower", + }; + + let status = node.rollup_manager_handle.status().await?; + let fcs = &status.l2.fcs; + + println!("{}", format!("=== Node {} ({}) ===", self.active_node, node_type).bold()); + + // Node Info + let db_path = node.node.inner.config.datadir().db().join("scroll.db"); + let http_addr = node.node.inner.rpc_server_handle().http_local_addr(); + println!("{}", "Node:".underline()); + println!(" Database: {}", db_path.display()); + if let Some(addr) = http_addr { + println!(" HTTP RPC: http://{}", addr); + } + + // L2 Status + println!("{}", "L2:".underline()); + println!( + " Head: #{} ({:.12}...)", + fcs.head_block_info().number.to_string().green(), + format!("{:?}", fcs.head_block_info().hash) + ); + println!( + " Safe: #{} ({:.12}...)", + fcs.safe_block_info().number.to_string().yellow(), + format!("{:?}", fcs.safe_block_info().hash) + ); + println!( + " Finalized: #{} ({:.12}...)", + fcs.finalized_block_info().number.to_string().blue(), + format!("{:?}", fcs.finalized_block_info().hash) + ); + println!( + " Synced: {}", + if status.l2.status.is_synced() { "true".green() } else { "false".red() } + ); + + // L1 Status + println!("{}", "L1:".underline()); + println!(" Head: #{}", status.l1.latest.to_string().cyan()); + println!(" Finalized: #{}", status.l1.finalized); + println!(" Processed: #{}", status.l1.processed); + println!( + " Synced: {}", + if status.l1.status.is_synced() { "true".green() } else { "false".red() } + ); + + Ok(()) + } + + /// Show block details. 
+ async fn cmd_block(&self, arg: BlockArg) -> eyre::Result<()> { + let node = &self.fixture.nodes[self.active_node]; + + let block_id = match arg { + BlockArg::Latest => BlockNumberOrTag::Latest, + BlockArg::Number(n) => BlockNumberOrTag::Number(n), + }; + + let block = node + .node + .rpc + .inner + .eth_api() + .block_by_number(block_id, true) + .await? + .ok_or_else(|| eyre::eyre!("Block not found"))?; + + println!("{}", format!("Block #{}", block.header.number).bold()); + println!(" Hash: {:?}", block.header.hash); + println!(" Parent: {:?}", block.header.parent_hash); + println!(" Timestamp: {}", block.header.timestamp); + println!(" Gas Used: {}", block.header.gas_used); + println!(" Gas Limit: {}", block.header.gas_limit); + println!(" Txs: {}", block.transactions.len()); + + if let Some(txs) = block.transactions.as_transactions() { + for (i, tx) in txs.iter().enumerate() { + println!(" [{}] hash={:?}", i, tx.inner.tx_hash()); + } + } + + Ok(()) + } + + /// List blocks in range. + async fn cmd_blocks(&self, from: u64, to: u64) -> eyre::Result<()> { + let node = &self.fixture.nodes[self.active_node]; + + println!("{}", format!("Blocks {} to {}:", from, to).bold()); + + for n in from..=to { + let block = node + .node + .rpc + .inner + .eth_api() + .block_by_number(BlockNumberOrTag::Number(n), false) + .await?; + + if let Some(block) = block { + println!( + " #{}: {} txs, gas: {}, hash: {:.12}...", + n, + block.transactions.len(), + block.header.gas_used, + format!("{:?}", block.header.hash) + ); + } else { + println!(" #{}: {}", n, "not found".dimmed()); + } + } + + Ok(()) + } + + /// Show forkchoice state. 
+ async fn cmd_fcs(&self) -> eyre::Result<()> { + let node = &self.fixture.nodes[self.active_node]; + let status = node.rollup_manager_handle.status().await?; + let fcs = &status.l2.fcs; + + println!("{}", "Forkchoice State:".bold()); + println!(" Head:"); + println!(" Number: {}", fcs.head_block_info().number); + println!(" Hash: {:?}", fcs.head_block_info().hash); + println!(" Safe:"); + println!(" Number: {}", fcs.safe_block_info().number); + println!(" Hash: {:?}", fcs.safe_block_info().hash); + println!(" Finalized:"); + println!(" Number: {}", fcs.finalized_block_info().number); + println!(" Hash: {:?}", fcs.finalized_block_info().hash); + + Ok(()) + } + + /// Execute L1 commands. + async fn cmd_l1(&mut self, cmd: L1Command) -> eyre::Result<()> { + match cmd { + L1Command::Status => { + let node = &self.fixture.nodes[self.active_node]; + let status = node.rollup_manager_handle.status().await?; + + println!("{}", "L1 Status:".bold()); + println!( + " Synced: {}", + if status.l1.status.is_synced() { "true".green() } else { "false".red() } + ); + println!(" L1 Head: #{}", status.l1.latest); + println!(" L1 Final: #{}", status.l1.finalized); + } + L1Command::Sync => { + self.fixture.l1().sync().await?; + println!("{}", "L1 synced event sent".green()); + } + L1Command::Block(n) => { + self.fixture.l1().new_block(n).await?; + println!("{}", format!("L1 block {} notification sent", n).green()); + } + L1Command::Message(json) => { + // Parse JSON and inject L1 message + // For now, just show that we received the command + println!( + "{}", + format!("L1 message injection not yet implemented. JSON: {}", json).yellow() + ); + } + L1Command::Commit(json) => { + println!( + "{}", + format!("Batch commit injection not yet implemented. 
JSON: {}", json).yellow() + ); + } + L1Command::Finalize(idx) => { + println!( + "{}", + format!("Batch finalization for index {} not yet implemented", idx).yellow() + ); + } + L1Command::Reorg(block) => { + self.fixture.l1().reorg_to(block).await?; + println!("{}", format!("L1 reorg to block {} sent", block).green()); + } + } + Ok(()) + } + + /// Build a new block. + async fn cmd_build(&self) -> eyre::Result<()> { + if !self.fixture.nodes[self.active_node].is_sequencer() { + println!("{}", "Error: build command requires sequencer node".red()); + return Ok(()); + } + + let handle = &self.fixture.nodes[self.active_node].rollup_manager_handle; + + // Check if L1 is synced + let status = handle.status().await?; + if !status.l1.status.is_synced() { + println!("{}", "Error: L1 is not synced".red()); + println!( + "{}", + "Hint: Run 'l1 sync' to mark the mock L1 as synced before building blocks".yellow() + ); + return Ok(()); + } + + // Trigger block building - events will be displayed through normal event stream + handle.build_block(); + println!("{}", "Block build triggered!".green()); + + Ok(()) + } + + /// Execute transaction commands. + async fn cmd_tx(&mut self, cmd: TxCommand) -> eyre::Result<()> { + match cmd { + TxCommand::Pending => { + let node = &self.fixture.nodes[self.active_node]; + let pending_txs = node.node.inner.pool.pooled_transactions(); + + if pending_txs.is_empty() { + println!("{}", "No pending transactions".dimmed()); + } else { + println!("{}", format!("Pending Transactions ({}):", pending_txs.len()).bold()); + for (i, tx) in pending_txs.iter().enumerate() { + let hash = tx.hash(); + let from = tx.sender(); + let nonce = tx.nonce(); + let gas_price = tx.max_fee_per_gas(); + println!( + " [{}] hash={:.16}... from={:.12}... 
nonce={} gas_price={}", + i, + format!("{:?}", hash), + format!("{:?}", from), + nonce, + gas_price + ); + } + } + } + TxCommand::Send { to, value, from } => { + // Get wallet info + let mut wallet = self.fixture.wallet.lock().await; + let chain_id = wallet.chain_id; + + // If a wallet index is specified, use that wallet from wallet_gen() + let (signer, nonce, from_address) = if let Some(idx) = from { + let wallets = wallet.wallet_gen(); + if idx >= wallets.len() { + println!( + "{}", + format!( + "Invalid wallet index {}. Valid range: 0-{}", + idx, + wallets.len() - 1 + ) + .red() + ); + return Ok(()); + } + let signer = wallets[idx].clone(); + let address = signer.address(); + + // Get the nonce from the chain for this wallet + let node = &self.fixture.nodes[self.active_node]; + let nonce = node + .node + .rpc + .inner + .eth_api() + .transaction_count(address, Some(BlockNumberOrTag::Latest.into())) + .await? + .to::(); + + (signer, nonce, address) + } else { + // Use the default wallet + let signer = wallet.inner.clone(); + let nonce = wallet.inner_nonce; + let address = signer.address(); + + // Update nonce for the default wallet + wallet.inner_nonce += 1; + + (signer, nonce, address) + }; + drop(wallet); + + // Build an EIP-1559 transaction + let mut tx = TxEip1559 { + chain_id, + nonce, + gas_limit: 21000, + max_fee_per_gas: 1_000_000_000, // 1 gwei + max_priority_fee_per_gas: 1_000_000_000, // 1 gwei + to: TxKind::Call(to), + value, + access_list: Default::default(), + input: Default::default(), + }; + + // Sign the transaction + let signature = signer.sign_transaction_sync(&mut tx)?; + let signed = tx.into_signed(signature); + + // Encode as raw bytes (EIP-2718 envelope) + let raw_tx = alloy_primitives::Bytes::from(signed.encoded_2718()); + + // Inject the transaction + let node = &self.fixture.nodes[self.active_node]; + let tx_hash = node.node.rpc.inject_tx(raw_tx.clone()).await?; + + println!("{}", "Transaction sent!".green()); + println!(" Hash: {:?}", 
tx_hash); + println!(" From: {:?}", from_address); + println!(" To: {:?}", to); + println!(" Value: {} wei", value); + println!("{}", "Note: Run 'build' to include in a block (sequencer mode)".dimmed()); + } + TxCommand::Inject(bytes) => { + self.fixture.inject_tx_on(self.active_node, bytes.clone()).await?; + println!("{}", "Transaction injected".green()); + } + } + Ok(()) + } + + /// Execute wallet commands. + async fn cmd_wallet(&self, cmd: WalletCommand) -> eyre::Result<()> { + match cmd { + WalletCommand::Info => { + let wallet = self.fixture.wallet.lock().await; + let address = wallet.inner.address(); + let chain_id = wallet.chain_id; + let nonce = wallet.inner_nonce; + drop(wallet); + + // Get balance from the node + let node = &self.fixture.nodes[self.active_node]; + let balance = node + .node + .rpc + .inner + .eth_api() + .balance(address, Some(BlockNumberOrTag::Latest.into())) + .await?; + + println!("{}", "Wallet Info:".bold()); + println!(" Address: {:?}", address); + println!(" Chain ID: {}", chain_id); + println!(" Nonce: {}", nonce); + println!( + " Balance: {} wei ({:.6} ETH)", + balance, + balance.to::() as f64 / 1e18 + ); + } + WalletCommand::Gen => { + let wallet = self.fixture.wallet.lock().await; + let wallets = wallet.wallet_gen(); + let chain_id = wallet.chain_id; + drop(wallet); + + println!("{}", format!("Generated Wallets ({}):", wallets.len()).bold()); + println!(" Chain ID: {}", chain_id); + println!(); + + for (i, signer) in wallets.iter().enumerate() { + let address = signer.address(); + // Get balance for each wallet + let node = &self.fixture.nodes[self.active_node]; + let balance = node + .node + .rpc + .inner + .eth_api() + .balance(address, Some(BlockNumberOrTag::Latest.into())) + .await + .unwrap_or_default(); + + println!(" [{}] {:?}", format!("{}", i).cyan(), address); + println!( + " Balance: {} wei ({:.6} ETH)", + balance, + balance.to::() as f64 / 1e18 + ); + } + } + } + Ok(()) + } + + /// Execute peer commands. 
+ async fn cmd_peers(&self, cmd: PeersCommand) -> eyre::Result<()> { + let node = &self.fixture.nodes[self.active_node]; + let network_handle = node.rollup_manager_handle.get_network_handle().await?; + + match cmd { + PeersCommand::List => { + // Get this node's info + let local_record = network_handle.local_node_record(); + let peer_count = network_handle.inner().num_connected_peers(); + + println!("{}", "Local Node:".bold()); + println!(" Peer ID: {:?}", local_record.id); + println!(" Enode: {}", local_record); + println!(); + + // Get connected peers + let peers = network_handle + .inner() + .get_all_peers() + .await + .map_err(|e| eyre::eyre!("Failed to get peers: {}", e))?; + + println!("{}", format!("Connected Peers ({}):", peer_count).bold()); + + if peers.is_empty() { + println!(" {}", "No peers connected".dimmed()); + } else { + for peer in &peers { + println!(" {:?}", peer.remote_id); + println!(" Address: {}", peer.remote_addr); + println!(" Client: {}", peer.client_version); + println!(" Enode: {}", peer.enode); + println!(); + } + } + + // Show other nodes in fixture for convenience + if self.fixture.nodes.len() > 1 { + println!("{}", "Other Nodes in Fixture:".bold()); + for (i, other_node) in self.fixture.nodes.iter().enumerate() { + if i == self.active_node { + continue; + } + let other_handle = + other_node.rollup_manager_handle.get_network_handle().await?; + let other_record = other_handle.local_node_record(); + let node_type = match other_node.typ { + NodeType::Sequencer => "Sequencer", + NodeType::Follower => "Follower", + }; + println!(" [{}] {} - {}", i, node_type, other_record); + } + } + } + PeersCommand::Connect(enode_url) => { + // Parse the enode URL + match NodeRecord::from_str(&enode_url) { + Ok(record) => { + network_handle.inner().add_peer(record.id, record.tcp_addr()); + println!("{}", format!("Connecting to peer: {:?}", record.id).green()); + println!(" Address: {}", record.tcp_addr()); + println!("{}", "Note: Use 'peers' to check 
connection status".dimmed()); + } + Err(e) => { + println!("{}", format!("Invalid enode URL: {}", e).red()); + println!("Expected format: enode://@:"); + } + } + } + } + Ok(()) + } + + /// Execute events commands. + async fn cmd_events(&mut self, cmd: EventsCommand) -> eyre::Result<()> { + let event_stream = &mut self.event_streams[self.active_node]; + match cmd { + EventsCommand::On => { + event_stream.enable(); + println!("{}", "Event stream enabled".green()); + } + EventsCommand::Off => { + event_stream.disable(); + println!("{}", "Event stream disabled".yellow()); + } + EventsCommand::Stream(count) => { + println!("Streaming {} events (Ctrl+C to stop)...", count); + let mut received = 0; + let node = &mut self.fixture.nodes[self.active_node]; + + while received < count { + tokio::select! { + event = node.chain_orchestrator_rx.next() => { + if let Some(event) = event { + received += 1; + let formatted = event_stream.format_event(&event); + println!("[{}] {}", received, formatted); + } + } + _ = tokio::time::sleep(Duration::from_secs(30)) => { + println!("{}", "Timeout waiting for events".yellow()); + break; + } + } + } + } + EventsCommand::Filter(pattern) => { + event_stream.set_filter(pattern.clone()); + if let Some(p) = pattern { + println!("{}", format!("Event filter set: {}", p).green()); + } else { + println!("{}", "Event filter cleared".yellow()); + } + } + EventsCommand::History(count) => { + let history = event_stream.get_history(count); + if history.is_empty() { + println!("{}", "No events in history".dimmed()); + } else { + println!("{}", format!("Last {} events:", history.len()).bold()); + for (ago, event) in history { + let formatted = event_stream.format_event(event); + println!(" [{:?} ago] {}", ago, formatted); + } + } + } + } + Ok(()) + } + + /// Execute custom actions. 
+ async fn cmd_run(&mut self, cmd: RunCommand) -> eyre::Result<()> { + match cmd { + RunCommand::List => { + println!("{}", "Available Actions:".bold()); + println!(); + + let actions: Vec<_> = self.action_registry.list().collect(); + if actions.is_empty() { + println!("{}", " No actions registered".dimmed()); + println!(); + println!( + "{}", + "To add actions, implement the Action trait and register in ActionRegistry" + .dimmed() + ); + } else { + for action in actions { + println!(" {}", action.name().cyan()); + println!(" {}", action.description()); + if let Some(usage) = action.usage() { + println!(" Usage: {}", usage.dimmed()); + } + println!(); + } + } + } + RunCommand::Execute { name, args } => { + if let Some(action) = self.action_registry.get(&name) { + println!("{}", format!("Running action: {}", action.name()).cyan().bold()); + println!(); + + // Execute the action with mutable access to fixture + action.execute(&mut self.fixture, &args).await?; + } else { + println!("{}", format!("Unknown action: {}", name).red()); + println!("{}", "Use 'run list' to see available actions".dimmed()); + } + } + } + Ok(()) + } + + /// Switch to a different node. + fn cmd_switch_node(&mut self, idx: usize) -> eyre::Result<()> { + if idx >= self.fixture.nodes.len() { + println!( + "{}", + format!("Invalid node index. Valid range: 0-{}", self.fixture.nodes.len() - 1) + .red() + ); + } else { + self.active_node = idx; + let node_type = match self.fixture.nodes[idx].typ { + NodeType::Sequencer => "Sequencer", + NodeType::Follower => "Follower", + }; + println!("{}", format!("Switched to node {} ({})", idx, node_type).green()); + } + Ok(()) + } + + /// List all nodes. 
+ fn cmd_list_nodes(&self) -> eyre::Result<()> { + println!("{}", "Nodes:".bold()); + for (i, node) in self.fixture.nodes.iter().enumerate() { + let node_type = match node.typ { + NodeType::Sequencer => "Sequencer".cyan(), + NodeType::Follower => "Follower".normal(), + }; + let marker = if i == self.active_node { " *" } else { "" }; + println!(" [{}] {}{}", i, node_type, marker.green()); + } + Ok(()) + } + + /// Show database path and access command. + fn cmd_db(&self) -> eyre::Result<()> { + let node = &self.fixture.nodes[self.active_node]; + let db_dir = node.node.inner.config.datadir().db(); + let db_path = db_dir.join("scroll.db"); + + println!("{}", "Database Info:".bold()); + println!(" Path: {}", db_path.display()); + println!(); + println!("{}", "Access from another terminal:".underline()); + println!(" sqlite3 {}", db_path.display()); + println!(); + println!("{}", "Useful queries:".dimmed()); + println!(" .tables -- List all tables"); + println!(" .schema
-- Show table schema"); + println!(" SELECT * FROM metadata; -- View metadata"); + println!(" SELECT * FROM l2_block ORDER BY number DESC LIMIT 10;"); + + Ok(()) + } +} diff --git a/crates/node/src/lib.rs b/crates/node/src/lib.rs index 8aa24a22..5be56d74 100644 --- a/crates/node/src/lib.rs +++ b/crates/node/src/lib.rs @@ -10,6 +10,9 @@ pub mod pprof; #[cfg(feature = "test-utils")] pub mod test_utils; +#[cfg(feature = "debug-toolkit")] +pub mod debug_toolkit; + pub use add_ons::*; pub use args::*; pub use builder::network::ScrollNetworkBuilder; diff --git a/crates/node/src/test_utils/fixture.rs b/crates/node/src/test_utils/fixture.rs index f17618ee..371c81bf 100644 --- a/crates/node/src/test_utils/fixture.rs +++ b/crates/node/src/test_utils/fixture.rs @@ -18,10 +18,11 @@ use reth_chainspec::EthChainSpec; use reth_e2e_test_utils::{wallet::Wallet, NodeHelperType, TmpDB}; use reth_eth_wire_types::BasicNetworkPrimitives; use reth_network::NetworkHandle; +use reth_network_peers::TrustedPeer; use reth_node_builder::NodeTypes; use reth_node_types::NodeTypesWithDBAdapter; use reth_provider::providers::BlockchainProvider; -use reth_scroll_chainspec::SCROLL_DEV; +use reth_scroll_chainspec::{ScrollChainSpec, SCROLL_DEV, SCROLL_MAINNET, SCROLL_SEPOLIA}; use reth_scroll_primitives::ScrollPrimitives; use reth_tasks::TaskManager; use reth_tokio_util::EventStream; @@ -208,6 +209,7 @@ pub struct TestFixtureBuilder { chain_spec: Option::ChainSpec>>, is_dev: bool, no_local_transactions_propagation: bool, + bootnodes: Option>, } impl Default for TestFixtureBuilder { @@ -225,6 +227,7 @@ impl TestFixtureBuilder { chain_spec: None, is_dev: false, no_local_transactions_propagation: false, + bootnodes: None, } } @@ -256,7 +259,7 @@ impl TestFixtureBuilder { } /// Adds a sequencer node to the test with default settings. 
- pub fn sequencer(mut self) -> Self { + pub const fn sequencer(mut self) -> Self { self.config.sequencer_args.sequencer_enabled = true; self.config.sequencer_args.auto_start = false; self.config.sequencer_args.block_time = 100; @@ -264,12 +267,16 @@ impl TestFixtureBuilder { self.config.sequencer_args.l1_message_inclusion_mode = L1MessageInclusionMode::BlockDepth(0); self.config.sequencer_args.allow_empty_blocks = true; - self.config.database_args.rn_db_path = Some(PathBuf::from("sqlite::memory:")); - self.num_nodes += 1; self } + /// Sets the bootnodes for the test nodes. + pub fn bootnodes(mut self, bootnodes: Vec) -> Self { + self.bootnodes = Some(bootnodes); + self + } + /// Adds `count`s follower nodes to the test. pub const fn followers(mut self, count: usize) -> Self { self.num_nodes += count; @@ -303,6 +310,47 @@ impl TestFixtureBuilder { self } + /// Set the chain by name ("dev", "sepolia", "mainnet") or by file path. + /// + /// This is a convenience method that loads the appropriate chain spec. + /// If the input is a file path (contains '/' or ends with '.json'), it will + /// load the genesis from the file. 
+ pub fn with_chain(mut self, chain: &str) -> Self { + let chain_spec: Arc = match chain.to_lowercase().as_str() { + "dev" => SCROLL_DEV.clone(), + "scroll-sepolia" => SCROLL_SEPOLIA.clone(), + "scroll-mainnet" | "scroll" => SCROLL_MAINNET.clone(), + _ => { + // Check if it's a file path + if chain.contains('/') || chain.ends_with(".json") { + match std::fs::read_to_string(chain) { + Ok(contents) => { + match serde_json::from_str::(&contents) { + Ok(genesis) => { + Arc::new(ScrollChainSpec::from_custom_genesis(genesis)) + } + Err(e) => { + tracing::error!(path = %chain, error = %e, "Failed to parse genesis file"); + SCROLL_DEV.clone() + } + } + } + Err(e) => { + tracing::error!(path = %chain, error = %e, "Failed to read genesis file"); + SCROLL_DEV.clone() + } + } + } else { + // For unknown chains, default to dev + tracing::warn!(chain = %chain, "Unknown chain, defaulting to dev"); + SCROLL_DEV.clone() + } + } + }; + self.chain_spec = Some(chain_spec); + self + } + /// Enable dev mode. 
pub const fn with_dev_mode(mut self, enabled: bool) -> Self { self.is_dev = enabled; @@ -430,6 +478,7 @@ impl TestFixtureBuilder { chain_spec.clone(), self.is_dev, self.no_local_transactions_propagation, + self.bootnodes, ) .await?; diff --git a/crates/node/src/test_utils/mod.rs b/crates/node/src/test_utils/mod.rs index 62e1cf04..46bdaffd 100644 --- a/crates/node/src/test_utils/mod.rs +++ b/crates/node/src/test_utils/mod.rs @@ -67,12 +67,14 @@ pub mod l1_helpers; pub mod network_helpers; pub mod tx_helpers; +use alloy_consensus::BlockHeader; // Re-export main types for convenience pub use event_utils::{EventAssertions, EventWaiter}; pub use fixture::{NodeHandle, TestFixture, TestFixtureBuilder}; pub use network_helpers::{ NetworkHelper, NetworkHelperProvider, ReputationChecker, ReputationChecks, }; +use reth_network_peers::TrustedPeer; // Legacy utilities - keep existing functions for backward compatibility use crate::{ @@ -92,12 +94,14 @@ use reth_node_builder::{ NodeHandle as RethNodeHandle, NodeTypes, NodeTypesWithDBAdapter, PayloadAttributesBuilder, PayloadTypes, TreeConfig, }; -use reth_node_core::args::{DiscoveryArgs, NetworkArgs, RpcServerArgs, TxPoolArgs}; +use reth_node_core::args::{ + DiscoveryArgs, NetworkArgs, PayloadBuilderArgs, RpcServerArgs, TxPoolArgs, +}; use reth_provider::providers::BlockchainProvider; use reth_rpc_server_types::RpcModuleSelection; use reth_tasks::TaskManager; use rollup_node_sequencer::L1MessageInclusionMode; -use std::{path::PathBuf, sync::Arc}; +use std::sync::Arc; use tokio::sync::Mutex; use tracing::{span, Level}; @@ -111,6 +115,7 @@ pub async fn setup_engine( chain_spec: Arc<::ChainSpec>, is_dev: bool, no_local_transactions_propagation: bool, + bootnodes: Option>, ) -> eyre::Result<( Vec< NodeHelperType< @@ -134,6 +139,7 @@ where let network_config = NetworkArgs { discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, + bootnodes, ..NetworkArgs::default() }; @@ -145,8 +151,13 @@ where if idx != 0 
{ scroll_node_config.sequencer_args.sequencer_enabled = false; } + let node_config = NodeConfig::new(chain_spec.clone()) .with_network(network_config.clone()) + .with_payload_builder(PayloadBuilderArgs { + gas_limit: Some(chain_spec.genesis_header().gas_limit()), + ..Default::default() + }) .with_unused_ports() .with_rpc( RpcServerArgs::default() @@ -202,7 +213,7 @@ where nodes.push(node); } - Ok((nodes, tasks, Wallet::default().with_chain_id(chain_spec.chain().into()))) + Ok((nodes, tasks, Wallet::new(10).with_chain_id(chain_spec.chain().into()))) } /// Generate a transfer transaction with the given wallet. @@ -256,9 +267,7 @@ pub fn default_sequencer_test_scroll_rollup_node_config() -> ScrollRollupNodeCon ScrollRollupNodeConfig { test: true, network_args: RollupNodeNetworkArgs::default(), - database_args: RollupNodeDatabaseArgs { - rn_db_path: Some(PathBuf::from("sqlite::memory:")), - }, + database_args: RollupNodeDatabaseArgs::default(), l1_provider_args: L1ProviderArgs::default(), engine_driver_args: EngineDriverArgs { sync_at_startup: true }, chain_orchestrator_args: ChainOrchestratorArgs { diff --git a/crates/node/tests/e2e.rs b/crates/node/tests/e2e.rs index 9132598a..70922a85 100644 --- a/crates/node/tests/e2e.rs +++ b/crates/node/tests/e2e.rs @@ -296,12 +296,14 @@ async fn can_forward_tx_to_sequencer() -> eyre::Result<()> { // Create the chain spec for scroll mainnet with Euclid v2 activated and a test genesis. 
let chain_spec = (*SCROLL_DEV).clone(); let (mut sequencer_node, _tasks, _) = - setup_engine(sequencer_node_config, 1, chain_spec.clone(), false, true).await.unwrap(); + setup_engine(sequencer_node_config, 1, chain_spec.clone(), false, true, None) + .await + .unwrap(); let sequencer_url = format!("http://localhost:{}", sequencer_node[0].rpc_url().port().unwrap()); follower_node_config.network_args.sequencer_url = Some(sequencer_url); let (mut follower_node, _tasks, wallet) = - setup_engine(follower_node_config, 1, chain_spec, false, true).await.unwrap(); + setup_engine(follower_node_config, 1, chain_spec, false, true, None).await.unwrap(); let wallet = Arc::new(Mutex::new(wallet)); @@ -463,9 +465,15 @@ async fn can_bridge_blocks() -> eyre::Result<()> { let chain_spec = (*SCROLL_DEV).clone(); // Setup the bridge node and a standard node. - let (mut nodes, tasks, _) = - setup_engine(default_test_scroll_rollup_node_config(), 1, chain_spec.clone(), false, false) - .await?; + let (mut nodes, tasks, _) = setup_engine( + default_test_scroll_rollup_node_config(), + 1, + chain_spec.clone(), + false, + false, + None, + ) + .await?; let mut bridge_node = nodes.pop().unwrap(); let bridge_peer_id = bridge_node.network.record().id; let bridge_node_l1_watcher_tx = @@ -564,9 +572,15 @@ async fn shutdown_consolidates_most_recent_batch_on_startup() -> eyre::Result<() let chain_spec = (*SCROLL_MAINNET).clone(); // Launch a node - let (mut nodes, _tasks, _) = - setup_engine(default_test_scroll_rollup_node_config(), 1, chain_spec.clone(), false, false) - .await?; + let (mut nodes, _tasks, _) = setup_engine( + default_test_scroll_rollup_node_config(), + 1, + chain_spec.clone(), + false, + false, + None, + ) + .await?; let node = nodes.pop().unwrap(); // Instantiate the rollup node manager. 
@@ -845,7 +859,7 @@ async fn graceful_shutdown_sets_fcs_to_latest_signed_block_in_db_on_start_up() - // Launch a node let (mut nodes, _tasks, _) = - setup_engine(config.clone(), 1, chain_spec.clone(), false, false).await?; + setup_engine(config.clone(), 1, chain_spec.clone(), false, false, None).await?; let node = nodes.pop().unwrap(); // Instantiate the rollup node manager. diff --git a/crates/node/tests/sync.rs b/crates/node/tests/sync.rs index dbd3f87a..a9a54535 100644 --- a/crates/node/tests/sync.rs +++ b/crates/node/tests/sync.rs @@ -81,7 +81,7 @@ async fn test_should_consolidate_to_block_15k() -> eyre::Result<()> { let chain_spec = (*SCROLL_SEPOLIA).clone(); let (mut nodes, _tasks, _) = - setup_engine(node_config, 1, chain_spec.clone(), false, false).await?; + setup_engine(node_config, 1, chain_spec.clone(), false, false, None).await?; let node = nodes.pop().unwrap(); // We perform consolidation up to block 15k. This allows us to capture a batch revert event at @@ -194,6 +194,7 @@ async fn test_should_consolidate_after_optimistic_sync() -> eyre::Result<()> { let mut sequencer = TestFixture::builder() .sequencer() + .with_memory_db() .with_eth_scroll_bridge(true) .with_scroll_wire(true) .auto_start(true) @@ -203,7 +204,7 @@ async fn test_should_consolidate_after_optimistic_sync() -> eyre::Result<()> { .build() .await?; - let mut follower = TestFixture::builder().followers(1).build().await?; + let mut follower = TestFixture::builder().followers(1).with_memory_db().build().await?; // Send a notification to the sequencer node that the L1 watcher is synced. sequencer.l1().sync().await?; @@ -553,7 +554,7 @@ async fn test_chain_orchestrator_l1_reorg() -> eyre::Result<()> { // Create a sequencer node and an unsynced node. 
let (mut nodes, _tasks, _) = - setup_engine(sequencer_node_config.clone(), 1, chain_spec.clone(), false, false) + setup_engine(sequencer_node_config.clone(), 1, chain_spec.clone(), false, false, None) .await .unwrap(); let mut sequencer = nodes.pop().unwrap(); @@ -563,7 +564,7 @@ async fn test_chain_orchestrator_l1_reorg() -> eyre::Result<()> { sequencer.inner.add_ons_handle.rollup_manager_handle.l1_watcher_mock.clone().unwrap(); let (mut nodes, _tasks, _) = - setup_engine(node_config.clone(), 1, chain_spec.clone(), false, false).await.unwrap(); + setup_engine(node_config.clone(), 1, chain_spec.clone(), false, false, None).await.unwrap(); let mut follower = nodes.pop().unwrap(); let mut follower_events = follower.inner.rollup_manager_handle.get_event_listener().await?; let follower_l1_watcher_tx = diff --git a/hackathon.json b/hackathon.json new file mode 100644 index 00000000..7eb3b08e --- /dev/null +++ b/hackathon.json @@ -0,0 +1,168 @@ + { + "config": { + "chainId": 938471, + "homesteadBlock": 0, + "eip150Block": 0, + "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "eip155Block": 0, + "eip158Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "istanbulBlock": 0, + "berlinBlock": 0, + "londonBlock": 0, + "archimedesBlock": 0, + "shanghaiBlock": 0, + "bernoulliBlock": 0, + "curieBlock": 0, + "darwinTime": 0, + "darwinV2Time": 0, + "euclidTime": 0, + "euclidV2Time": 0, + "feynmanTime": 0, + "scroll": { + "maxTxPayloadBytesPerBlock": 122880, + "feeVaultAddress": "0x5300000000000000000000000000000000000005", + "l1Config": { + "l1ChainId": 22222222, + "l1MessageQueueAddress": "0x0000000000000000000000000000000000000001", + "l1MessageQueueV2Address": "0xDc64a140Aa3E981100a9becA4E685f962f0cF6C9", + "scrollChainAddress": "0x84044d3a645843bAF0752eA591E1EAB643beD904", + "systemContractAddress": "0x9fE46736679d2D9a65F0992F2272dE9f3c7fa6e0", + "l2SystemConfigAddress": 
"0x2E48aC0df81f1fa57722e115e807C9dB1819bA13", + "numL1MessagesPerBlock": 10, + "startL1Block": 0 + } + } + }, + "nonce": "0x0", + "timestamp": "0x00000000000000000000000000000000000000000000000000000000689b3f30", + "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "gasLimit": "30000000", + "difficulty": "0x1", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "coinbase": "0x0000000000000000000000000000000000000000", + "baseFeePerGas": "0x1", + "alloc": { + "0x4e59b44847b379578588920ca78fbf26c0b4956c": { + "balance": "0x0", + "code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3", + "nonce": "0x1", + "storage": {} + }, + "0x5300000000000000000000000000000000000000": { + "balance": "0x0", + "code": "0x608060405234801561000f575f80fd5b5060043610610090575f3560e01c806383cc76601161006357806383cc7660146100f85780638da5cb5b1461010b578063c4d66de81461011e578063d4b9f4fa14610131578063f2fde38b14610139575f80fd5b806326aad7b7146100945780633cb747bf146100b0578063600a2e77146100db578063715018a6146100ee575b5f80fd5b61009d60015481565b6040519081526020015b60405180910390f35b6053546100c3906001600160a01b031681565b6040516001600160a01b0390911681526020016100a7565b61009d6100e9366004610539565b61014c565b6100f66101ef565b005b61009d610106366004610539565b610224565b6052546100c3906001600160a01b031681565b6100f661012c366004610550565b61023a565b61009d5f5481565b6100f6610147366004610550565b6102d2565b6053545f906001600160a01b0316331461019e5760405162461bcd60e51b815260206004820152600e60248201526d37b7363c9036b2b9b9b2b733b2b960911b60448201526064015b60405180910390fd5b5f806101a98461035e565b60408051838152602081018890529294509092507ffaa617c2d8ce12c62637dbce76efcc18dae60574aa95709bdcedce7e76071693910160405180910390a19392505050565b6052546001600160a01b031633146102195760405162461bcd60e51b81526004016
101959061057d565b6102225f610477565b565b602a8160288110610233575f80fd5b0154905081565b6052546001600160a01b031633146102645760405162461bcd60e51b81526004016101959061057d565b600154156102a85760405162461bcd60e51b815260206004820152601160248201527063616e6e6f7420696e697469616c697a6560781b6044820152606401610195565b6102b06104c8565b605380546001600160a01b0319166001600160a01b0392909216919091179055565b6052546001600160a01b031633146102fc5760405162461bcd60e51b81526004016101959061057d565b6001600160a01b0381166103525760405162461bcd60e51b815260206004820152601d60248201527f6e6577206f776e657220697320746865207a65726f20616464726573730000006044820152606401610195565b61035b81610477565b50565b6003545f9081906103b15760405162461bcd60e51b815260206004820152601a60248201527f63616c6c206265666f726520696e697469616c697a6174696f6e0000000000006044820152606401610195565b600154835f5b8215610448576103c86002846105c8565b5f036104125781602a82602881106103e2576103e26105b4565b015561040b82600283602881106103fb576103fb6105b4565b01545f9182526020526040902090565b915061043c565b610439602a8260288110610428576104286105b4565b0154835f9182526020526040902090565b91505b600192831c92016103b7565b81602a826028811061045c5761045c6105b4565b0155505f819055600180548082019091559590945092505050565b605280546001600160a01b038381166001600160a01b0319831681179093556040519116919082907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0905f90a35050565b5f5b60286104d78260016105fb565b101561035b57610508600282602881106104f3576104f36105b4565b0154600283602881106103fb576103fb6105b4565b60026105158360016105fb565b60288110610525576105256105b4565b01558061053181610614565b9150506104ca565b5f60208284031215610549575f80fd5b5035919050565b5f60208284031215610560575f80fd5b81356001600160a01b0381168114610576575f80fd5b9392505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e6572000000000000000000604082015260600190565b634e487b7160e01b5f52603260045260245ffd5b5f826105e257634e487b7160e01b5f52601260045260245ffd5b500690565b634e487b7160e01b5f52601160045260245
ffd5b8082018082111561060e5761060e6105e7565b92915050565b5f60018201610625576106256105e7565b506001019056fea164736f6c6343000818000a", + "nonce": "0x0", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000052": "0x000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb92266" + } + }, + "0x5300000000000000000000000000000000000002": { + "balance": "0x0", + "code": "0x608060405234801561000f575f80fd5b50600436106101a1575f3560e01c806384189161116100f3578063c63b9e2d11610093578063e88a60ad1161006e578063e88a60ad1461032e578063f2fde38b14610341578063f45e65d814610354578063fe5b04151461035d575f80fd5b8063c63b9e2d146102ff578063c91e514914610312578063de26c4a11461031b575f80fd5b8063944b247f116100ce578063944b247f146102be578063a911d77f146102d1578063aa5e9334146102d9578063bede39b5146102ec575f80fd5b806384189161146102785780638da5cb5b1461028157806393e59dc1146102ab575f80fd5b80633d0f963e1161015e5780636112d6db116101395780636112d6db1461024b5780636a5e67e514610254578063704655971461025d578063715018a614610270575f80fd5b80633d0f963e1461021c57806349948e0e1461022f578063519b4bd314610242575f80fd5b80630c18c162146101a557806313dad5be146101c157806323e524ac146101de5780633577afc5146101e757806339455d3a146101fc5780633b7656bb1461020f575b5f80fd5b6101ae60025481565b6040519081526020015b60405180910390f35b6008546101ce9060ff1681565b60405190151581526020016101b8565b6101ae60065481565b6101fa6101f5366004610c73565b610365565b005b6101fa61020a366004610c8a565b6103f7565b600b546101ce9060ff1681565b6101fa61022a366004610caa565b6104f4565b6101ae61023d366004610ceb565b610577565b6101ae60015481565b6101ae600a5481565b6101ae60075481565b6101fa61026b366004610c73565b6105b0565b6101fa61063e565b6101ae60055481565b5f54610293906001600160a01b031681565b6040516001600160a01b0390911681526020016101b8565b600454610293906001600160a01b031681565b6101fa6102cc366004610c73565b610672565b6101fa6106fe565b6101fa6102e7366004610c73565b61075a565b6101fa6102fa366004610c73565b6107f4565b6101fa61030d366004610c73565b6108b1565b6101ae60095481565b6101ae610329
366004610ceb565b61094a565b6101fa61033c366004610c73565b610974565b6101fa61034f366004610caa565b610a00565b6101ae60035481565b6101fa610a8b565b5f546001600160a01b031633146103975760405162461bcd60e51b815260040161038e90610d96565b60405180910390fd5b621c9c388111156103bb57604051635742c80560e11b815260040160405180910390fd5b60028190556040518181527f32740b35c0ea213650f60d44366b4fb211c9033b50714e4a1d34e65d5beb9bb4906020015b60405180910390a150565b6004805460405163efc7840160e01b815233928101929092526001600160a01b03169063efc7840190602401602060405180830381865afa15801561043e573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906104629190610dcd565b61047f576040516326b3506d60e11b815260040160405180910390fd5b600182905560058190556040518281527f351fb23757bb5ea0546c85b7996ddd7155f96b939ebaa5ff7bc49c75f27f2c449060200160405180910390a16040518181527f9a14bfb5d18c4c3cf14cae19c23d7cf1bcede357ea40ca1f75cd49542c71c214906020015b60405180910390a15050565b5f546001600160a01b0316331461051d5760405162461bcd60e51b815260040161038e90610d96565b600480546001600160a01b038381166001600160a01b031983168117909355604080519190921680825260208201939093527f22d1c35fe072d2e42c3c8f9bd4a0d34aa84a0101d020a62517b33fdb3174e5f791016104e8565b600b545f9060ff16156105935761058d82610ae7565b92915050565b60085460ff16156105a75761058d82610b45565b61058d82610b81565b5f546001600160a01b031633146105d95760405162461bcd60e51b815260040161038e90610d96565b6105e9633b9aca006103e8610e00565b81111561060957604051631e44fdeb60e11b815260040160405180910390fd5b60038190556040518181527f3336cd9708eaf2769a0f0dc0679f30e80f15dcd88d1921b5a16858e8b85c591a906020016103ec565b5f546001600160a01b031633146106675760405162461bcd60e51b815260040161038e90610d96565b6106705f610bc4565b565b5f546001600160a01b0316331461069b5760405162461bcd60e51b815260040161038e90610d96565b6106a9633b9aca0080610e00565b8111156106c95760405163874f603160e01b815260040160405180910390fd5b60068190556040518181527f2ab3f5a4ebbcbf3c24f62f5454f52f10e1a8c9dcc5acac8f19199ce881a6a108906020016103ec565b5f546001600160a01b0316
33146107275760405162461bcd60e51b815260040161038e90610d96565b60085460ff161561074b576040516379f9c57560e01b815260040160405180910390fd5b6008805460ff19166001179055565b5f546001600160a01b031633146107835760405162461bcd60e51b815260040161038e90610d96565b633b9aca008110806107a1575061079e633b9aca0080610e00565b81115b156107bf5760405163d9b5dcdf60e01b815260040160405180910390fd5b60098190556040518181527fd50d3079c77df569cd58d55d4e5614bfe7066449009425d22bde8e75242f50bb906020016103ec565b6004805460405163efc7840160e01b815233928101929092526001600160a01b03169063efc7840190602401602060405180830381865afa15801561083b573d5f803e3d5ffd5b505050506040513d601f19601f8201168201806040525081019061085f9190610dcd565b61087c576040516326b3506d60e11b815260040160405180910390fd5b60018190556040518181527f351fb23757bb5ea0546c85b7996ddd7155f96b939ebaa5ff7bc49c75f27f2c44906020016103ec565b5f546001600160a01b031633146108da5760405162461bcd60e51b815260040161038e90610d96565b633b9aca008110806108f857506108f5633b9aca0080610e00565b81115b156109155760405162ae184360e01b815260040160405180910390fd5b600a8190556040518181527f8647cebb7e57360673a28415c0bed2f68c42a86c5035f1c9b2eda2b09509288a906020016103ec565b600b545f9060ff168061095f575060085460ff165b1561096b57505f919050565b61058d82610c13565b5f546001600160a01b0316331461099d5760405162461bcd60e51b815260040161038e90610d96565b6109ab633b9aca0080610e00565b8111156109cb5760405163f37ec21560e01b815260040160405180910390fd5b60078190556040518181527f6b332a036d8c3ead57dcb06c87243bd7a2aed015ddf2d0528c2501dae56331aa906020016103ec565b5f546001600160a01b03163314610a295760405162461bcd60e51b815260040161038e90610d96565b6001600160a01b038116610a7f5760405162461bcd60e51b815260206004820152601d60248201527f6e6577206f776e657220697320746865207a65726f2061646472657373000000604482015260640161038e565b610a8881610bc4565b50565b5f546001600160a01b03163314610ab45760405162461bcd60e51b815260040161038e90610d96565b600b5460ff1615610ad857604051631a7c228b60e21b815260040160405180910390fd5b600b805460ff19166001179055565b5f633b9aca0080600a54
8451600554600754610b039190610e00565b600154600654610b139190610e00565b610b1d9190610e17565b610b279190610e00565b610b319190610e00565b610b3b9190610e2a565b61058d9190610e2a565b5f633b9aca006005548351600754610b5d9190610e00565b610b679190610e00565b600154600654610b779190610e00565b610b3b9190610e17565b5f80610b8c83610c13565b90505f60015482610b9d9190610e00565b9050633b9aca0060035482610bb29190610e00565b610bbc9190610e2a565b949350505050565b5f80546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b80515f908190815b81811015610c6457848181518110610c3557610c35610e49565b01602001516001600160f81b0319165f03610c5557600483019250610c5c565b6010830192505b600101610c1b565b50506002540160400192915050565b5f60208284031215610c83575f80fd5b5035919050565b5f8060408385031215610c9b575f80fd5b50508035926020909101359150565b5f60208284031215610cba575f80fd5b81356001600160a01b0381168114610cd0575f80fd5b9392505050565b634e487b7160e01b5f52604160045260245ffd5b5f60208284031215610cfb575f80fd5b813567ffffffffffffffff80821115610d12575f80fd5b818401915084601f830112610d25575f80fd5b813581811115610d3757610d37610cd7565b604051601f8201601f19908116603f01168101908382118183101715610d5f57610d5f610cd7565b81604052828152876020848701011115610d77575f80fd5b826020860160208301375f928101602001929092525095945050505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e6572000000000000000000604082015260600190565b5f60208284031215610ddd575f80fd5b81518015158114610cd0575f80fd5b634e487b7160e01b5f52601160045260245ffd5b808202811582820484141761058d5761058d610dec565b8082018082111561058d5761058d610dec565b5f82610e4457634e487b7160e01b5f52601260045260245ffd5b500490565b634e487b7160e01b5f52603260045260245ffdfea164736f6c6343000818000a", + "nonce": "0x0", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb92266", + 
"0x0000000000000000000000000000000000000000000000000000000000000008": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0x0000000000000000000000000000000000000000000000000000000000000009": "0x000000000000000000000000000000000000000000000000000000003b9aca00", + "0x000000000000000000000000000000000000000000000000000000000000000a": "0x000000000000000000000000000000000000000000000000000000003b9aca00", + "0x000000000000000000000000000000000000000000000000000000000000000b": "0x0000000000000000000000000000000000000000000000000000000000000001" + } + }, + "0x5300000000000000000000000000000000000003": { + "balance": "0x0", + "code": "0x608060405234801561000f575f80fd5b5060043610610055575f3560e01c8063715018a61461005957806379586dd7146100635780638da5cb5b14610076578063efc78401146100a5578063f2fde38b146100e0575b5f80fd5b6100616100f3565b005b61006161007136600461033a565b610130565b5f54610088906001600160a01b031681565b6040516001600160a01b0390911681526020015b60405180910390f35b6100d06100b336600461040c565b6001600160a01b03165f9081526001602052604090205460ff1690565b604051901515815260200161009c565b6100616100ee36600461040c565b610222565b5f546001600160a01b031633146101255760405162461bcd60e51b815260040161011c9061042c565b60405180910390fd5b61012e5f6102ad565b565b5f546001600160a01b031633146101595760405162461bcd60e51b815260040161011c9061042c565b5f5b825181101561021d578160015f85848151811061017a5761017a610463565b60200260200101516001600160a01b03166001600160a01b031681526020019081526020015f205f6101000a81548160ff0219169083151502179055508281815181106101c9576101c9610463565b60200260200101516001600160a01b03167f8daaf060c3306c38e068a75c054bf96ecd85a3db1252712c4d93632744c42e0d8360405161020d911515815260200190565b60405180910390a260010161015b565b505050565b5f546001600160a01b0316331461024b5760405162461bcd60e51b815260040161011c9061042c565b6001600160a01b0381166102a15760405162461bcd60e51b815260206004820152601d60248201527f6e6577206f776e657220697320746865207a65726f2061646472657373000000604482015260640161011
c565b6102aa816102ad565b50565b5f80546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b634e487b7160e01b5f52604160045260245ffd5b80356001600160a01b0381168114610326575f80fd5b919050565b80358015158114610326575f80fd5b5f806040838503121561034b575f80fd5b823567ffffffffffffffff80821115610362575f80fd5b818501915085601f830112610375575f80fd5b8135602082821115610389576103896102fc565b8160051b604051601f19603f830116810181811086821117156103ae576103ae6102fc565b6040529283528183019350848101820192898411156103cb575f80fd5b948201945b838610156103f0576103e186610310565b855294820194938201936103d0565b96506103ff905087820161032b565b9450505050509250929050565b5f6020828403121561041c575f80fd5b61042582610310565b9392505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e6572000000000000000000604082015260600190565b634e487b7160e01b5f52603260045260245ffdfea164736f6c6343000818000a", + "nonce": "0x0", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb92266" + } + }, + "0x5300000000000000000000000000000000000005": { + "balance": "0x0", + "code": 
"0x6080604052600436106100a8575f3560e01c806384411d651161006257806384411d651461017a5780638da5cb5b1461018f5780639e7adc79146101ad578063f2fde38b146101cc578063feec756c146101eb578063ff4f35461461020a575f80fd5b80632e1a7d4d146100b35780633cb747bf146100d45780633ccfd60b14610110578063457e1a491461012457806366d003ac14610147578063715018a614610166575f80fd5b366100af57005b5f80fd5b3480156100be575f80fd5b506100d26100cd366004610663565b610229565b005b3480156100df575f80fd5b506002546100f3906001600160a01b031681565b6040516001600160a01b0390911681526020015b60405180910390f35b34801561011b575f80fd5b506100d26103e9565b34801561012f575f80fd5b5061013960015481565b604051908152602001610107565b348015610152575f80fd5b506003546100f3906001600160a01b031681565b348015610171575f80fd5b506100d26103f6565b348015610185575f80fd5b5061013960045481565b34801561019a575f80fd5b505f546100f3906001600160a01b031681565b3480156101b8575f80fd5b506100d26101c736600461067a565b61042a565b3480156101d7575f80fd5b506100d26101e636600461067a565b6104a4565b3480156101f6575f80fd5b506100d261020536600461067a565b61052c565b348015610215575f80fd5b506100d2610224366004610663565b6105a6565b6001548110156102b95760405162461bcd60e51b815260206004820152604a60248201527f4665655661756c743a207769746864726177616c20616d6f756e74206d75737460448201527f2062652067726561746572207468616e206d696e696d756d20776974686472616064820152691dd85b08185b5bdd5b9d60b21b608482015260a4015b60405180910390fd5b478082111561031d5760405162461bcd60e51b815260206004820152602a60248201527f4665655661756c743a20696e73756666696369656e742062616c616e636520746044820152696f20776974686472617760b01b60648201526084016102b0565b6004805483019055600354604080518481526001600160a01b0390921660208301523382820152517fc8a211cc64b6ed1b50595a9fcb1932b6d1e5a6e8ef15b60e5b1f988ea9086bba9181900360600190a1600254600354604080516020810182525f808252915163b2267a7b60e01b81526001600160a01b039485169463b2267a7b9488946103b79491909216928592906004016106a7565b5f604051808303818588803b1580156103ce575f80fd5b505af11580156103e0573d5f803e3d5ffd5b50505050505
050565b476103f381610229565b50565b5f546001600160a01b0316331461041f5760405162461bcd60e51b81526004016102b090610711565b6104285f610614565b565b5f546001600160a01b031633146104535760405162461bcd60e51b81526004016102b090610711565b600280546001600160a01b038381166001600160a01b0319831681179093556040519116919082907f1c928c417a10a21c3cddad148c5dba5d710e4b1442d6d8a36de345935ad84612905f90a35050565b5f546001600160a01b031633146104cd5760405162461bcd60e51b81526004016102b090610711565b6001600160a01b0381166105235760405162461bcd60e51b815260206004820152601d60248201527f6e6577206f776e657220697320746865207a65726f206164647265737300000060448201526064016102b0565b6103f381610614565b5f546001600160a01b031633146105555760405162461bcd60e51b81526004016102b090610711565b600380546001600160a01b038381166001600160a01b0319831681179093556040519116919082907f7e1e96961a397c8aa26162fe259cc837afc95e33aad4945ddc61c18dabb7a6ad905f90a35050565b5f546001600160a01b031633146105cf5760405162461bcd60e51b81526004016102b090610711565b600180549082905560408051828152602081018490527f0d3c80219fe57713b9f9c83d1e51426792d0c14d8e330e65b102571816140965910160405180910390a15050565b5f80546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b5f60208284031215610673575f80fd5b5035919050565b5f6020828403121561068a575f80fd5b81356001600160a01b03811681146106a0575f80fd5b9392505050565b60018060a01b03851681525f60208560208401526080604084015284518060808501525f5b818110156106e85786810183015185820160a0015282016106cc565b505f60a0828601015260a0601f19601f8301168501019250505082606083015295945050505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e657200000000000000000060408201526060019056fea164736f6c6343000818000a", + "nonce": "0x0", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb92266", + 
"0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000000000000000000000000000000de0b6b3a7640000", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000df45f4790e20509959fccc0d09245b216abdaa37", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb92266" + } + }, + "0xdf45f4790e20509959fccc0d09245b216abdaa37": { + "balance": "0x7ffffffffffffffffffffffffffffffffffffffffffffff21f494c589c0000", + "code": "0x", + "nonce": "0x0", + "storage": { + "0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266": { + "balance": "0xde0b6b3a7640000", + "code": "0x", + "nonce": "0x1", + "storage": {} + }, + "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x70997970C51812dc3A010C7d01b50e0d17dc79C8": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x90F79bf6EB2c4f870365E785982E1f101E93b906": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x15d34AAf54267DB7D7c367839AAf71A00a2C6A65": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x976EA74026E726554dB657fA54763abd0C3a0aa9": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x14dC79964da2C08b23698B3D3cc7Ca32193d9955": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x23618e81E3f5cdF7f54C3d65f7FBc0aBf5B21E8f": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xa0Ee7A142d267C1f36714E4a8F75612F20a79720": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xBcd4042DE499D14e55001CcbB24a551F3b954096": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x71bE63f3384f5fb98995898A86B02Fb2426c5788": { + "balance": 
"0xD3C21BCECCEDA1000000" + }, + "0xFABB0ac9d68B0B445fB7357272Ff202C5651694a": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x1CBd3b2770909D4e10f157cABC84C7264073C9Ec": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xdF3e18d64BC6A983f673Ab319CCaE4f1a57C7097": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xcd3B766CCDd6AE721141F452C550Ca635964ce71": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x2546BcD3c84621e976D8185a91A922aE77ECEc30": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xbDA5747bFD65F08deb54cb465eB87D40e51B197E": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0xdD2FD4581271e230360230F9337D5c0430Bf44C0": { + "balance": "0xD3C21BCECCEDA1000000" + }, + "0x8626f6940E2eb28930eFb4CeF49B2d1F2C9C1199": { + "balance": "0xD3C21BCECCEDA1000000" + } + } + } \ No newline at end of file From de68697a1a0d1ebdb4b69ec5d0a4a783bee6d43d Mon Sep 17 00:00:00 2001 From: frisitano Date: Wed, 14 Jan 2026 18:03:05 +0000 Subject: [PATCH 2/9] fixes --- crates/node/src/test_utils/mod.rs | 4 ++-- crates/sequencer/tests/e2e.rs | 14 ++++++++------ 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/crates/node/src/test_utils/mod.rs b/crates/node/src/test_utils/mod.rs index 46bdaffd..2f0c8a9b 100644 --- a/crates/node/src/test_utils/mod.rs +++ b/crates/node/src/test_utils/mod.rs @@ -115,7 +115,7 @@ pub async fn setup_engine( chain_spec: Arc<::ChainSpec>, is_dev: bool, no_local_transactions_propagation: bool, - bootnodes: Option>, + trusted_peers: Option>, ) -> eyre::Result<( Vec< NodeHelperType< @@ -139,7 +139,7 @@ where let network_config = NetworkArgs { discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, - bootnodes, + trusted_peers: trusted_peers.unwrap_or_default(), ..NetworkArgs::default() }; diff --git a/crates/sequencer/tests/e2e.rs b/crates/sequencer/tests/e2e.rs index 991d183f..27df2710 100644 --- a/crates/sequencer/tests/e2e.rs +++ b/crates/sequencer/tests/e2e.rs @@ -212,7 +212,7 @@ async fn 
can_build_blocks_with_delayed_l1_messages() { // setup a test node let (mut nodes, _tasks, wallet) = - setup_engine(default_test_scroll_rollup_node_config(), 1, chain_spec, false, false) + setup_engine(default_test_scroll_rollup_node_config(), 1, chain_spec, false, false, None) .await .unwrap(); @@ -337,7 +337,7 @@ async fn can_build_blocks_with_finalized_l1_messages() { let chain_spec = SCROLL_DEV.clone(); // setup a test node let (mut nodes, _tasks, wallet) = - setup_engine(default_test_scroll_rollup_node_config(), 1, chain_spec, false, false) + setup_engine(default_test_scroll_rollup_node_config(), 1, chain_spec, false, false, None) .await .unwrap(); let node = nodes.pop().unwrap(); @@ -512,7 +512,7 @@ async fn can_sequence_blocks_with_private_key_file() -> eyre::Result<()> { }; let (nodes, _tasks, wallet) = - setup_engine(rollup_manager_args, 1, chain_spec, false, false).await?; + setup_engine(rollup_manager_args, 1, chain_spec, false, false, None).await?; let wallet = Arc::new(Mutex::new(wallet)); let sequencer_rnm_handle = nodes[0].inner.add_ons_handle.rollup_manager_handle.clone(); @@ -614,7 +614,7 @@ async fn can_sequence_blocks_with_hex_key_file_without_prefix() -> eyre::Result< }; let (nodes, _tasks, wallet) = - setup_engine(rollup_manager_args, 1, chain_spec, false, false).await?; + setup_engine(rollup_manager_args, 1, chain_spec, false, false, None).await?; let wallet = Arc::new(Mutex::new(wallet)); let sequencer_rnm_handle = nodes[0].inner.add_ons_handle.rollup_manager_handle.clone(); @@ -684,6 +684,7 @@ async fn can_build_blocks_and_exit_at_gas_limit() { chain_spec, false, false, + None, ) .await .unwrap(); @@ -770,6 +771,7 @@ async fn can_build_blocks_and_exit_at_time_limit() { chain_spec, false, false, + None, ) .await .unwrap(); @@ -850,7 +852,7 @@ async fn should_limit_l1_message_cumulative_gas() { // setup a test node let chain_spec = SCROLL_DEV.clone(); let (mut nodes, _tasks, wallet) = - setup_engine(default_test_scroll_rollup_node_config(), 1, 
chain_spec, false, false) + setup_engine(default_test_scroll_rollup_node_config(), 1, chain_spec, false, false, None) .await .unwrap(); let node = nodes.pop().unwrap(); @@ -967,7 +969,7 @@ async fn should_not_add_skipped_messages() { // setup a test node let chain_spec = SCROLL_DEV.clone(); let (mut nodes, _tasks, wallet) = - setup_engine(default_test_scroll_rollup_node_config(), 1, chain_spec, false, false) + setup_engine(default_test_scroll_rollup_node_config(), 1, chain_spec, false, false, None) .await .unwrap(); let node = nodes.pop().unwrap(); From 913b7239bff9a60475307e21f6770e619c1d028c Mon Sep 17 00:00:00 2001 From: frisitano Date: Wed, 14 Jan 2026 18:36:49 +0000 Subject: [PATCH 3/9] fixes --- crates/node/src/debug_toolkit/repl.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/crates/node/src/debug_toolkit/repl.rs b/crates/node/src/debug_toolkit/repl.rs index e8dfda06..62e70978 100644 --- a/crates/node/src/debug_toolkit/repl.rs +++ b/crates/node/src/debug_toolkit/repl.rs @@ -18,7 +18,7 @@ use crossterm::event::{self, Event, KeyCode, KeyModifiers}; use futures::StreamExt; use reth_network::PeersInfo; use reth_network_api::Peers; -use reth_network_peers::NodeRecord; +use reth_network_peers::TrustedPeer; use reth_rpc_api::EthApiServer; use reth_transaction_pool::TransactionPool; use std::{io::Write, str::FromStr, time::Duration}; @@ -686,8 +686,9 @@ impl DebugRepl { } PeersCommand::Connect(enode_url) => { // Parse the enode URL - match NodeRecord::from_str(&enode_url) { + match TrustedPeer::from_str(&enode_url) { Ok(record) => { + let record = record.resolve().await?; network_handle.inner().add_peer(record.id, record.tcp_addr()); println!("{}", format!("Connecting to peer: {:?}", record.id).green()); println!(" Address: {}", record.tcp_addr()); From 94c2a488d450008ed8c4fae05ca876ccc50e2c6c Mon Sep 17 00:00:00 2001 From: frisitano Date: Wed, 14 Jan 2026 22:52:50 +0000 Subject: [PATCH 4/9] update debug-toolkit --- Cargo.lock | 1 + 
Cargo.toml | 1 + crates/node/Cargo.toml | 1 + crates/node/src/args.rs | 15 ++- crates/node/src/debug_toolkit/cli.rs | 20 ++- crates/node/src/debug_toolkit/repl.rs | 46 +++++-- crates/node/src/test_utils/fixture.rs | 53 +++----- crates/node/src/test_utils/mod.rs | 3 +- crates/node/tests/e2e.rs | 6 +- hackathon.json | 168 -------------------------- 10 files changed, 93 insertions(+), 221 deletions(-) delete mode 100644 hackathon.json diff --git a/Cargo.lock b/Cargo.lock index 2ec339b9..c8f0502e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11135,6 +11135,7 @@ dependencies = [ "regex-lite", "reqwest", "reth-chainspec", + "reth-cli", "reth-cli-util", "reth-e2e-test-utils", "reth-engine-local", diff --git a/Cargo.toml b/Cargo.toml index f46d87c9..ce3f6db1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -182,6 +182,7 @@ reth-testing-utils = { git = "https://github.com/scroll-tech/reth.git", tag = "s reth-revm = { git = "https://github.com/scroll-tech/reth.git", tag = "scroll-v91.4", default-features = false } reth-evm = { git = "https://github.com/scroll-tech/reth.git", tag = "scroll-v91.4", default-features = false } reth-engine-local = { git = "https://github.com/scroll-tech/reth.git", tag = "scroll-v91.4", default-features = false } +reth-cli = { git = "https://github.com/scroll-tech/reth.git", tag = "scroll-v91.4", default-features = false } reth-cli-util = { git = "https://github.com/scroll-tech/reth.git", tag = "scroll-v91.4", default-features = false } # reth-scroll diff --git a/crates/node/Cargo.toml b/crates/node/Cargo.toml index 7b771921..854bf2e8 100644 --- a/crates/node/Cargo.toml +++ b/crates/node/Cargo.toml @@ -52,6 +52,7 @@ reth-scroll-node.workspace = true reth-scroll-rpc.workspace = true reth-chainspec.workspace = true +reth-cli.workspace = true reth-cli-util.workspace = true reth-eth-wire-types.workspace = true reth-evm.workspace = true diff --git a/crates/node/src/args.rs b/crates/node/src/args.rs index 0bfb5cd9..15db70a9 100644 --- 
a/crates/node/src/args.rs +++ b/crates/node/src/args.rs @@ -668,7 +668,7 @@ impl RollupNodeNetworkArgs { } /// The arguments for the L1 provider. -#[derive(Debug, Default, Clone, clap::Args)] +#[derive(Debug, Clone, clap::Args)] pub struct L1ProviderArgs { /// The URL for the L1 RPC. #[arg(long = "l1.url", id = "l1_url", value_name = "L1_URL")] @@ -690,6 +690,19 @@ pub struct L1ProviderArgs { pub cache_max_items: u32, } +impl Default for L1ProviderArgs { + fn default() -> Self { + Self { + url: None, + compute_units_per_second: constants::PROVIDER_COMPUTE_UNITS_PER_SECOND, + max_retries: constants::L1_PROVIDER_MAX_RETRIES, + initial_backoff: constants::L1_PROVIDER_INITIAL_BACKOFF, + logs_query_block_range: constants::LOGS_QUERY_BLOCK_RANGE, + cache_max_items: constants::L1_PROVIDER_CACHE_MAX_ITEMS, + } + } +} + /// The arguments for the Beacon provider. #[derive(Debug, Default, Clone, clap::Args)] pub struct BlobProviderArgs { diff --git a/crates/node/src/debug_toolkit/cli.rs b/crates/node/src/debug_toolkit/cli.rs index 1ccfc04e..1d20d7ff 100644 --- a/crates/node/src/debug_toolkit/cli.rs +++ b/crates/node/src/debug_toolkit/cli.rs @@ -1,6 +1,7 @@ //! CLI subcommand for the debug toolkit. use crate::test_utils::TestFixtureBuilder; +use alloy_primitives::Address; use clap::Parser; use reth_network_peers::TrustedPeer; use std::{path::PathBuf, str::FromStr}; @@ -44,6 +45,10 @@ pub struct DebugArgs { /// Comma-separated list of bootnode enode URLs to connect to. #[arg(long, value_delimiter = ',')] pub bootnodes: Option>, + + /// The valid signer address for the network. + #[arg(long)] + pub valid_signer: Option
, } impl DebugArgs { @@ -52,7 +57,7 @@ impl DebugArgs { use super::DebugRepl; // Build the fixture - let mut builder = TestFixtureBuilder::new().with_chain(&self.chain); + let mut builder = TestFixtureBuilder::new().with_chain(&self.chain)?; if self.sequencer { builder = builder.sequencer(); @@ -62,6 +67,19 @@ impl DebugArgs { builder = builder.followers(self.followers); } + if self.valid_signer.is_some() { + builder = builder.with_consensus_system_contract(self.valid_signer); + builder = builder.with_network_valid_signer(self.valid_signer); + } + + if self.bootnodes.as_ref().map(|b| !b.is_empty()).unwrap_or(false) || + self.l1_url.is_some() || + self.valid_signer.is_some() + { + // Disable test mode if bootnodes or l1 url are specified + builder.config_mut().test = false; + } + // Apply sequencer settings builder = builder .block_time(self.block_time) diff --git a/crates/node/src/debug_toolkit/repl.rs b/crates/node/src/debug_toolkit/repl.rs index 62e70978..d9fc668f 100644 --- a/crates/node/src/debug_toolkit/repl.rs +++ b/crates/node/src/debug_toolkit/repl.rs @@ -14,11 +14,14 @@ use alloy_eips::{eip2718::Encodable2718, BlockNumberOrTag}; use alloy_network::{TransactionResponse, TxSignerSync}; use alloy_primitives::TxKind; use colored::Colorize; -use crossterm::event::{self, Event, KeyCode, KeyModifiers}; +use crossterm::{ + event::{self, Event, KeyCode, KeyModifiers}, + terminal::{disable_raw_mode, enable_raw_mode}, +}; use futures::StreamExt; use reth_network::PeersInfo; use reth_network_api::Peers; -use reth_network_peers::TrustedPeer; +use reth_network_peers::NodeRecord; use reth_rpc_api::EthApiServer; use reth_transaction_pool::TransactionPool; use std::{io::Write, str::FromStr, time::Duration}; @@ -91,7 +94,21 @@ impl DebugRepl { pub async fn run(&mut self) -> eyre::Result<()> { self.running = true; - // Print welcome message + // Enable raw mode for proper terminal control + enable_raw_mode()?; + + // Guard to ensure raw mode is disabled on exit + struct 
RawModeGuard; + impl Drop for RawModeGuard { + fn drop(&mut self) { + let _ = disable_raw_mode(); + } + } + let _guard = RawModeGuard; + + // Print welcome message and initial status + // Disable raw mode temporarily so println! works correctly + let _ = disable_raw_mode(); println!(); println!("{}", "Scroll Debug Toolkit".bold().cyan()); println!("Type 'help' for available commands, 'exit' to quit."); @@ -100,6 +117,9 @@ impl DebugRepl { // Show initial status self.cmd_status().await?; + // Re-enable raw mode for input handling + let _ = enable_raw_mode(); + // Current input line buffer let mut input_buffer = String::new(); let mut stdout = std::io::stdout(); @@ -117,8 +137,8 @@ impl DebugRepl { Some(event) = self.fixture.nodes[self.active_node].chain_orchestrator_rx.next() => { // Display if streaming is enabled if let Some(formatted) = self.event_streams[self.active_node].record_event(event) { - // Clear current line, print event, reprint prompt - print!("\r\x1b[K{}\n{}{}", formatted, self.get_prompt(), input_buffer); + // Clear current line, print event, reprint prompt with input buffer + print!("\r\x1b[K{}\r\n{}{}", formatted, self.get_prompt(), input_buffer); let _ = stdout.flush(); } } @@ -130,14 +150,19 @@ impl DebugRepl { if let Event::Key(key_event) = event::read()? { match key_event.code { KeyCode::Enter => { - println!(); + print!("\r\n"); + let _ = stdout.flush(); let line = input_buffer.trim().to_string(); input_buffer.clear(); if !line.is_empty() { + // Disable raw mode for command output (println! 
works normally) + let _ = disable_raw_mode(); if let Err(e) = self.execute_command(&line).await { println!("{}: {}", "Error".red(), e); } + // Re-enable raw mode for input + let _ = enable_raw_mode(); } if self.running { @@ -154,11 +179,11 @@ impl DebugRepl { } KeyCode::Char(c) => { if key_event.modifiers.contains(KeyModifiers::CONTROL) && c == 'c' { - println!("\nUse 'exit' to quit"); + print!("\r\nUse 'exit' to quit\r\n"); print!("{}{}", self.get_prompt(), input_buffer); let _ = stdout.flush(); } else if key_event.modifiers.contains(KeyModifiers::CONTROL) && c == 'd' { - println!(); + print!("\r\n"); self.running = false; } else { input_buffer.push(c); @@ -180,7 +205,7 @@ impl DebugRepl { } } - println!("Goodbye!"); + print!("Goodbye!\r\n"); Ok(()) } @@ -686,9 +711,8 @@ impl DebugRepl { } PeersCommand::Connect(enode_url) => { // Parse the enode URL - match TrustedPeer::from_str(&enode_url) { + match NodeRecord::from_str(&enode_url) { Ok(record) => { - let record = record.resolve().await?; network_handle.inner().add_peer(record.id, record.tcp_addr()); println!("{}", format!("Connecting to peer: {:?}", record.id).green()); println!(" Address: {}", record.tcp_addr()); diff --git a/crates/node/src/test_utils/fixture.rs b/crates/node/src/test_utils/fixture.rs index 371c81bf..c59b7876 100644 --- a/crates/node/src/test_utils/fixture.rs +++ b/crates/node/src/test_utils/fixture.rs @@ -15,6 +15,7 @@ use alloy_primitives::Address; use alloy_rpc_types_eth::Block; use alloy_signer_local::PrivateKeySigner; use reth_chainspec::EthChainSpec; +use reth_cli::chainspec::ChainSpecParser; use reth_e2e_test_utils::{wallet::Wallet, NodeHelperType, TmpDB}; use reth_eth_wire_types::BasicNetworkPrimitives; use reth_network::NetworkHandle; @@ -22,7 +23,8 @@ use reth_network_peers::TrustedPeer; use reth_node_builder::NodeTypes; use reth_node_types::NodeTypesWithDBAdapter; use reth_provider::providers::BlockchainProvider; -use reth_scroll_chainspec::{ScrollChainSpec, SCROLL_DEV, 
SCROLL_MAINNET, SCROLL_SEPOLIA}; +use reth_scroll_chainspec::{ScrollChainSpec, SCROLL_DEV}; +use reth_scroll_cli::ScrollChainSpecParser; use reth_scroll_primitives::ScrollPrimitives; use reth_tasks::TaskManager; use reth_tokio_util::EventStream; @@ -315,40 +317,10 @@ impl TestFixtureBuilder { /// This is a convenience method that loads the appropriate chain spec. /// If the input is a file path (contains '/' or ends with '.json'), it will /// load the genesis from the file. - pub fn with_chain(mut self, chain: &str) -> Self { - let chain_spec: Arc = match chain.to_lowercase().as_str() { - "dev" => SCROLL_DEV.clone(), - "scroll-sepolia" => SCROLL_SEPOLIA.clone(), - "scroll-mainnet" | "scroll" => SCROLL_MAINNET.clone(), - _ => { - // Check if it's a file path - if chain.contains('/') || chain.ends_with(".json") { - match std::fs::read_to_string(chain) { - Ok(contents) => { - match serde_json::from_str::(&contents) { - Ok(genesis) => { - Arc::new(ScrollChainSpec::from_custom_genesis(genesis)) - } - Err(e) => { - tracing::error!(path = %chain, error = %e, "Failed to parse genesis file"); - SCROLL_DEV.clone() - } - } - } - Err(e) => { - tracing::error!(path = %chain, error = %e, "Failed to read genesis file"); - SCROLL_DEV.clone() - } - } - } else { - // For unknown chains, default to dev - tracing::warn!(chain = %chain, "Unknown chain, defaulting to dev"); - SCROLL_DEV.clone() - } - } - }; + pub fn with_chain(mut self, chain: &str) -> eyre::Result { + let chain_spec: Arc = ScrollChainSpecParser::parse(chain)?; self.chain_spec = Some(chain_spec); - self + Ok(self) } /// Enable dev mode. @@ -408,9 +380,18 @@ impl TestFixtureBuilder { } /// Use `SystemContract` consensus with the given authorized signer address. - pub const fn with_consensus_system_contract(mut self, authorized_signer: Address) -> Self { + pub const fn with_consensus_system_contract( + mut self, + authorized_signer: Option
, + ) -> Self { self.config.consensus_args.algorithm = ConsensusAlgorithm::SystemContract; - self.config.consensus_args.authorized_signer = Some(authorized_signer); + self.config.consensus_args.authorized_signer = authorized_signer; + self + } + + /// Set the valid signer address for the network. + pub const fn with_network_valid_signer(mut self, address: Option
) -> Self { + self.config.network_args.signer_address = address; self } diff --git a/crates/node/src/test_utils/mod.rs b/crates/node/src/test_utils/mod.rs index 2f0c8a9b..509ea718 100644 --- a/crates/node/src/test_utils/mod.rs +++ b/crates/node/src/test_utils/mod.rs @@ -139,7 +139,8 @@ where let network_config = NetworkArgs { discovery: DiscoveryArgs { disable_discovery: true, ..DiscoveryArgs::default() }, - trusted_peers: trusted_peers.unwrap_or_default(), + trusted_peers: trusted_peers.clone().unwrap_or_default(), + bootnodes: trusted_peers, ..NetworkArgs::default() }; diff --git a/crates/node/tests/e2e.rs b/crates/node/tests/e2e.rs index 70922a85..6e181895 100644 --- a/crates/node/tests/e2e.rs +++ b/crates/node/tests/e2e.rs @@ -189,7 +189,7 @@ async fn can_penalize_peer_for_invalid_signature() -> eyre::Result<()> { .with_chain_spec(chain_spec) .block_time(0) .allow_empty_blocks(true) - .with_consensus_system_contract(authorized_address) + .with_consensus_system_contract(Some(authorized_address)) .with_signer(authorized_signer.clone()) .payload_building_duration(1000) .build() @@ -1851,7 +1851,7 @@ async fn signer_rotation() -> eyre::Result<()> { .sequencer() .followers(1) .with_test(false) - .with_consensus_system_contract(signer_1_address) + .with_consensus_system_contract(Some(signer_1_address)) .with_signer(signer_1) .with_sequencer_auto_start(true) .with_eth_scroll_bridge(false) @@ -1862,7 +1862,7 @@ async fn signer_rotation() -> eyre::Result<()> { let mut fixture2 = TestFixture::builder() .sequencer() .with_test(false) - .with_consensus_system_contract(signer_1_address) + .with_consensus_system_contract(Some(signer_1_address)) .with_signer(signer_2) .with_sequencer_auto_start(true) .with_eth_scroll_bridge(false) diff --git a/hackathon.json b/hackathon.json deleted file mode 100644 index 7eb3b08e..00000000 --- a/hackathon.json +++ /dev/null @@ -1,168 +0,0 @@ - { - "config": { - "chainId": 938471, - "homesteadBlock": 0, - "eip150Block": 0, - "eip150Hash": 
"0x0000000000000000000000000000000000000000000000000000000000000000", - "eip155Block": 0, - "eip158Block": 0, - "byzantiumBlock": 0, - "constantinopleBlock": 0, - "petersburgBlock": 0, - "istanbulBlock": 0, - "berlinBlock": 0, - "londonBlock": 0, - "archimedesBlock": 0, - "shanghaiBlock": 0, - "bernoulliBlock": 0, - "curieBlock": 0, - "darwinTime": 0, - "darwinV2Time": 0, - "euclidTime": 0, - "euclidV2Time": 0, - "feynmanTime": 0, - "scroll": { - "maxTxPayloadBytesPerBlock": 122880, - "feeVaultAddress": "0x5300000000000000000000000000000000000005", - "l1Config": { - "l1ChainId": 22222222, - "l1MessageQueueAddress": "0x0000000000000000000000000000000000000001", - "l1MessageQueueV2Address": "0xDc64a140Aa3E981100a9becA4E685f962f0cF6C9", - "scrollChainAddress": "0x84044d3a645843bAF0752eA591E1EAB643beD904", - "systemContractAddress": "0x9fE46736679d2D9a65F0992F2272dE9f3c7fa6e0", - "l2SystemConfigAddress": "0x2E48aC0df81f1fa57722e115e807C9dB1819bA13", - "numL1MessagesPerBlock": 10, - "startL1Block": 0 - } - } - }, - "nonce": "0x0", - "timestamp": "0x00000000000000000000000000000000000000000000000000000000689b3f30", - "extraData": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "gasLimit": "30000000", - "difficulty": "0x1", - "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "coinbase": "0x0000000000000000000000000000000000000000", - "baseFeePerGas": "0x1", - "alloc": { - "0x4e59b44847b379578588920ca78fbf26c0b4956c": { - "balance": "0x0", - "code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3", - "nonce": "0x1", - "storage": {} - }, - "0x5300000000000000000000000000000000000000": { - "balance": "0x0", - "code": 
"0x608060405234801561000f575f80fd5b5060043610610090575f3560e01c806383cc76601161006357806383cc7660146100f85780638da5cb5b1461010b578063c4d66de81461011e578063d4b9f4fa14610131578063f2fde38b14610139575f80fd5b806326aad7b7146100945780633cb747bf146100b0578063600a2e77146100db578063715018a6146100ee575b5f80fd5b61009d60015481565b6040519081526020015b60405180910390f35b6053546100c3906001600160a01b031681565b6040516001600160a01b0390911681526020016100a7565b61009d6100e9366004610539565b61014c565b6100f66101ef565b005b61009d610106366004610539565b610224565b6052546100c3906001600160a01b031681565b6100f661012c366004610550565b61023a565b61009d5f5481565b6100f6610147366004610550565b6102d2565b6053545f906001600160a01b0316331461019e5760405162461bcd60e51b815260206004820152600e60248201526d37b7363c9036b2b9b9b2b733b2b960911b60448201526064015b60405180910390fd5b5f806101a98461035e565b60408051838152602081018890529294509092507ffaa617c2d8ce12c62637dbce76efcc18dae60574aa95709bdcedce7e76071693910160405180910390a19392505050565b6052546001600160a01b031633146102195760405162461bcd60e51b81526004016101959061057d565b6102225f610477565b565b602a8160288110610233575f80fd5b0154905081565b6052546001600160a01b031633146102645760405162461bcd60e51b81526004016101959061057d565b600154156102a85760405162461bcd60e51b815260206004820152601160248201527063616e6e6f7420696e697469616c697a6560781b6044820152606401610195565b6102b06104c8565b605380546001600160a01b0319166001600160a01b0392909216919091179055565b6052546001600160a01b031633146102fc5760405162461bcd60e51b81526004016101959061057d565b6001600160a01b0381166103525760405162461bcd60e51b815260206004820152601d60248201527f6e6577206f776e657220697320746865207a65726f20616464726573730000006044820152606401610195565b61035b81610477565b50565b6003545f9081906103b15760405162461bcd60e51b815260206004820152601a60248201527f63616c6c206265666f726520696e697469616c697a6174696f6e0000000000006044820152606401610195565b600154835f5b8215610448576103c86002846105c8565b5f036104125781602a82602881106103e2576103e26105b4565b0155610
40b82600283602881106103fb576103fb6105b4565b01545f9182526020526040902090565b915061043c565b610439602a8260288110610428576104286105b4565b0154835f9182526020526040902090565b91505b600192831c92016103b7565b81602a826028811061045c5761045c6105b4565b0155505f819055600180548082019091559590945092505050565b605280546001600160a01b038381166001600160a01b0319831681179093556040519116919082907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0905f90a35050565b5f5b60286104d78260016105fb565b101561035b57610508600282602881106104f3576104f36105b4565b0154600283602881106103fb576103fb6105b4565b60026105158360016105fb565b60288110610525576105256105b4565b01558061053181610614565b9150506104ca565b5f60208284031215610549575f80fd5b5035919050565b5f60208284031215610560575f80fd5b81356001600160a01b0381168114610576575f80fd5b9392505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e6572000000000000000000604082015260600190565b634e487b7160e01b5f52603260045260245ffd5b5f826105e257634e487b7160e01b5f52601260045260245ffd5b500690565b634e487b7160e01b5f52601160045260245ffd5b8082018082111561060e5761060e6105e7565b92915050565b5f60018201610625576106256105e7565b506001019056fea164736f6c6343000818000a", - "nonce": "0x0", - "storage": { - "0x0000000000000000000000000000000000000000000000000000000000000052": "0x000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb92266" - } - }, - "0x5300000000000000000000000000000000000002": { - "balance": "0x0", - "code": 
"0x608060405234801561000f575f80fd5b50600436106101a1575f3560e01c806384189161116100f3578063c63b9e2d11610093578063e88a60ad1161006e578063e88a60ad1461032e578063f2fde38b14610341578063f45e65d814610354578063fe5b04151461035d575f80fd5b8063c63b9e2d146102ff578063c91e514914610312578063de26c4a11461031b575f80fd5b8063944b247f116100ce578063944b247f146102be578063a911d77f146102d1578063aa5e9334146102d9578063bede39b5146102ec575f80fd5b806384189161146102785780638da5cb5b1461028157806393e59dc1146102ab575f80fd5b80633d0f963e1161015e5780636112d6db116101395780636112d6db1461024b5780636a5e67e514610254578063704655971461025d578063715018a614610270575f80fd5b80633d0f963e1461021c57806349948e0e1461022f578063519b4bd314610242575f80fd5b80630c18c162146101a557806313dad5be146101c157806323e524ac146101de5780633577afc5146101e757806339455d3a146101fc5780633b7656bb1461020f575b5f80fd5b6101ae60025481565b6040519081526020015b60405180910390f35b6008546101ce9060ff1681565b60405190151581526020016101b8565b6101ae60065481565b6101fa6101f5366004610c73565b610365565b005b6101fa61020a366004610c8a565b6103f7565b600b546101ce9060ff1681565b6101fa61022a366004610caa565b6104f4565b6101ae61023d366004610ceb565b610577565b6101ae60015481565b6101ae600a5481565b6101ae60075481565b6101fa61026b366004610c73565b6105b0565b6101fa61063e565b6101ae60055481565b5f54610293906001600160a01b031681565b6040516001600160a01b0390911681526020016101b8565b600454610293906001600160a01b031681565b6101fa6102cc366004610c73565b610672565b6101fa6106fe565b6101fa6102e7366004610c73565b61075a565b6101fa6102fa366004610c73565b6107f4565b6101fa61030d366004610c73565b6108b1565b6101ae60095481565b6101ae610329366004610ceb565b61094a565b6101fa61033c366004610c73565b610974565b6101fa61034f366004610caa565b610a00565b6101ae60035481565b6101fa610a8b565b5f546001600160a01b031633146103975760405162461bcd60e51b815260040161038e90610d96565b60405180910390fd5b621c9c388111156103bb57604051635742c80560e11b815260040160405180910390fd5b60028190556040518181527f32740b35c0ea213650f60d44366b4fb211c9033b50714e4a1d34e65d5beb9
bb4906020015b60405180910390a150565b6004805460405163efc7840160e01b815233928101929092526001600160a01b03169063efc7840190602401602060405180830381865afa15801561043e573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906104629190610dcd565b61047f576040516326b3506d60e11b815260040160405180910390fd5b600182905560058190556040518281527f351fb23757bb5ea0546c85b7996ddd7155f96b939ebaa5ff7bc49c75f27f2c449060200160405180910390a16040518181527f9a14bfb5d18c4c3cf14cae19c23d7cf1bcede357ea40ca1f75cd49542c71c214906020015b60405180910390a15050565b5f546001600160a01b0316331461051d5760405162461bcd60e51b815260040161038e90610d96565b600480546001600160a01b038381166001600160a01b031983168117909355604080519190921680825260208201939093527f22d1c35fe072d2e42c3c8f9bd4a0d34aa84a0101d020a62517b33fdb3174e5f791016104e8565b600b545f9060ff16156105935761058d82610ae7565b92915050565b60085460ff16156105a75761058d82610b45565b61058d82610b81565b5f546001600160a01b031633146105d95760405162461bcd60e51b815260040161038e90610d96565b6105e9633b9aca006103e8610e00565b81111561060957604051631e44fdeb60e11b815260040160405180910390fd5b60038190556040518181527f3336cd9708eaf2769a0f0dc0679f30e80f15dcd88d1921b5a16858e8b85c591a906020016103ec565b5f546001600160a01b031633146106675760405162461bcd60e51b815260040161038e90610d96565b6106705f610bc4565b565b5f546001600160a01b0316331461069b5760405162461bcd60e51b815260040161038e90610d96565b6106a9633b9aca0080610e00565b8111156106c95760405163874f603160e01b815260040160405180910390fd5b60068190556040518181527f2ab3f5a4ebbcbf3c24f62f5454f52f10e1a8c9dcc5acac8f19199ce881a6a108906020016103ec565b5f546001600160a01b031633146107275760405162461bcd60e51b815260040161038e90610d96565b60085460ff161561074b576040516379f9c57560e01b815260040160405180910390fd5b6008805460ff19166001179055565b5f546001600160a01b031633146107835760405162461bcd60e51b815260040161038e90610d96565b633b9aca008110806107a1575061079e633b9aca0080610e00565b81115b156107bf5760405163d9b5dcdf60e01b815260040160405180910390fd5b60098190556040518181527fd50d3
079c77df569cd58d55d4e5614bfe7066449009425d22bde8e75242f50bb906020016103ec565b6004805460405163efc7840160e01b815233928101929092526001600160a01b03169063efc7840190602401602060405180830381865afa15801561083b573d5f803e3d5ffd5b505050506040513d601f19601f8201168201806040525081019061085f9190610dcd565b61087c576040516326b3506d60e11b815260040160405180910390fd5b60018190556040518181527f351fb23757bb5ea0546c85b7996ddd7155f96b939ebaa5ff7bc49c75f27f2c44906020016103ec565b5f546001600160a01b031633146108da5760405162461bcd60e51b815260040161038e90610d96565b633b9aca008110806108f857506108f5633b9aca0080610e00565b81115b156109155760405162ae184360e01b815260040160405180910390fd5b600a8190556040518181527f8647cebb7e57360673a28415c0bed2f68c42a86c5035f1c9b2eda2b09509288a906020016103ec565b600b545f9060ff168061095f575060085460ff165b1561096b57505f919050565b61058d82610c13565b5f546001600160a01b0316331461099d5760405162461bcd60e51b815260040161038e90610d96565b6109ab633b9aca0080610e00565b8111156109cb5760405163f37ec21560e01b815260040160405180910390fd5b60078190556040518181527f6b332a036d8c3ead57dcb06c87243bd7a2aed015ddf2d0528c2501dae56331aa906020016103ec565b5f546001600160a01b03163314610a295760405162461bcd60e51b815260040161038e90610d96565b6001600160a01b038116610a7f5760405162461bcd60e51b815260206004820152601d60248201527f6e6577206f776e657220697320746865207a65726f2061646472657373000000604482015260640161038e565b610a8881610bc4565b50565b5f546001600160a01b03163314610ab45760405162461bcd60e51b815260040161038e90610d96565b600b5460ff1615610ad857604051631a7c228b60e21b815260040160405180910390fd5b600b805460ff19166001179055565b5f633b9aca0080600a548451600554600754610b039190610e00565b600154600654610b139190610e00565b610b1d9190610e17565b610b279190610e00565b610b319190610e00565b610b3b9190610e2a565b61058d9190610e2a565b5f633b9aca006005548351600754610b5d9190610e00565b610b679190610e00565b600154600654610b779190610e00565b610b3b9190610e17565b5f80610b8c83610c13565b90505f60015482610b9d9190610e00565b9050633b9aca0060035482610bb29190610e00565b610bbc9
190610e2a565b949350505050565b5f80546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b80515f908190815b81811015610c6457848181518110610c3557610c35610e49565b01602001516001600160f81b0319165f03610c5557600483019250610c5c565b6010830192505b600101610c1b565b50506002540160400192915050565b5f60208284031215610c83575f80fd5b5035919050565b5f8060408385031215610c9b575f80fd5b50508035926020909101359150565b5f60208284031215610cba575f80fd5b81356001600160a01b0381168114610cd0575f80fd5b9392505050565b634e487b7160e01b5f52604160045260245ffd5b5f60208284031215610cfb575f80fd5b813567ffffffffffffffff80821115610d12575f80fd5b818401915084601f830112610d25575f80fd5b813581811115610d3757610d37610cd7565b604051601f8201601f19908116603f01168101908382118183101715610d5f57610d5f610cd7565b81604052828152876020848701011115610d77575f80fd5b826020860160208301375f928101602001929092525095945050505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e6572000000000000000000604082015260600190565b5f60208284031215610ddd575f80fd5b81518015158114610cd0575f80fd5b634e487b7160e01b5f52601160045260245ffd5b808202811582820484141761058d5761058d610dec565b8082018082111561058d5761058d610dec565b5f82610e4457634e487b7160e01b5f52601260045260245ffd5b500490565b634e487b7160e01b5f52603260045260245ffdfea164736f6c6343000818000a", - "nonce": "0x0", - "storage": { - "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb92266", - "0x0000000000000000000000000000000000000000000000000000000000000008": "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000009": "0x000000000000000000000000000000000000000000000000000000003b9aca00", - "0x000000000000000000000000000000000000000000000000000000000000000a": "0x000000000000000000000000000000000000000000000000000000003b9aca00", - 
"0x000000000000000000000000000000000000000000000000000000000000000b": "0x0000000000000000000000000000000000000000000000000000000000000001" - } - }, - "0x5300000000000000000000000000000000000003": { - "balance": "0x0", - "code": "0x608060405234801561000f575f80fd5b5060043610610055575f3560e01c8063715018a61461005957806379586dd7146100635780638da5cb5b14610076578063efc78401146100a5578063f2fde38b146100e0575b5f80fd5b6100616100f3565b005b61006161007136600461033a565b610130565b5f54610088906001600160a01b031681565b6040516001600160a01b0390911681526020015b60405180910390f35b6100d06100b336600461040c565b6001600160a01b03165f9081526001602052604090205460ff1690565b604051901515815260200161009c565b6100616100ee36600461040c565b610222565b5f546001600160a01b031633146101255760405162461bcd60e51b815260040161011c9061042c565b60405180910390fd5b61012e5f6102ad565b565b5f546001600160a01b031633146101595760405162461bcd60e51b815260040161011c9061042c565b5f5b825181101561021d578160015f85848151811061017a5761017a610463565b60200260200101516001600160a01b03166001600160a01b031681526020019081526020015f205f6101000a81548160ff0219169083151502179055508281815181106101c9576101c9610463565b60200260200101516001600160a01b03167f8daaf060c3306c38e068a75c054bf96ecd85a3db1252712c4d93632744c42e0d8360405161020d911515815260200190565b60405180910390a260010161015b565b505050565b5f546001600160a01b0316331461024b5760405162461bcd60e51b815260040161011c9061042c565b6001600160a01b0381166102a15760405162461bcd60e51b815260206004820152601d60248201527f6e6577206f776e657220697320746865207a65726f2061646472657373000000604482015260640161011c565b6102aa816102ad565b50565b5f80546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b634e487b7160e01b5f52604160045260245ffd5b80356001600160a01b0381168114610326575f80fd5b919050565b80358015158114610326575f80fd5b5f806040838503121561034b575f80fd5b823567ffffffffffffffff80821115610362575f80fd5b818501915085601f830112610375575f80f
d5b8135602082821115610389576103896102fc565b8160051b604051601f19603f830116810181811086821117156103ae576103ae6102fc565b6040529283528183019350848101820192898411156103cb575f80fd5b948201945b838610156103f0576103e186610310565b855294820194938201936103d0565b96506103ff905087820161032b565b9450505050509250929050565b5f6020828403121561041c575f80fd5b61042582610310565b9392505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e6572000000000000000000604082015260600190565b634e487b7160e01b5f52603260045260245ffdfea164736f6c6343000818000a", - "nonce": "0x0", - "storage": { - "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb92266" - } - }, - "0x5300000000000000000000000000000000000005": { - "balance": "0x0", - "code": "0x6080604052600436106100a8575f3560e01c806384411d651161006257806384411d651461017a5780638da5cb5b1461018f5780639e7adc79146101ad578063f2fde38b146101cc578063feec756c146101eb578063ff4f35461461020a575f80fd5b80632e1a7d4d146100b35780633cb747bf146100d45780633ccfd60b14610110578063457e1a491461012457806366d003ac14610147578063715018a614610166575f80fd5b366100af57005b5f80fd5b3480156100be575f80fd5b506100d26100cd366004610663565b610229565b005b3480156100df575f80fd5b506002546100f3906001600160a01b031681565b6040516001600160a01b0390911681526020015b60405180910390f35b34801561011b575f80fd5b506100d26103e9565b34801561012f575f80fd5b5061013960015481565b604051908152602001610107565b348015610152575f80fd5b506003546100f3906001600160a01b031681565b348015610171575f80fd5b506100d26103f6565b348015610185575f80fd5b5061013960045481565b34801561019a575f80fd5b505f546100f3906001600160a01b031681565b3480156101b8575f80fd5b506100d26101c736600461067a565b61042a565b3480156101d7575f80fd5b506100d26101e636600461067a565b6104a4565b3480156101f6575f80fd5b506100d261020536600461067a565b61052c565b348015610215575f80fd5b506100d2610224366004610663565b6105a6565b6001548110156102b95760405162461bcd60e51b815260206004820152604a60248201527f466565
5661756c743a207769746864726177616c20616d6f756e74206d75737460448201527f2062652067726561746572207468616e206d696e696d756d20776974686472616064820152691dd85b08185b5bdd5b9d60b21b608482015260a4015b60405180910390fd5b478082111561031d5760405162461bcd60e51b815260206004820152602a60248201527f4665655661756c743a20696e73756666696369656e742062616c616e636520746044820152696f20776974686472617760b01b60648201526084016102b0565b6004805483019055600354604080518481526001600160a01b0390921660208301523382820152517fc8a211cc64b6ed1b50595a9fcb1932b6d1e5a6e8ef15b60e5b1f988ea9086bba9181900360600190a1600254600354604080516020810182525f808252915163b2267a7b60e01b81526001600160a01b039485169463b2267a7b9488946103b79491909216928592906004016106a7565b5f604051808303818588803b1580156103ce575f80fd5b505af11580156103e0573d5f803e3d5ffd5b50505050505050565b476103f381610229565b50565b5f546001600160a01b0316331461041f5760405162461bcd60e51b81526004016102b090610711565b6104285f610614565b565b5f546001600160a01b031633146104535760405162461bcd60e51b81526004016102b090610711565b600280546001600160a01b038381166001600160a01b0319831681179093556040519116919082907f1c928c417a10a21c3cddad148c5dba5d710e4b1442d6d8a36de345935ad84612905f90a35050565b5f546001600160a01b031633146104cd5760405162461bcd60e51b81526004016102b090610711565b6001600160a01b0381166105235760405162461bcd60e51b815260206004820152601d60248201527f6e6577206f776e657220697320746865207a65726f206164647265737300000060448201526064016102b0565b6103f381610614565b5f546001600160a01b031633146105555760405162461bcd60e51b81526004016102b090610711565b600380546001600160a01b038381166001600160a01b0319831681179093556040519116919082907f7e1e96961a397c8aa26162fe259cc837afc95e33aad4945ddc61c18dabb7a6ad905f90a35050565b5f546001600160a01b031633146105cf5760405162461bcd60e51b81526004016102b090610711565b600180549082905560408051828152602081018490527f0d3c80219fe57713b9f9c83d1e51426792d0c14d8e330e65b102571816140965910160405180910390a15050565b5f80546001600160a01b038381166001600160a01b03198316811784556040519190921692
83917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b5f60208284031215610673575f80fd5b5035919050565b5f6020828403121561068a575f80fd5b81356001600160a01b03811681146106a0575f80fd5b9392505050565b60018060a01b03851681525f60208560208401526080604084015284518060808501525f5b818110156106e85786810183015185820160a0015282016106cc565b505f60a0828601015260a0601f19601f8301168501019250505082606083015295945050505050565b60208082526017908201527f63616c6c6572206973206e6f7420746865206f776e657200000000000000000060408201526060019056fea164736f6c6343000818000a", - "nonce": "0x0", - "storage": { - "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb92266", - "0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000000000000000000000000000000de0b6b3a7640000", - "0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000df45f4790e20509959fccc0d09245b216abdaa37", - "0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000f39fd6e51aad88f6f4ce6ab8827279cfffb92266" - } - }, - "0xdf45f4790e20509959fccc0d09245b216abdaa37": { - "balance": "0x7ffffffffffffffffffffffffffffffffffffffffffffff21f494c589c0000", - "code": "0x", - "nonce": "0x0", - "storage": { - "0x360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc": "0x0000000000000000000000000000000000000000000000000000000000000000" - } - }, - "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266": { - "balance": "0xde0b6b3a7640000", - "code": "0x", - "nonce": "0x1", - "storage": {} - }, - "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266": { - "balance": "0xD3C21BCECCEDA1000000" - }, - "0x70997970C51812dc3A010C7d01b50e0d17dc79C8": { - "balance": "0xD3C21BCECCEDA1000000" - }, - "0x3C44CdDdB6a900fa2b585dd299e03d12FA4293BC": { - "balance": "0xD3C21BCECCEDA1000000" - }, - "0x90F79bf6EB2c4f870365E785982E1f101E93b906": { - "balance": 
"0xD3C21BCECCEDA1000000" - }, - "0x15d34AAf54267DB7D7c367839AAf71A00a2C6A65": { - "balance": "0xD3C21BCECCEDA1000000" - }, - "0x9965507D1a55bcC2695C58ba16FB37d819B0A4dc": { - "balance": "0xD3C21BCECCEDA1000000" - }, - "0x976EA74026E726554dB657fA54763abd0C3a0aa9": { - "balance": "0xD3C21BCECCEDA1000000" - }, - "0x14dC79964da2C08b23698B3D3cc7Ca32193d9955": { - "balance": "0xD3C21BCECCEDA1000000" - }, - "0x23618e81E3f5cdF7f54C3d65f7FBc0aBf5B21E8f": { - "balance": "0xD3C21BCECCEDA1000000" - }, - "0xa0Ee7A142d267C1f36714E4a8F75612F20a79720": { - "balance": "0xD3C21BCECCEDA1000000" - }, - "0xBcd4042DE499D14e55001CcbB24a551F3b954096": { - "balance": "0xD3C21BCECCEDA1000000" - }, - "0x71bE63f3384f5fb98995898A86B02Fb2426c5788": { - "balance": "0xD3C21BCECCEDA1000000" - }, - "0xFABB0ac9d68B0B445fB7357272Ff202C5651694a": { - "balance": "0xD3C21BCECCEDA1000000" - }, - "0x1CBd3b2770909D4e10f157cABC84C7264073C9Ec": { - "balance": "0xD3C21BCECCEDA1000000" - }, - "0xdF3e18d64BC6A983f673Ab319CCaE4f1a57C7097": { - "balance": "0xD3C21BCECCEDA1000000" - }, - "0xcd3B766CCDd6AE721141F452C550Ca635964ce71": { - "balance": "0xD3C21BCECCEDA1000000" - }, - "0x2546BcD3c84621e976D8185a91A922aE77ECEc30": { - "balance": "0xD3C21BCECCEDA1000000" - }, - "0xbDA5747bFD65F08deb54cb465eB87D40e51B197E": { - "balance": "0xD3C21BCECCEDA1000000" - }, - "0xdD2FD4581271e230360230F9337D5c0430Bf44C0": { - "balance": "0xD3C21BCECCEDA1000000" - }, - "0x8626f6940E2eb28930eFb4CeF49B2d1F2C9C1199": { - "balance": "0xD3C21BCECCEDA1000000" - } - } - } \ No newline at end of file From 26b97e013d33b6a69d1bfbe6ddb60b3c2d2c1ae7 Mon Sep 17 00:00:00 2001 From: frisitano Date: Thu, 15 Jan 2026 02:33:37 +0000 Subject: [PATCH 5/9] updates --- Cargo.lock | 1 + book/src/debug-toolkit.md | 486 ++++++++-------------- crates/node/Cargo.toml | 2 + crates/node/src/bin/scroll_debug.rs | 30 +- crates/node/src/debug_toolkit/cli.rs | 60 +-- crates/node/src/debug_toolkit/commands.rs | 75 ++-- crates/node/src/debug_toolkit/repl.rs | 292 
+++++++++++-- crates/node/src/test_utils/fixture.rs | 43 +- 8 files changed, 563 insertions(+), 426 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c8f0502e..d87559fb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11113,6 +11113,7 @@ dependencies = [ "alloy-signer", "alloy-signer-aws", "alloy-signer-local", + "alloy-sol-types", "alloy-transport", "async-trait", "auto_impl", diff --git a/book/src/debug-toolkit.md b/book/src/debug-toolkit.md index 50f17f61..ca98f6c4 100644 --- a/book/src/debug-toolkit.md +++ b/book/src/debug-toolkit.md @@ -1,54 +1,76 @@ # Debug Toolkit -The Debug Toolkit is an interactive REPL (Read-Eval-Print Loop) for debugging, development, and hackathon scenarios. It provides a command-line interface to interact with rollup nodes, inspect chain state, inject transactions, and run custom actions. +The Debug Toolkit is an interactive REPL (Read-Eval-Print Loop) for debugging, development, and hackathon scenarios. It allows you to spin up local follower nodes that connect to a remote sequencer and L1, run tests, execute scripts, and inspect chain state. ## Getting Started +### Source Code + +The debug toolkit is available on the `feat/debug-toolkit` branch: + +**Repository:** [https://github.com/scroll-tech/rollup-node/tree/feat/debug-toolkit](https://github.com/scroll-tech/rollup-node/tree/feat/debug-toolkit) + +```bash +git clone https://github.com/scroll-tech/rollup-node.git +cd rollup-node +git checkout feat/debug-toolkit +``` + ### Building -The debug toolkit is built with the `debug-toolkit` feature flag: +Build with the `debug-toolkit` feature flag: ```bash cargo build -p rollup-node --features debug-toolkit --release ``` -### Running +## Connecting to a Remote Network -Launch the debug REPL using the `scroll-debug` binary: +The primary use case is connecting local follower nodes to a remote sequencer and L1. This allows you to run tests and scripts against a live network. 
-```bash -# Basic usage with dev chain and sequencer mode -cargo run --features debug-toolkit --bin scroll-debug -- --chain dev --sequencer +### Network Connection Info -# With multiple follower nodes -cargo run --features debug-toolkit --bin scroll-debug -- --chain dev --sequencer --followers 2 +``` +L1 RPC: http://ec2-54-167-214-30.compute-1.amazonaws.com:8545 +L1 Private Key: 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 +Sequencer HTTP: http://ec2-54-175-126-206.compute-1.amazonaws.com:8545 +Sequencer Enode: enode://3322bb29bba1f30f3bb40e816779f1be8ab3c14a5d14ff6c76d0585a63bdcc4ba25008be138780dafd03cb3e3ae4546da9a566b4ff9f1d237fa5d3d79bfdd219@54.175.126.206:30303 +Signer: 0xb674ff99cca262c99d3eab5b32796a99188543da +Genesis: tests/l2reth-genesis-e2e.json +``` -# With custom block time (auto-build every 1000ms) -cargo run --features debug-toolkit --bin scroll-debug -- --chain dev --sequencer --block-time 1000 +### Connect to Remote Network -# With a real L1 endpoint -cargo run --features debug-toolkit --bin scroll-debug -- --chain dev --sequencer --l1-url https://eth.llamarpc.com +```bash +cargo run --features debug-toolkit --bin scroll-debug -- \ + --followers 2 \ + --chain tests/l2reth-genesis-e2e.json \ + --bootnodes enode://3322bb29bba1f30f3bb40e816779f1be8ab3c14a5d14ff6c76d0585a63bdcc4ba25008be138780dafd03cb3e3ae4546da9a566b4ff9f1d237fa5d3d79bfdd219@54.175.126.206:30303 \ + --l1-url http://ec2-54-167-214-30.compute-1.amazonaws.com:8545 \ + --valid-signer 0xb674ff99cca262c99d3eab5b32796a99188543da ``` -### CLI Options +This creates local follower nodes that: +- Connect to the remote sequencer via P2P (`--bootnodes`) +- Sync L1 state from the remote L1 RPC (`--l1-url`) +- Validate blocks using the network's authorized signer (`--valid-signer`) +- Use the matching genesis configuration (`--chain`) + +### CLI Options Explained | Option | Description | |--------|-------------| -| `--chain ` | Chain to use: `dev`, `scroll-sepolia`, 
`scroll-mainnet`, or path to genesis JSON file (default: `dev`) | -| `--sequencer` | Enable sequencer mode | -| `--followers ` | Number of follower nodes to create (default: 0) | -| `--block-time ` | Block time in milliseconds (default: 0 = manual block building only) | -| `--allow-empty-blocks` | Allow building empty blocks (default: true) | -| `--l1-message-delay ` | L1 message inclusion delay in blocks (default: 0 = immediate) | -| `--l1-url ` | L1 RPC endpoint URL (optional, uses mock L1 if not specified) | - -Run `cargo run --features debug-toolkit --bin scroll-debug -- --help` to see all available options. - -## Quick Start: Multi-Node Environment with Mock L1 +| `--chain ` | Genesis configuration: `dev`, `scroll-sepolia`, `scroll-mainnet`, or path to JSON file | +| `--sequencer` | Enable local sequencer mode | +| `--followers ` | Number of local follower nodes to spin up (can be any number) | +| `--bootnodes ` | Remote sequencer enode URL to connect to | +| `--l1-url ` | Remote L1 RPC endpoint | +| `--valid-signer ` | Authorized block signer address for consensus validation | +| `--log-file ` | Path to log file (default: `./scroll-debug-.log`) | -This walkthrough demonstrates how to spin up a complete local environment with a mock L1, one sequencer, and two follower nodes. +## Local-Only Mode -### Starting the Environment +You can also run a fully local environment with a mock L1 and local sequencer for offline development: ```bash cargo run --features debug-toolkit --bin scroll-debug -- \ @@ -58,119 +80,47 @@ cargo run --features debug-toolkit --bin scroll-debug -- \ ``` This creates: -- **Node 0**: Sequencer (produces blocks) -- **Node 1**: Follower (receives blocks via P2P) -- **Node 2**: Follower (receives blocks via P2P) +- **Node 0**: Local sequencer (produces blocks) +- **Node 1-N**: Local followers (receive blocks via P2P) -### Understanding Mock L1 - -When no `--l1-url` is specified, the toolkit uses a **mock L1**. 
The mock L1 starts in an "unsynced" state, which means the sequencer won't produce blocks until you explicitly sync it. - -If you try to build a block before syncing L1: - -``` -scroll-debug [seq:0]> build -Error: L1 is not synced -Hint: Run 'l1 sync' to mark the mock L1 as synced before building blocks -``` - -### Step-by-Step Walkthrough - -**1. Check initial status:** - -``` -scroll-debug [seq:0]> status -=== Node 0 (Sequencer) === -Node: - Database: /tmp/.tmpXYZ/db/scroll.db - HTTP RPC: http://127.0.0.1:62491 -L2: - Head: #0 (0x1234abcd...) - Safe: #0 (0x1234abcd...) - Finalized: #0 (0x1234abcd...) - Synced: false -L1: - Head: #0 - Finalized: #0 - Processed: #0 - Synced: false -``` - -Note that L1 `Synced` is `false`. - -**2. Sync the mock L1:** +With mock L1, you must manually sync before building blocks: ``` scroll-debug [seq:0]> l1 sync -L1 synced event sent to all nodes -``` +L1 synced event sent -**3. Build your first block:** - -``` scroll-debug [seq:0]> build Block build triggered! - [EVENT] BlockSequenced { block: 1, hash: 0xabcd1234... } ``` -**4. Send a transaction and build another block:** +## Using the Network Handle -``` -scroll-debug [seq:0]> tx send 0x742d35Cc6634C0532925a3b844Bc9e7595f0bEb2 1000000000000000000 -Transaction sent! - Hash: 0x5678... - From: 0x1234... - To: 0x742d... - Value: 1000000000000000000 wei -Note: Run 'build' to include in a block +The `TestFixture` provides access to network handles for programmatic control. This is useful for writing custom actions and tests: -scroll-debug [seq:0]> build -Block build triggered! - [EVENT] BlockSequenced { block: 2, hash: 0xefgh5678... } -``` - -**5. Check that followers received the blocks:** - -``` -scroll-debug [seq:0]> node 1 -Switched to node 1 (Follower) +```rust +use rollup_node::test_utils::TestFixture; -scroll-debug [fol:1]> status -=== Node 1 (Follower) === -Node: - Database: /tmp/.tmpABC/db/scroll.db - HTTP RPC: http://127.0.0.1:62502 -L2: - Head: #2 (0xefgh5678...) 
- Safe: #0 (0x1234abcd...) - Finalized: #0 (0x1234abcd...) - Synced: false -... -``` +async fn example(fixture: &TestFixture) -> eyre::Result<()> { + // Access a node's network handle + let node = &fixture.nodes[0]; + let network_handle = node.rollup_manager_handle.get_network_handle().await?; -The follower's head is at block #2, showing it received the blocks via P2P. + // Get local node info + let local_record = network_handle.local_node_record(); + println!("Local enode: {}", local_record); -**6. Build multiple blocks quickly:** + // Access the inner network handle for P2P operations + let inner = network_handle.inner(); -``` -scroll-debug [seq:0]> run build-blocks 5 -Building 5 blocks (timeout: 5000ms per block)... - Block 1 triggered, waiting... sequenced at #3 - Block 2 triggered, waiting... sequenced at #4 - Block 3 triggered, waiting... sequenced at #5 - Block 4 triggered, waiting... sequenced at #6 - Block 5 triggered, waiting... sequenced at #7 -Done! Head is now at block #7 -``` + // Get connected peers + let peers = inner.get_all_peers().await?; + println!("Connected to {} peers", peers.len()); -**7. View all nodes:** + // Add a peer + inner.add_peer(peer_id, socket_addr); -``` -scroll-debug [seq:0]> nodes -Nodes: - [0] Sequencer * - [1] Follower - [2] Follower + Ok(()) +} ``` ## Commands @@ -187,8 +137,11 @@ Nodes: **Example:** ``` -scroll-debug [seq:0]> status -=== Node 0 (Sequencer) === +scroll-debug [fol:0]> status +=== Node 0 (Follower) === +Node: + Database: /tmp/.tmpXYZ/db/scroll.db + HTTP RPC: http://127.0.0.1:62491 L2: Head: #42 (0x1234abcd...) Safe: #40 (0x5678efgh...) 
@@ -203,23 +156,20 @@ L1: ### L1 Commands -These commands allow you to simulate L1 events: +These commands allow you to simulate L1 events (useful in local mode with mock L1): | Command | Description | |---------|-------------| | `l1 status` | Show L1 sync state | | `l1 sync` | Inject L1 synced event | | `l1 block ` | Inject new L1 block notification | -| `l1 message ` | Inject an L1 message | -| `l1 commit ` | Inject batch commit | -| `l1 finalize ` | Inject batch finalization | | `l1 reorg ` | Inject L1 reorg | ### Block & Transaction | Command | Description | |---------|-------------| -| `build` | Build a new block (sequencer mode) | +| `build` | Build a new block (local sequencer mode only) | | `tx send [idx]` | Send ETH transfer (value in wei, idx = wallet index) | | `tx pending` | List pending transactions | | `tx inject ` | Inject raw transaction | @@ -233,19 +183,6 @@ Transaction sent! From: 0x1234... To: 0x742d... Value: 1000000000000000000 wei -Note: Run 'build' to include in a block (sequencer mode) - -scroll-debug [seq:0]> build -Block build triggered! -``` - -**Viewing pending transactions:** - -``` -scroll-debug [seq:0]> tx pending -Pending Transactions (2): - [0] hash=0x1234abcd5678... from=0x742d35Cc... nonce=5 gas_price=1000000000 - [1] hash=0xabcdef123456... from=0x742d35Cc... nonce=6 gas_price=1000000000 ``` ### Wallet @@ -255,10 +192,10 @@ Pending Transactions (2): | `wallet` | Show wallet address, balance, and nonce | | `wallet gen` | Generate and list all available wallets | -The toolkit includes pre-funded test wallets. Use `wallet gen` to see all available wallets, then reference them by index in `tx send`: +The toolkit includes pre-funded test wallets: ``` -scroll-debug [seq:0]> wallet gen +scroll-debug [fol:0]> wallet gen Generated Wallets (10): Chain ID: 222222 @@ -268,9 +205,6 @@ Generated Wallets (10): [1] 0xabcdef1234567890... Balance: 1000000000000000000000 wei (1000.000000 ETH) ... - -scroll-debug [seq:0]> tx send 0x742d... 
1000000 2 -# Sends from wallet index 2 ``` ### Network @@ -280,30 +214,34 @@ scroll-debug [seq:0]> tx send 0x742d... 1000000 2 | `peers` | List connected peers and show local enode | | `peers connect ` | Connect to a peer (enode://...) | +**Example:** + +``` +scroll-debug [fol:0]> peers +Local Node: + Peer ID: 0x1234... + Enode: enode://abcd...@127.0.0.1:30303 + +Connected Peers (1): + 0x3322bb29... + Address: 54.175.126.206:30303 + Client: scroll-reth/v1.0.0 +``` + ### Events -The REPL streams chain events in real-time as they occur: +The REPL streams chain events in real-time: | Command | Description | |---------|-------------| | `events on` | Enable background event stream | | `events off` | Disable background event stream | -| `events [count]` | Stream next N events (default: 10) | | `events filter ` | Filter events by type (e.g., `Block*`, `L1*`) | | `events history [n]` | Show last N events (default: 20) | -**Example with events enabled:** - -``` -scroll-debug [seq:0]> build - [EVENT] BlockSequenced { block: 1, hash: 0xabcd... } - [EVENT] ChainExtended { block: 1 } -Block build triggered! -``` - ### Custom Actions -Run pre-built or custom actions with full access to the test fixture: +Run pre-built or custom actions: | Command | Description | |---------|-------------| @@ -318,22 +256,9 @@ Run pre-built or custom actions with full access to the test fixture: | `stress-test [build_every]` | Send multiple transactions and build blocks | | `sync-all` | Send L1 sync event to all nodes | -**Example:** - -``` -scroll-debug [seq:0]> run build-blocks 10 100 -Running action: build-blocks - -Building 10 blocks with 100ms delay... - Block 1 triggered - Block 2 triggered - ... -Done! 
Head is now at block #10 -``` - ### Node Management -When running with multiple nodes (e.g., `--followers 2`): +Switch between nodes when running multiple followers: | Command | Description | |---------|-------------| @@ -341,17 +266,14 @@ When running with multiple nodes (e.g., `--followers 2`): | `nodes` | List all nodes in fixture | ``` -scroll-debug [seq:0]> nodes +scroll-debug [fol:0]> nodes Nodes: - [0] Sequencer * + [0] Follower * [1] Follower [2] Follower -scroll-debug [seq:0]> node 1 +scroll-debug [fol:0]> node 1 Switched to node 1 (Follower) - -scroll-debug [fol:1]> status -... ``` ### Database @@ -360,10 +282,8 @@ scroll-debug [fol:1]> status |---------|-------------| | `db` | Show database path and access command | -The `db` command shows the SQLite database path and provides a command to access it from another terminal: - ``` -scroll-debug [seq:0]> db +scroll-debug [fol:0]> db Database Info: Path: /path/to/datadir/db/scroll.db @@ -377,7 +297,22 @@ Useful queries: SELECT * FROM l2_block ORDER BY number DESC LIMIT 10; ``` -The database path is also shown in the `status` command output. +### Logs + +| Command | Description | +|---------|-------------| +| `logs` | Show log file path and tail command | + +Tracing logs are written to a file to keep the REPL display clean: + +``` +scroll-debug [fol:0]> logs +Log File: + Path: ./scroll-debug-12345.log + +View logs in another terminal: + tail -f ./scroll-debug-12345.log +``` ### Other @@ -388,15 +323,7 @@ The database path is also shown in the `status` command output. ## Creating Custom Actions -You can create custom actions by implementing the `Action` trait. Actions have full access to the `TestFixture`, allowing you to: - -- Access all nodes and their RPC interfaces -- Send transactions from test wallets -- Trigger block building -- Inject L1 events -- Query chain state - -### Implementing an Action +You can create custom actions by implementing the `Action` trait. 
Actions have full access to the `TestFixture`: ```rust use rollup_node::debug_toolkit::actions::{Action, ActionRegistry}; @@ -427,24 +354,20 @@ impl Action for MyCustomAction { // Access nodes println!("Fixture has {} nodes", fixture.nodes.len()); + // Access network handle + let node = &fixture.nodes[0]; + let network_handle = node.rollup_manager_handle.get_network_handle().await?; + println!("Connected peers: {}", network_handle.inner().num_connected_peers()); + // Access wallet let wallet = fixture.wallet.lock().await; println!("Wallet address: {:?}", wallet.inner.address()); drop(wallet); - // Access specific node - let node = &fixture.nodes[0]; + // Query chain state let status = node.rollup_manager_handle.status().await?; println!("Head block: {}", status.l2.fcs.head_block_info().number); - // Trigger block building (sequencer only) - if node.is_sequencer() { - node.rollup_manager_handle.build_block(); - } - - // Inject L1 events - fixture.l1().sync().await?; - Ok(()) } } @@ -472,134 +395,79 @@ impl ActionRegistry { } ``` -Or register programmatically before running the REPL: - -```rust -let fixture = TestFixture::builder() - .with_chain("dev") - .sequencer() - .build() - .await?; - -let mut repl = DebugRepl::new(fixture); -repl.action_registry_mut().register(Box::new(MyCustomAction)); -repl.run().await?; -``` - -## Use Cases - -### Hackathon Development +## Useful Cast Commands -The debug toolkit is ideal for hackathons where you need to: +Use [Foundry's `cast`](https://book.getfoundry.sh/cast/) to interact with the network. The `status` command in the REPL shows the HTTP RPC endpoint for your local nodes. 
-- Quickly spin up a local Scroll environment -- Test smart contract interactions -- Debug transaction flows -- Simulate L1-L2 message passing +### Check Block Status -### Integration Testing - -Use custom actions to create reproducible test scenarios: - -```rust -struct IntegrationTestAction; +```bash +# Check latest L1 block +cast block latest --rpc-url http://ec2-54-167-214-30.compute-1.amazonaws.com:8545 -#[async_trait] -impl Action for IntegrationTestAction { - fn name(&self) -> &'static str { "integration-test" } - fn description(&self) -> &'static str { "Run integration test suite" } +# Check latest L2 block (sequencer) +cast block latest --rpc-url http://ec2-54-175-126-206.compute-1.amazonaws.com:8545 +``` - async fn execute(&self, fixture: &mut TestFixture, _args: &[String]) -> eyre::Result<()> { - // 1. Sync L1 - fixture.l1().sync().await?; +### Check Sync Status - // 2. Send some transactions - // ... +```bash +# L2 sequencer sync status +cast rpc rollupNode_status --rpc-url http://ec2-54-175-126-206.compute-1.amazonaws.com:8545 | jq +``` - // 3. Build blocks - let sequencer = fixture.nodes.iter().find(|n| n.is_sequencer()).unwrap(); - sequencer.rollup_manager_handle.build_block(); +### Check and Manage Peers - // 4. Verify state - // ... +```bash +# Check connected peers +cast rpc admin_peers --rpc-url http://ec2-54-175-126-206.compute-1.amazonaws.com:8545 - Ok(()) - } -} +# Get node info (enode URL) +cast rpc admin_nodeInfo --rpc-url http://ec2-54-175-126-206.compute-1.amazonaws.com:8545 | jq -r '.enode' ``` -### Debugging - -Inspect chain state interactively: +### Enable Sequencing -``` -scroll-debug [seq:0]> block 42 -Block #42 - Hash: 0xabcd... - Parent: 0x1234... - Timestamp: 1705123456 - Gas Used: 21000 - Gas Limit: 20000000 - Txs: 3 - [0] hash=0x1111... - [1] hash=0x2222... - [2] hash=0x3333... - -scroll-debug [seq:0]> fcs -Forkchoice State: - Head: - Number: 42 - Hash: 0xabcd... - Safe: - Number: 40 - Hash: 0x5678... 
- Finalized: - Number: 35 - Hash: 0x9abc... +```bash +cast rpc rollupNodeAdmin_enableAutomaticSequencing --rpc-url http://ec2-54-175-126-206.compute-1.amazonaws.com:8545 ``` -## External Tools +### Send Transactions -### Using Cast +```bash +# Get wallet address from private key +cast wallet address --private-key 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 -The `status` command shows the HTTP RPC endpoint for each node. You can use [Foundry's `cast`](https://book.getfoundry.sh/cast/) to interact with the node from another terminal: +# Check balance on L2 +cast balance 0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266 --ether --rpc-url http://ec2-54-175-126-206.compute-1.amazonaws.com:8545 -``` -scroll-debug [seq:0]> status -=== Node 0 (Sequencer) === -Node: - Database: /tmp/.tmpXYZ/db/scroll.db - HTTP RPC: http://127.0.0.1:62491 -... +# Send L2 transaction +cast send 0x0000000000000000000000000000000000000002 \ + --rpc-url http://ec2-54-175-126-206.compute-1.amazonaws.com:8545 \ + --value 0.00001ether \ + --private-key 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 ``` -Then in another terminal, use `cast` with the HTTP RPC URL: +### L1 to L2 Bridge (Send L1 Message) ```bash -# Get the current block number -cast block-number --rpc-url http://127.0.0.1:62491 - -# Get block details -cast block latest --rpc-url http://127.0.0.1:62491 - -# Get an account balance -cast balance 0x742d35Cc6634C0532925a3b844Bc9e7595f0bEb2 --rpc-url http://127.0.0.1:62491 - -# Send a transaction (using a test private key) -cast send 0x742d35Cc6634C0532925a3b844Bc9e7595f0bEb2 \ - --value 1ether \ +# Send message from L1 to L2 via the messenger contract +cast send --rpc-url http://ec2-54-167-214-30.compute-1.amazonaws.com:8545 \ --private-key 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 \ - --rpc-url http://127.0.0.1:62491 + --legacy --gas-price 0.1gwei --gas-limit 200000 --value 0.001ether \ + 
"0x8A791620dd6260079BF849Dc5567aDC3F2FdC318" \ + "sendMessage(address _to, uint256 _value, bytes memory _message, uint256 _gasLimit)" \ + 0x0000000000000000000000000000000000000002 0x1 0x 200000 -# Call a contract -cast call 0xContractAddress "balanceOf(address)" 0xUserAddress --rpc-url http://127.0.0.1:62491 - -# Get transaction receipt -cast receipt 0xTransactionHash --rpc-url http://127.0.0.1:62491 +# Check L1 message queue index +cast call --rpc-url http://ec2-54-167-214-30.compute-1.amazonaws.com:8545 \ + "0xDc64a140Aa3E981100a9becA4E685f962f0cF6C9" \ + "nextCrossDomainMessageIndex()(uint256)" ``` -This is useful for: -- Testing smart contract deployments and interactions -- Debugging transaction behavior -- Scripting complex test scenarios -- Using familiar Ethereum tooling with your local rollup node +### Contract Addresses + +| Contract | Address | +|----------|---------| +| L1 Messenger | `0x8A791620dd6260079BF849Dc5567aDC3F2FdC318` | +| L1 Message Queue | `0xDc64a140Aa3E981100a9becA4E685f962f0cF6C9` | diff --git a/crates/node/Cargo.toml b/crates/node/Cargo.toml index 854bf2e8..947d8ad3 100644 --- a/crates/node/Cargo.toml +++ b/crates/node/Cargo.toml @@ -100,6 +100,7 @@ scroll-alloy-rpc-types-engine = { workspace = true, optional = true } scroll-alloy-rpc-types.workspace = true # debug-toolkit dependencies +alloy-sol-types = { workspace = true, optional = true } colored = { version = "3.0", optional = true } crossterm = { version = "0.28", optional = true } glob = { version = "0.3", optional = true } @@ -155,6 +156,7 @@ alloy-rpc-types-eth = { workspace = true } js-tracer = ["reth-scroll-node/js-tracer", "reth-scroll-rpc/js-tracer"] debug-toolkit = [ "test-utils", + "dep:alloy-sol-types", "dep:colored", "dep:crossterm", "dep:glob", diff --git a/crates/node/src/bin/scroll_debug.rs b/crates/node/src/bin/scroll_debug.rs index 085afd7a..9fa89c10 100644 --- a/crates/node/src/bin/scroll_debug.rs +++ b/crates/node/src/bin/scroll_debug.rs @@ -22,17 +22,39 @@ fn 
main() -> eyre::Result<()> { use clap::Parser; use rollup_node::debug_toolkit::DebugArgs; + use std::fs::File; + use tracing_subscriber::{fmt, layer::SubscriberExt, util::SubscriberInitExt, EnvFilter}; - // Initialize tracing + // Parse args first so we can use log_file option + let args = DebugArgs::parse(); + + // Set default log level if std::env::var("RUST_LOG").is_err() { std::env::set_var("RUST_LOG", "info"); } - tracing_subscriber::fmt::init(); + + // Determine log file path (default to current directory) + let log_path = args.log_file.clone().unwrap_or_else(|| { + std::env::current_dir() + .unwrap_or_else(|_| std::path::PathBuf::from(".")) + .join(format!("scroll-debug-{}.log", std::process::id())) + }); + + // Initialize tracing to write to file + let file = File::create(&log_path)?; + tracing_subscriber::registry() + .with(EnvFilter::from_default_env()) + .with(fmt::layer().with_writer(file).with_ansi(false)) + .init(); + + eprintln!("Logs: {}", log_path.display()); + eprintln!("Tail: tail -f {}", log_path.display()); + eprintln!(); + eprintln!("Starting nodes (this may take a moment)..."); // Create tokio runtime and run tokio::runtime::Builder::new_multi_thread().enable_all().build()?.block_on(async { - let args = DebugArgs::parse(); - args.run().await + args.run(Some(log_path)).await }) } diff --git a/crates/node/src/debug_toolkit/cli.rs b/crates/node/src/debug_toolkit/cli.rs index 1d20d7ff..8e3601cb 100644 --- a/crates/node/src/debug_toolkit/cli.rs +++ b/crates/node/src/debug_toolkit/cli.rs @@ -1,7 +1,10 @@ //! CLI subcommand for the debug toolkit. 
-use crate::test_utils::TestFixtureBuilder; +use crate::{test_utils::TestFixtureBuilder, L1ProviderArgs}; use alloy_primitives::Address; +use alloy_provider::{layers::CacheLayer, ProviderBuilder}; +use alloy_rpc_client::RpcClient; +use alloy_transport::layers::RetryBackoffLayer; use clap::Parser; use reth_network_peers::TrustedPeer; use std::{path::PathBuf, str::FromStr}; @@ -26,18 +29,6 @@ pub struct DebugArgs { #[arg(long)] pub datadir: Option, - /// Block time in milliseconds (0 = manual block building only). - #[arg(long, default_value = "0")] - pub block_time: u64, - - /// Allow building empty blocks (default: true when sequencer is enabled). - #[arg(long, default_value = "true")] - pub allow_empty_blocks: bool, - - /// L1 message inclusion delay in blocks (0 = immediate). - #[arg(long, default_value = "0")] - pub l1_message_delay: u64, - /// L1 RPC endpoint URL (optional, uses mock L1 if not specified). #[arg(long)] pub l1_url: Option, @@ -49,11 +40,15 @@ pub struct DebugArgs { /// The valid signer address for the network. #[arg(long)] pub valid_signer: Option
, + + /// Path to log file. Defaults to ./scroll-debug-.log + #[arg(long)] + pub log_file: Option, } impl DebugArgs { /// Run the debug toolkit with these arguments. - pub async fn run(self) -> eyre::Result<()> { + pub async fn run(self, log_path: Option) -> eyre::Result<()> { use super::DebugRepl; // Build the fixture @@ -80,16 +75,30 @@ impl DebugArgs { builder.config_mut().test = false; } - // Apply sequencer settings - builder = builder - .block_time(self.block_time) - .allow_empty_blocks(self.allow_empty_blocks) - .with_l1_message_delay(self.l1_message_delay); - - // Apply L1 URL if provided + // Apply L1 URL if provided - build provider for REPL access if let Some(l1_url) = self.l1_url { - let config = builder.config_mut(); - config.l1_provider_args.url = Some(l1_url); + builder.config_mut().l1_provider_args.url = Some(l1_url.clone()); + + // Build the L1 provider with retry and cache layers + let L1ProviderArgs { + max_retries, + initial_backoff, + compute_units_per_second, + cache_max_items, + .. 
+ } = L1ProviderArgs::default(); + + let client = RpcClient::builder() + .layer(RetryBackoffLayer::new( + max_retries, + initial_backoff, + compute_units_per_second, + )) + .http(l1_url); + let cache_layer = CacheLayer::new(cache_max_items); + let provider = ProviderBuilder::new().layer(cache_layer).connect_client(client); + + builder = builder.with_l1_provider(Box::new(provider)); } // Parse and apply bootnodes if provided @@ -112,6 +121,9 @@ impl DebugArgs { // Create and run REPL let mut repl = DebugRepl::new(fixture); + if let Some(path) = log_path { + repl.set_log_path(path); + } repl.run().await } } @@ -128,5 +140,5 @@ pub async fn main() -> eyre::Result<()> { // Parse arguments and run let args = DebugArgs::parse(); - args.run().await + args.run(None).await } diff --git a/crates/node/src/debug_toolkit/commands.rs b/crates/node/src/debug_toolkit/commands.rs index 7125159e..d69f99d6 100644 --- a/crates/node/src/debug_toolkit/commands.rs +++ b/crates/node/src/debug_toolkit/commands.rs @@ -9,6 +9,8 @@ use std::str::FromStr; pub enum Command { /// Show node status. Status, + /// Show detailed sync status. + SyncStatus, /// Show block details. Block(BlockArg), /// List blocks in range. @@ -40,6 +42,8 @@ pub enum Command { Nodes, /// Show database path and access command. Db, + /// Show log file path. + Logs, /// Show help. Help, /// Exit the REPL. @@ -80,14 +84,17 @@ pub enum L1Command { Sync, /// Inject new L1 block. Block(u64), - /// Inject L1 message (JSON). - Message(String), - /// Inject batch commit (JSON). - Commit(String), - /// Inject batch finalization. - Finalize(u64), /// Inject L1 reorg. Reorg(u64), + /// Show L1 message queue status. + Messages, + /// Send L1 message (bridge to L2). + Send { + /// Recipient address on L2. + to: Address, + /// Value to send. + value: U256, + }, } /// Transaction-related commands. @@ -133,8 +140,6 @@ pub enum EventsCommand { On, /// Disable background event stream. Off, - /// Stream next N events. 
- Stream(usize), /// Set event filter. Filter(Option), /// Show event history. @@ -155,6 +160,7 @@ impl Command { match cmd.as_str() { "status" => Self::Status, + "sync-status" | "syncstatus" => Self::SyncStatus, "block" => Self::parse_block(args), "blocks" => Self::parse_blocks(args), "fcs" | "forkchoice" => Self::Fcs, @@ -168,6 +174,7 @@ impl Command { "node" => Self::parse_node(args), "nodes" => Self::Nodes, "db" | "database" => Self::Db, + "logs" | "log" => Self::Logs, "help" | "?" => Self::Help, "exit" | "quit" | "q" => Self::Exit, _ => Self::Unknown(cmd), @@ -210,27 +217,6 @@ impl Command { Self::Unknown("l1 block requires a block number".to_string()) } } - "message" | "msg" => { - if subargs.is_empty() { - Self::Unknown("l1 message requires JSON data".to_string()) - } else { - Self::L1(L1Command::Message(subargs.join(" "))) - } - } - "commit" => { - if subargs.is_empty() { - Self::Unknown("l1 commit requires JSON data".to_string()) - } else { - Self::L1(L1Command::Commit(subargs.join(" "))) - } - } - "finalize" => { - if let Some(n) = subargs.first().and_then(|s| s.parse::().ok()) { - Self::L1(L1Command::Finalize(n)) - } else { - Self::Unknown("l1 finalize requires a batch index".to_string()) - } - } "reorg" => { if let Some(n) = subargs.first().and_then(|s| s.parse::().ok()) { Self::L1(L1Command::Reorg(n)) @@ -238,6 +224,16 @@ impl Command { Self::Unknown("l1 reorg requires a block number".to_string()) } } + "messages" | "msg" | "queue" => Self::L1(L1Command::Messages), + "send" => { + if subargs.len() < 2 { + return Self::Unknown("l1 send requires ".to_string()); + } + match (Address::from_str(subargs[0]), U256::from_str(subargs[1])) { + (Ok(to), Ok(value)) => Self::L1(L1Command::Send { to, value }), + _ => Self::Unknown("l1 send: invalid address or value".to_string()), + } + } _ => Self::Unknown(format!("l1 {}", subcmd)), } } @@ -303,7 +299,7 @@ impl Command { } fn parse_events(args: &[&str]) -> Self { - let subcmd = 
args.first().copied().unwrap_or("stream"); + let subcmd = args.first().copied().unwrap_or("history"); let subargs = if args.len() > 1 { &args[1..] } else { &[] }; match subcmd { @@ -317,14 +313,7 @@ impl Command { let count = subargs.first().and_then(|s| s.parse().ok()).unwrap_or(20); Self::Events(EventsCommand::History(count)) } - _ => { - // Try to parse as a number for stream count - if let Ok(count) = subcmd.parse::() { - Self::Events(EventsCommand::Stream(count)) - } else { - Self::Events(EventsCommand::Stream(10)) - } - } + _ => Self::Unknown("Unknown events command".to_string()), } } @@ -353,6 +342,7 @@ pub fn print_help() { println!(); println!("{}", "Status & Inspection:".underline()); println!(" status Show node status (head, safe, finalized, L1 state)"); + println!(" sync-status Show detailed sync status (L1/L2 sync state)"); println!(" block [n|latest] Display block details"); println!(" blocks List blocks in range"); println!(" fcs Show forkchoice state"); @@ -361,10 +351,9 @@ pub fn print_help() { println!(" l1 status Show L1 sync state"); println!(" l1 sync Inject L1 synced event"); println!(" l1 block Inject new L1 block notification"); - println!(" l1 message Inject an L1 message"); - println!(" l1 commit Inject batch commit"); - println!(" l1 finalize Inject batch finalization"); println!(" l1 reorg Inject L1 reorg"); + println!(" l1 messages Show L1 message queue info (requires --l1-url)"); + println!(" l1 send Show cast command for L1->L2 bridge transfer"); println!(); println!("{}", "Block & Transaction:".underline()); println!(" build Build a new block (sequencer mode)"); @@ -383,7 +372,6 @@ pub fn print_help() { println!("{}", "Events:".underline()); println!(" events on Enable background event stream"); println!(" events off Disable background event stream"); - println!(" events [count] Stream next N events (default: 10)"); println!(" events filter Filter events by type (e.g., Block*, L1*)"); println!(" events history [n] Show last N events 
(default: 20)"); println!(); @@ -398,6 +386,9 @@ pub fn print_help() { println!("{}", "Database:".underline()); println!(" db Show database path and access command"); println!(); + println!("{}", "Logs:".underline()); + println!(" logs Show log file path and tail command"); + println!(); println!("{}", "Other:".underline()); println!(" help Show this help message"); println!(" exit Exit the REPL"); diff --git a/crates/node/src/debug_toolkit/repl.rs b/crates/node/src/debug_toolkit/repl.rs index d9fc668f..69ecf6ed 100644 --- a/crates/node/src/debug_toolkit/repl.rs +++ b/crates/node/src/debug_toolkit/repl.rs @@ -9,10 +9,13 @@ use super::{ event_stream::EventStreamState, }; use crate::test_utils::{fixture::NodeType, TestFixture}; -use alloy_consensus::{SignableTransaction, TxEip1559}; +use alloy_consensus::{SignableTransaction, TxEip1559, TxLegacy}; use alloy_eips::{eip2718::Encodable2718, BlockNumberOrTag}; use alloy_network::{TransactionResponse, TxSignerSync}; -use alloy_primitives::TxKind; +use alloy_primitives::{address, Address, Bytes, TxKind, U256}; +use alloy_sol_types::{sol, SolCall}; +use alloy_rpc_types_eth::TransactionRequest; +use alloy_signer_local::PrivateKeySigner; use colored::Colorize; use crossterm::{ event::{self, Event, KeyCode, KeyModifiers}, @@ -24,7 +27,29 @@ use reth_network_api::Peers; use reth_network_peers::NodeRecord; use reth_rpc_api::EthApiServer; use reth_transaction_pool::TransactionPool; -use std::{io::Write, str::FromStr, time::Duration}; +use std::{io::Write, path::PathBuf, str::FromStr, time::Duration}; + +// L1 contract addresses +const L1_MESSENGER_ADDRESS: Address = address!("8A791620dd6260079BF849Dc5567aDC3F2FdC318"); +const L1_MESSAGE_QUEUE_ADDRESS: Address = address!("Dc64a140Aa3E981100a9becA4E685f962f0cF6C9"); + +// L1 contract interfaces +sol! 
{ + /// L1 Message Queue contract interface + interface IL1MessageQueue { + function nextCrossDomainMessageIndex() external view returns (uint256); + } + + /// L1 Messenger contract interface + interface IL1ScrollMessenger { + function sendMessage( + address _to, + uint256 _value, + bytes memory _message, + uint256 _gasLimit + ) external payable; + } +} /// Interactive REPL for debugging rollup nodes. pub struct DebugRepl { @@ -38,6 +63,8 @@ pub struct DebugRepl { event_streams: Vec, /// Registry of custom actions. action_registry: ActionRegistry, + /// Path to the log file. + log_path: Option, } impl std::fmt::Debug for DebugRepl { @@ -47,6 +74,7 @@ impl std::fmt::Debug for DebugRepl { .field("active_node", &self.active_node) .field("event_streams", &self.event_streams) .field("action_registry", &"ActionRegistry { ... }") + .field("log_path", &self.log_path) .finish_non_exhaustive() } } @@ -69,6 +97,7 @@ impl DebugRepl { active_node: 0, event_streams, action_registry: ActionRegistry::new(), + log_path: None, } } @@ -82,7 +111,19 @@ impl DebugRepl { }) .collect(); - Self { fixture, running: false, active_node: 0, event_streams, action_registry } + Self { + fixture, + running: false, + active_node: 0, + event_streams, + action_registry, + log_path: None, + } + } + + /// Set the log file path. + pub fn set_log_path(&mut self, path: PathBuf) { + self.log_path = Some(path); } /// Get mutable access to the action registry to register custom actions. 
@@ -224,6 +265,7 @@ impl DebugRepl { match cmd { Command::Status => self.cmd_status().await, + Command::SyncStatus => self.cmd_sync_status().await, Command::Block(arg) => self.cmd_block(arg).await, Command::Blocks { from, to } => self.cmd_blocks(from, to).await, Command::Fcs => self.cmd_fcs().await, @@ -237,6 +279,7 @@ impl DebugRepl { Command::Node(idx) => self.cmd_switch_node(idx), Command::Nodes => self.cmd_list_nodes(), Command::Db => self.cmd_db(), + Command::Logs => self.cmd_logs(), Command::Help => { print_help(); Ok(()) @@ -311,6 +354,63 @@ impl DebugRepl { Ok(()) } + /// Show detailed sync status (rollupNode_status RPC equivalent). + async fn cmd_sync_status(&self) -> eyre::Result<()> { + let node = &self.fixture.nodes[self.active_node]; + let status = node.rollup_manager_handle.status().await?; + + println!("{}", "Sync Status:".bold()); + println!(); + + // L1 Sync Status + println!("{}", "L1 Sync:".underline()); + println!( + " Status: {}", + if status.l1.status.is_synced() { + "SYNCED".green() + } else { + format!("{:?}", status.l1.status).yellow().to_string().into() + } + ); + println!(" Latest: #{}", status.l1.latest.to_string().cyan()); + println!(" Finalized: #{}", status.l1.finalized); + println!(" Processed: #{}", status.l1.processed); + println!(); + + // L2 Sync Status + println!("{}", "L2 Sync:".underline()); + println!( + " Status: {}", + if status.l2.status.is_synced() { + "SYNCED".green() + } else { + format!("{:?}", status.l2.status).yellow().to_string().into() + } + ); + println!(); + + // Forkchoice State + let fcs = &status.l2.fcs; + println!("{}", "Forkchoice:".underline()); + println!( + " Head: #{} ({:.12}...)", + fcs.head_block_info().number.to_string().green(), + format!("{:?}", fcs.head_block_info().hash) + ); + println!( + " Safe: #{} ({:.12}...)", + fcs.safe_block_info().number.to_string().yellow(), + format!("{:?}", fcs.safe_block_info().hash) + ); + println!( + " Finalized: #{} ({:.12}...)", + 
fcs.finalized_block_info().number.to_string().blue(), + format!("{:?}", fcs.finalized_block_info().hash) + ); + + Ok(()) + } + /// Show block details. async fn cmd_block(&self, arg: BlockArg) -> eyre::Result<()> { let node = &self.fixture.nodes[self.active_node]; @@ -420,30 +520,138 @@ impl DebugRepl { self.fixture.l1().new_block(n).await?; println!("{}", format!("L1 block {} notification sent", n).green()); } - L1Command::Message(json) => { - // Parse JSON and inject L1 message - // For now, just show that we received the command - println!( - "{}", - format!("L1 message injection not yet implemented. JSON: {}", json).yellow() - ); - } - L1Command::Commit(json) => { - println!( - "{}", - format!("Batch commit injection not yet implemented. JSON: {}", json).yellow() - ); - } - L1Command::Finalize(idx) => { - println!( - "{}", - format!("Batch finalization for index {} not yet implemented", idx).yellow() - ); - } L1Command::Reorg(block) => { self.fixture.l1().reorg_to(block).await?; println!("{}", format!("L1 reorg to block {} sent", block).green()); } + L1Command::Messages => { + println!("{}", "L1 Message Queue:".bold()); + + let Some(provider) = &self.fixture.l1_provider else { + println!( + "{}", + "No L1 provider available. Start with --l1-url to enable L1 commands." + .yellow() + ); + return Ok(()); + }; + + // Use sol! generated call type for encoding + let call = IL1MessageQueue::nextCrossDomainMessageIndexCall {}; + let call_request = TransactionRequest::default() + .to(L1_MESSAGE_QUEUE_ADDRESS) + .input(call.abi_encode().into()); + + match provider.call(call_request).await { + Ok(result) => { + // Decode uint256 from result using sol! 
generated return type + match IL1MessageQueue::nextCrossDomainMessageIndexCall::abi_decode_returns( + &result, + ) { + Ok(index) => { + println!(" Next Message Index: {}", index.to_string().green()); + } + Err(e) => { + println!("{}", format!("Failed to decode response: {}", e).red()); + } + } + } + Err(e) => { + println!("{}", format!("Failed to query message queue: {}", e).red()); + } + } + } + L1Command::Send { to, value } => { + println!("{}", "L1 -> L2 Bridge Transfer:".bold()); + + let Some(provider) = &self.fixture.l1_provider else { + println!( + "{}", + "No L1 provider available. Start with --l1-url to enable L1 commands." + .yellow() + ); + return Ok(()); + }; + + println!(" To: {:?}", to); + println!(" Value: {} wei", value); + println!(); + + // Use sol! generated call type for encoding + let call = IL1ScrollMessenger::sendMessageCall { + _to: to, + _value: value, + _message: Bytes::new(), + _gasLimit: U256::from(200000u64), + }; + let calldata = call.abi_encode(); + + // Use the default private key for L1 transactions + let private_key = + "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"; + let signer: PrivateKeySigner = private_key.parse().expect("valid private key"); + let from_address = signer.address(); + + // Get chain ID, nonce from L1 + let chain_id = provider.get_chain_id().await?; + let nonce = provider.get_transaction_count(from_address).await?; + + // Build a legacy transaction (for compatibility with local L1) + // Use fixed 0.1 gwei gas price like the cast command + let mut tx = TxLegacy { + chain_id: Some(chain_id), + nonce, + gas_price: 100_000_000, // 0.1 gwei + gas_limit: 200_000, + to: TxKind::Call(L1_MESSENGER_ADDRESS), + value, + input: calldata.into(), + }; + + // Sign the transaction + let signature = signer.sign_transaction_sync(&mut tx)?; + let signed = tx.into_signed(signature); + let raw_tx = signed.encoded_2718(); + + // Send the transaction and wait for receipt + println!("{}", "Sending 
transaction...".dimmed()); + match provider.send_raw_transaction(&raw_tx).await { + Ok(pending) => { + let tx_hash = *pending.tx_hash(); + println!("{}", "Transaction sent!".green()); + println!(" Hash: {:?}", tx_hash); + println!(" From: {:?}", from_address); + println!(); + println!("{}", "Waiting for receipt...".dimmed()); + match pending.get_receipt().await { + Ok(receipt) => { + let status_str = if receipt.status() { + "Success".green() + } else { + "Failed".red() + }; + println!(" Status: {}", status_str); + println!(" Block: #{}", receipt.block_number.unwrap_or(0)); + println!(); + println!( + "{}", + "The L1 message will be included in L2 after L1 block finalization." + .dimmed() + ); + } + Err(e) => { + println!( + "{}", + format!("Failed to get receipt: {}", e).yellow() + ); + } + } + } + Err(e) => { + println!("{}", format!("Failed to send transaction: {}", e).red()); + } + } + } } Ok(()) } @@ -740,27 +948,6 @@ impl DebugRepl { event_stream.disable(); println!("{}", "Event stream disabled".yellow()); } - EventsCommand::Stream(count) => { - println!("Streaming {} events (Ctrl+C to stop)...", count); - let mut received = 0; - let node = &mut self.fixture.nodes[self.active_node]; - - while received < count { - tokio::select! { - event = node.chain_orchestrator_rx.next() => { - if let Some(event) = event { - received += 1; - let formatted = event_stream.format_event(&event); - println!("[{}] {}", received, formatted); - } - } - _ = tokio::time::sleep(Duration::from_secs(30)) => { - println!("{}", "Timeout waiting for events".yellow()); - break; - } - } - } - } EventsCommand::Filter(pattern) => { event_stream.set_filter(pattern.clone()); if let Some(p) = pattern { @@ -881,4 +1068,19 @@ impl DebugRepl { Ok(()) } + + /// Show log file path and tail command. 
+ fn cmd_logs(&self) -> eyre::Result<()> { + println!("{}", "Log File:".bold()); + if let Some(path) = &self.log_path { + println!(" Path: {}", path.display()); + println!(); + println!("{}", "View logs in another terminal:".underline()); + println!(" tail -f {}", path.display()); + } else { + println!(" {}", "No log file configured (logs going to stdout)".dimmed()); + } + + Ok(()) + } } diff --git a/crates/node/src/test_utils/fixture.rs b/crates/node/src/test_utils/fixture.rs index c59b7876..b2355132 100644 --- a/crates/node/src/test_utils/fixture.rs +++ b/crates/node/src/test_utils/fixture.rs @@ -9,6 +9,8 @@ use crate::{ RollupNodeNetworkArgs, RpcArgs, ScrollRollupNode, ScrollRollupNodeConfig, SequencerArgs, SignerArgs, }; +use alloy_network::Ethereum; +use alloy_provider::Provider; use alloy_eips::BlockNumberOrTag; use alloy_primitives::Address; @@ -42,8 +44,10 @@ use std::{ }; use tokio::sync::Mutex; +/// L1 provider type for making L1 RPC calls. +pub type L1Provider = Box + Send + Sync>; + /// Main test fixture providing a high-level interface for testing rollup nodes. -#[derive(Debug)] pub struct TestFixture { /// The list of nodes in the test setup. pub nodes: Vec, @@ -51,10 +55,23 @@ pub struct TestFixture { pub wallet: Arc>, /// Chain spec used by the nodes. pub chain_spec: Arc<::ChainSpec>, + /// L1 provider for making L1 RPC calls (if connected to real L1). + pub l1_provider: Option, /// The task manager. Held in order to avoid dropping the node. _tasks: TaskManager, } +impl std::fmt::Debug for TestFixture { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TestFixture") + .field("nodes", &self.nodes) + .field("wallet", &self.wallet) + .field("chain_spec", &self.chain_spec) + .field("l1_provider", &self.l1_provider.as_ref().map(|_| "L1Provider")) + .finish_non_exhaustive() + } +} + /// The network handle to the Scroll network. 
pub type ScrollNetworkHandle = NetworkHandle>; @@ -204,7 +221,6 @@ impl TestFixture { } /// Builder for creating test fixtures with a fluent API. -#[derive(Debug)] pub struct TestFixtureBuilder { config: ScrollRollupNodeConfig, num_nodes: usize, @@ -212,6 +228,21 @@ pub struct TestFixtureBuilder { is_dev: bool, no_local_transactions_propagation: bool, bootnodes: Option>, + l1_provider: Option, +} + +impl std::fmt::Debug for TestFixtureBuilder { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TestFixtureBuilder") + .field("config", &self.config) + .field("num_nodes", &self.num_nodes) + .field("chain_spec", &self.chain_spec) + .field("is_dev", &self.is_dev) + .field("no_local_transactions_propagation", &self.no_local_transactions_propagation) + .field("bootnodes", &self.bootnodes) + .field("l1_provider", &self.l1_provider.as_ref().map(|_| "L1Provider")) + .finish() + } } impl Default for TestFixtureBuilder { @@ -230,6 +261,7 @@ impl TestFixtureBuilder { is_dev: false, no_local_transactions_propagation: false, bootnodes: None, + l1_provider: None, } } @@ -448,6 +480,12 @@ impl TestFixtureBuilder { &mut self.config } + /// Set the L1 provider for making L1 RPC calls. + pub fn with_l1_provider(mut self, provider: L1Provider) -> Self { + self.l1_provider = Some(provider); + self + } + /// Build the test fixture. 
pub async fn build(self) -> eyre::Result { let config = self.config; @@ -500,6 +538,7 @@ impl TestFixtureBuilder { nodes: node_handles, wallet: Arc::new(Mutex::new(wallet)), chain_spec, + l1_provider: self.l1_provider, _tasks, }) } From df4c5efa54f200c0149cb3e1610284d3558f630a Mon Sep 17 00:00:00 2001 From: Morty Date: Mon, 2 Mar 2026 21:23:47 +0800 Subject: [PATCH 6/9] feat: add attach tool --- Cargo.lock | 1 + crates/node/Cargo.toml | 2 + crates/node/src/bin/scroll_debug.rs | 7 +- crates/node/src/debug_toolkit/attach_repl.rs | 711 +++++++++++++++ crates/node/src/debug_toolkit/cli.rs | 39 +- crates/node/src/debug_toolkit/commands.rs | 58 ++ crates/node/src/debug_toolkit/mod.rs | 4 +- crates/node/src/debug_toolkit/repl.rs | 25 +- scroll-debug-15746.log | 0 scroll-debug-16292.log | 671 +++++++++++++++ scroll-debug-47318.log | 676 +++++++++++++++ scroll-debug-51668.log | 0 scroll-debug-93283.log | 862 +++++++++++++++++++ 13 files changed, 3045 insertions(+), 11 deletions(-) create mode 100644 crates/node/src/debug_toolkit/attach_repl.rs create mode 100644 scroll-debug-15746.log create mode 100644 scroll-debug-16292.log create mode 100644 scroll-debug-47318.log create mode 100644 scroll-debug-51668.log create mode 100644 scroll-debug-93283.log diff --git a/Cargo.lock b/Cargo.lock index d87559fb..0d90a22e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -11192,6 +11192,7 @@ dependencies = [ "scroll-migration", "scroll-network", "scroll-wire", + "serde", "serde_json", "tokio", "tracing", diff --git a/crates/node/Cargo.toml b/crates/node/Cargo.toml index 947d8ad3..ccade99e 100644 --- a/crates/node/Cargo.toml +++ b/crates/node/Cargo.toml @@ -105,6 +105,7 @@ colored = { version = "3.0", optional = true } crossterm = { version = "0.28", optional = true } glob = { version = "0.3", optional = true } regex-lite = { version = "0.1", optional = true } +serde = { workspace = true, optional = true } tracing-subscriber = { version = "0.3", optional = true } scroll-db.workspace = true @@ 
-161,6 +162,7 @@ debug-toolkit = [ "dep:crossterm", "dep:glob", "dep:regex-lite", + "dep:serde", "dep:tracing-subscriber", ] test-utils = [ diff --git a/crates/node/src/bin/scroll_debug.rs b/crates/node/src/bin/scroll_debug.rs index 9fa89c10..4c7e56e4 100644 --- a/crates/node/src/bin/scroll_debug.rs +++ b/crates/node/src/bin/scroll_debug.rs @@ -53,9 +53,10 @@ fn main() -> eyre::Result<()> { eprintln!("Starting nodes (this may take a moment)..."); // Create tokio runtime and run - tokio::runtime::Builder::new_multi_thread().enable_all().build()?.block_on(async { - args.run(Some(log_path)).await - }) + tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build()? + .block_on(async { args.run(Some(log_path)).await }) } #[cfg(not(feature = "debug-toolkit"))] diff --git a/crates/node/src/debug_toolkit/attach_repl.rs b/crates/node/src/debug_toolkit/attach_repl.rs new file mode 100644 index 00000000..667e2c7d --- /dev/null +++ b/crates/node/src/debug_toolkit/attach_repl.rs @@ -0,0 +1,711 @@ +//! REPL for attaching to an already-running scroll node via JSON-RPC. +//! +//! Unlike [`super::DebugRepl`] which wraps an in-process [`TestFixture`], this REPL +//! connects to an existing node entirely over JSON-RPC using an alloy HTTP provider. +//! All namespaces — `eth_*`, `txpool_*`, `admin_*`, `rollupNode_*`, `rollupNodeAdmin_*` — +//! are accessed via `Provider::raw_request`, keeping the dependency surface minimal. +//! +//! # Usage +//! ```bash +//! scroll-debug --attach http://localhost:8545 +//! scroll-debug --attach http://localhost:8545 --private-key 0xac0974... +//! 
``` + +use super::commands::{ + print_help, AdminCommand, BlockArg, Command, EventsCommand, L1Command, PeersCommand, TxCommand, +}; +use alloy_consensus::{SignableTransaction, TxEip1559}; +use alloy_eips::{eip2718::Encodable2718, BlockId, BlockNumberOrTag}; +use alloy_network::{Ethereum, TxSignerSync}; +use alloy_primitives::TxKind; +use alloy_provider::{Provider, ProviderBuilder}; +use alloy_signer_local::PrivateKeySigner; +use colored::Colorize; +use crossterm::{ + event::{self, Event, KeyCode, KeyModifiers}, + terminal::{disable_raw_mode, enable_raw_mode}, +}; +use reqwest::Url; +use rollup_node_chain_orchestrator::ChainOrchestratorStatus; +use std::{borrow::Cow, io::Write, path::PathBuf, time::Duration}; + +/// Interactive REPL that attaches to a running node via JSON-RPC. +#[derive(Debug)] +pub struct AttachRepl { + /// The RPC URL of the target node. + url: Url, + /// Alloy provider — all RPC calls including custom namespaces go through `raw_request`. + provider: alloy_provider::RootProvider, + /// Optional private key for signing transactions locally. + signer: Option, + /// Whether the REPL is running. + running: bool, + /// Whether background head-block polling is enabled. + events_enabled: bool, + /// Most recently seen block number (for head-block polling). + last_seen_block: u64, + /// Path to the log file (for `logs` command). + log_path: Option, +} + +impl AttachRepl { + /// Connect to a node at the given URL and build the REPL. + pub async fn new(url: Url, private_key: Option) -> eyre::Result { + // Use `default()` (no fillers) to get a plain `RootProvider`. + // We don't need gas/nonce fillers since we build transactions manually. 
+ let provider = ProviderBuilder::default() + .connect(url.as_str()) + .await + .map_err(|e| eyre::eyre!("Failed to connect to {}: {}", url, e))?; + + let signer = if let Some(pk) = private_key { + let pk = pk.trim_start_matches("0x"); + let signer: PrivateKeySigner = + pk.parse().map_err(|e| eyre::eyre!("Invalid private key: {}", e))?; + Some(signer) + } else { + None + }; + + let last_seen_block = provider.get_block_number().await.unwrap_or(0); + + Ok(Self { + url, + provider, + signer, + running: false, + events_enabled: false, + last_seen_block, + log_path: None, + }) + } + + /// Set the log file path (shown by `logs` command). + pub fn set_log_path(&mut self, path: PathBuf) { + self.log_path = Some(path); + } + + /// Get the REPL prompt string. + fn get_prompt(&self) -> String { + let host = self.url.host_str().unwrap_or("?"); + let port = self.url.port().map(|p| format!(":{}", p)).unwrap_or_default(); + format!("{} [{}{}]> ", "scroll-debug".cyan(), host, port) + } + + /// Run the REPL loop. 
+ pub async fn run(&mut self) -> eyre::Result<()> { + self.running = true; + + enable_raw_mode()?; + struct RawModeGuard; + impl Drop for RawModeGuard { + fn drop(&mut self) { + let _ = disable_raw_mode(); + } + } + let _guard = RawModeGuard; + + let _ = disable_raw_mode(); + println!(); + println!("{}", "Scroll Debug Toolkit (attach mode)".bold().cyan()); + println!("Connected to: {}", self.url.as_str().green()); + if let Some(signer) = &self.signer { + println!("Signer: {:?}", signer.address()); + } else { + println!("{}", "No signer – tx send/inject require --private-key".yellow()); + } + println!("Type 'help' for available commands, 'exit' to quit."); + println!(); + if let Err(e) = self.cmd_status().await { + println!("{}: {}", "Warning: could not fetch initial status".yellow(), e); + } + let _ = enable_raw_mode(); + + let mut input_buffer = String::new(); + let mut stdout = std::io::stdout(); + + print!("{}", self.get_prompt()); + let _ = stdout.flush(); + + while self.running { + tokio::select! { + biased; + + // Head-block polling (only when events are enabled). + _ = async { tokio::time::sleep(Duration::from_secs(2)).await }, if self.events_enabled => { + if let Ok(number) = self.provider.get_block_number().await { + if number > self.last_seen_block { + for n in (self.last_seen_block + 1)..=number { + let id = BlockId::Number(BlockNumberOrTag::Number(n)); + if let Ok(Some(block)) = self.provider.get_block(id).await { + let msg = format!( + "[new block] #{} hash={:.12}... txs={}", + block.header.number, + format!("{:?}", block.header.hash), + block.transactions.len(), + ); + print!("\r\x1b[K{}\r\n{}{}", msg.cyan(), self.get_prompt(), input_buffer); + let _ = stdout.flush(); + } + } + self.last_seen_block = number; + } + } + } + + // Keyboard input (50 ms poll). + _ = tokio::time::sleep(Duration::from_millis(50)) => { + while event::poll(Duration::from_millis(0))? { + if let Event::Key(key_event) = event::read()? 
{ + match key_event.code { + KeyCode::Enter => { + print!("\r\n"); + let _ = stdout.flush(); + let line = input_buffer.trim().to_string(); + input_buffer.clear(); + + if !line.is_empty() { + let _ = disable_raw_mode(); + if let Err(e) = self.execute_command(&line).await { + println!("{}: {}", "Error".red(), e); + } + let _ = enable_raw_mode(); + } + + if self.running { + print!("{}", self.get_prompt()); + let _ = stdout.flush(); + } + } + KeyCode::Backspace => { + if !input_buffer.is_empty() { + input_buffer.pop(); + print!("\x08 \x08"); + let _ = stdout.flush(); + } + } + KeyCode::Char(c) => { + if key_event.modifiers.contains(KeyModifiers::CONTROL) && c == 'c' { + print!("\r\nUse 'exit' to quit\r\n{}{}", self.get_prompt(), input_buffer); + let _ = stdout.flush(); + } else if key_event.modifiers.contains(KeyModifiers::CONTROL) && c == 'd' { + print!("\r\n"); + self.running = false; + } else { + input_buffer.push(c); + print!("{}", c); + let _ = stdout.flush(); + } + } + KeyCode::Esc => { + print!("\r\x1b[K{}", self.get_prompt()); + let _ = stdout.flush(); + input_buffer.clear(); + } + _ => {} + } + } + } + } + } + } + + print!("Goodbye!\r\n"); + Ok(()) + } + + /// Dispatch a parsed command. 
+ async fn execute_command(&mut self, input: &str) -> eyre::Result<()> { + let cmd = Command::parse(input); + match cmd { + Command::Status => self.cmd_status().await, + Command::SyncStatus => self.cmd_sync_status().await, + Command::Block(arg) => self.cmd_block(arg).await, + Command::Blocks { from, to } => self.cmd_blocks(from, to).await, + Command::Fcs => self.cmd_fcs().await, + Command::L1(l1_cmd) => self.cmd_l1(l1_cmd).await, + Command::Tx(tx_cmd) => self.cmd_tx(tx_cmd).await, + Command::Peers(peers_cmd) => self.cmd_peers(peers_cmd).await, + Command::Events(events_cmd) => self.cmd_events(events_cmd), + Command::Admin(admin_cmd) => self.cmd_admin(admin_cmd).await, + Command::Rpc { method, params } => self.cmd_rpc(&method, params.as_deref()).await, + Command::Logs => self.cmd_logs(), + Command::Help => { + print_help(); + Ok(()) + } + Command::Exit => { + self.running = false; + Ok(()) + } + // Spawn-mode-only commands — give informative errors + Command::Build => { + println!( + "{}", + "build is only available in spawn mode (--chain / --sequencer).".yellow() + ); + Ok(()) + } + Command::Wallet(_) => { + println!( + "{}", + "wallet gen is only available in spawn mode. Use --private-key to set a signer." + .yellow() + ); + Ok(()) + } + Command::Run(_) => { + println!("{}", "run actions are only available in spawn mode.".yellow()); + Ok(()) + } + Command::Node(_) | Command::Nodes => { + println!( + "{}", + "node switching is only available in spawn mode (multiple nodes).".yellow() + ); + Ok(()) + } + Command::Db => { + println!("{}", "db path is only available in spawn mode.".yellow()); + Ok(()) + } + Command::Unknown(s) => { + if !s.is_empty() { + println!("Unknown command: {}. 
Type 'help' for available commands.", s); + } + Ok(()) + } + } + } + + // ------------------------------------------------------------------------- + // Helper + // ------------------------------------------------------------------------- + + /// Call a custom-namespace JSON-RPC method and deserialize the response. + /// + /// Uses `raw_request_dyn` (no trait bounds on P/R) combined with serde_json for + /// maximum compatibility regardless of the provider's network/transport generics. + async fn raw( + &self, + method: &'static str, + params: impl serde::Serialize, + ) -> eyre::Result { + let raw_params = serde_json::value::to_raw_value(¶ms) + .map_err(|e| eyre::eyre!("Failed to serialize params for {}: {}", method, e))?; + let raw_result = self + .provider + .raw_request_dyn(Cow::Borrowed(method), &raw_params) + .await + .map_err(|e| eyre::eyre!("{}: {}", method, e))?; + serde_json::from_str(raw_result.get()) + .map_err(|e| eyre::eyre!("Failed to deserialize response from {}: {}", method, e)) + } + + // ------------------------------------------------------------------------- + // Command implementations + // ------------------------------------------------------------------------- + + /// `status` — show node status via `rollupNode_status`. 
+ async fn cmd_status(&self) -> eyre::Result<()> { + let status: ChainOrchestratorStatus = self.raw("rollupNode_status", ()).await?; + let fcs = &status.l2.fcs; + + println!("{}", "=== Node Status ===".bold()); + println!("{}", "Node:".underline()); + println!(" RPC: {}", self.url.as_str()); + if let Some(signer) = &self.signer { + println!(" From: {:?}", signer.address()); + } + + println!("{}", "L2:".underline()); + println!( + " Head: #{} ({:.12}...)", + fcs.head_block_info().number.to_string().green(), + format!("{:?}", fcs.head_block_info().hash) + ); + println!( + " Safe: #{} ({:.12}...)", + fcs.safe_block_info().number.to_string().yellow(), + format!("{:?}", fcs.safe_block_info().hash) + ); + println!( + " Finalized: #{} ({:.12}...)", + fcs.finalized_block_info().number.to_string().blue(), + format!("{:?}", fcs.finalized_block_info().hash) + ); + println!( + " Synced: {}", + if status.l2.status.is_synced() { "true".green() } else { "false".red() } + ); + + println!("{}", "L1:".underline()); + println!(" Head: #{}", status.l1.latest.to_string().cyan()); + println!(" Finalized: #{}", status.l1.finalized); + println!(" Processed: #{}", status.l1.processed); + println!( + " Synced: {}", + if status.l1.status.is_synced() { "true".green() } else { "false".red() } + ); + + Ok(()) + } + + /// `sync-status` — detailed sync status. 
+ async fn cmd_sync_status(&self) -> eyre::Result<()> { + let status: ChainOrchestratorStatus = self.raw("rollupNode_status", ()).await?; + + println!("{}", "Sync Status:".bold()); + println!(); + println!("{}", "L1 Sync:".underline()); + println!( + " Status: {}", + if status.l1.status.is_synced() { + "SYNCED".green() + } else { + format!("{:?}", status.l1.status).yellow().to_string().into() + } + ); + println!(" Latest: #{}", status.l1.latest.to_string().cyan()); + println!(" Finalized: #{}", status.l1.finalized); + println!(" Processed: #{}", status.l1.processed); + println!(); + println!("{}", "L2 Sync:".underline()); + println!( + " Status: {}", + if status.l2.status.is_synced() { + "SYNCED".green() + } else { + format!("{:?}", status.l2.status).yellow().to_string().into() + } + ); + println!(); + let fcs = &status.l2.fcs; + println!("{}", "Forkchoice:".underline()); + println!( + " Head: #{} ({:.12}...)", + fcs.head_block_info().number.to_string().green(), + format!("{:?}", fcs.head_block_info().hash) + ); + println!( + " Safe: #{} ({:.12}...)", + fcs.safe_block_info().number.to_string().yellow(), + format!("{:?}", fcs.safe_block_info().hash) + ); + println!( + " Finalized: #{} ({:.12}...)", + fcs.finalized_block_info().number.to_string().blue(), + format!("{:?}", fcs.finalized_block_info().hash) + ); + + Ok(()) + } + + /// `fcs` — show forkchoice state. 
+ async fn cmd_fcs(&self) -> eyre::Result<()> { + let status: ChainOrchestratorStatus = self.raw("rollupNode_status", ()).await?; + let fcs = &status.l2.fcs; + + println!("{}", "Forkchoice State:".bold()); + println!(" Head:"); + println!(" Number: {}", fcs.head_block_info().number); + println!(" Hash: {:?}", fcs.head_block_info().hash); + println!(" Safe:"); + println!(" Number: {}", fcs.safe_block_info().number); + println!(" Hash: {:?}", fcs.safe_block_info().hash); + println!(" Finalized:"); + println!(" Number: {}", fcs.finalized_block_info().number); + println!(" Hash: {:?}", fcs.finalized_block_info().hash); + + Ok(()) + } + + /// `block [n|latest]` — show block details. + async fn cmd_block(&self, arg: BlockArg) -> eyre::Result<()> { + let tag = match arg { + BlockArg::Latest => BlockNumberOrTag::Latest, + BlockArg::Number(n) => BlockNumberOrTag::Number(n), + }; + + let block: Option = + self.raw("eth_getBlockByNumber", (tag, false)).await?; + let block = block.ok_or_else(|| eyre::eyre!("Block not found"))?; + + let number = block["number"].as_str().unwrap_or("?"); + let hash = block["hash"].as_str().unwrap_or("?"); + let parent = block["parentHash"].as_str().unwrap_or("?"); + let timestamp = block["timestamp"].as_str().unwrap_or("?"); + let gas_used = block["gasUsed"].as_str().unwrap_or("?"); + let gas_limit = block["gasLimit"].as_str().unwrap_or("?"); + let txs = block["transactions"].as_array(); + + println!("{}", format!("Block {}", number).bold()); + println!(" Hash: {}", hash); + println!(" Parent: {}", parent); + println!(" Timestamp: {}", timestamp); + println!(" Gas Used: {}", gas_used); + println!(" Gas Limit: {}", gas_limit); + + if let Some(txs) = txs { + println!(" Txs: {}", txs.len()); + for (i, tx) in txs.iter().enumerate() { + let tx_hash = tx.as_str().or_else(|| tx["hash"].as_str()).unwrap_or("?"); + println!(" [{}] hash={}", i, tx_hash); + } + } + + Ok(()) + } + + /// `blocks ` — list blocks in a range. 
+ async fn cmd_blocks(&self, from: u64, to: u64) -> eyre::Result<()> { + println!("{}", format!("Blocks {} to {}:", from, to).bold()); + for n in from..=to { + let tag = BlockNumberOrTag::Number(n); + let block: Option = + self.raw("eth_getBlockByNumber", (tag, false)).await?; + if let Some(block) = block { + let hash = block["hash"].as_str().unwrap_or("?"); + let gas = block["gasUsed"].as_str().unwrap_or("?"); + let tx_count = block["transactions"].as_array().map(|a| a.len()).unwrap_or(0); + println!(" #{}: {} txs, gas: {}, hash: {:.12}...", n, tx_count, gas, hash); + } else { + println!(" #{}: {}", n, "not found".dimmed()); + } + } + Ok(()) + } + + /// `l1 status` / `l1 messages` — L1-related queries. + async fn cmd_l1(&self, cmd: L1Command) -> eyre::Result<()> { + match cmd { + L1Command::Status => { + let status: ChainOrchestratorStatus = self.raw("rollupNode_status", ()).await?; + println!("{}", "L1 Status:".bold()); + println!( + " Synced: {}", + if status.l1.status.is_synced() { "true".green() } else { "false".red() } + ); + println!(" L1 Head: #{}", status.l1.latest); + println!(" L1 Final: #{}", status.l1.finalized); + println!(" Processed: #{}", status.l1.processed); + } + L1Command::Messages => { + let msg: Option = + self.raw("rollupNode_getL1MessageByIndex", [0u64]).await?; + println!("{}", "L1 Message Queue (index 0):".bold()); + match msg { + Some(m) => println!("{}", serde_json::to_string_pretty(&m)?), + None => println!(" {}", "No message at index 0".dimmed()), + } + println!( + "{}", + "Hint: use 'rpc rollupNode_getL1MessageByIndex []' for specific indices" + .dimmed() + ); + } + L1Command::Sync | L1Command::Block(_) | L1Command::Reorg(_) => { + println!( + "{}", + "l1 sync/block/reorg are only available in spawn mode (mock L1).".yellow() + ); + } + L1Command::Send { .. } => { + println!( + "{}", + "l1 send is only available in spawn mode. Use cast or a wallet to bridge." 
+ .yellow() + ); + } + } + Ok(()) + } + + /// `tx pending` / `tx send` / `tx inject`. + async fn cmd_tx(&mut self, cmd: TxCommand) -> eyre::Result<()> { + match cmd { + TxCommand::Pending => { + let result: serde_json::Value = self.raw("txpool_content", ()).await?; + println!("{}", "Pending Transactions:".bold()); + println!("{}", serde_json::to_string_pretty(&result)?); + } + TxCommand::Send { to, value, from: _ } => { + let signer = self.signer.as_ref().ok_or_else(|| { + eyre::eyre!("No signer configured. Start with --private-key .") + })?; + let from_address = signer.address(); + + let chain_id: serde_json::Value = self.raw("eth_chainId", ()).await?; + let chain_id: u64 = u64::from_str_radix( + chain_id.as_str().unwrap_or("0x1").trim_start_matches("0x"), + 16, + ) + .unwrap_or(1); + + let nonce_val: serde_json::Value = + self.raw("eth_getTransactionCount", (from_address, "latest")).await?; + let nonce: u64 = u64::from_str_radix( + nonce_val.as_str().unwrap_or("0x0").trim_start_matches("0x"), + 16, + ) + .unwrap_or(0); + + let latest: serde_json::Value = + self.raw("eth_getBlockByNumber", ("latest", false)).await?; + let base_fee = latest["baseFeePerGas"] + .as_str() + .and_then(|s| u64::from_str_radix(s.trim_start_matches("0x"), 16).ok()) + .unwrap_or(1_000_000_000); + + let mut tx = TxEip1559 { + chain_id, + nonce, + gas_limit: 21000, + max_fee_per_gas: base_fee as u128 * 2, + max_priority_fee_per_gas: 1_000_000_000, + to: TxKind::Call(to), + value, + access_list: Default::default(), + input: Default::default(), + }; + + let signature = signer.sign_transaction_sync(&mut tx)?; + let signed = tx.into_signed(signature); + let raw_tx = alloy_primitives::hex::encode_prefixed(signed.encoded_2718()); + + let tx_hash: serde_json::Value = + self.raw("eth_sendRawTransaction", [raw_tx]).await?; + println!("{}", "Transaction sent!".green()); + println!(" Hash: {}", tx_hash); + println!(" From: {:?}", from_address); + println!(" To: {:?}", to); + println!(" Value: {} wei", 
value); + } + TxCommand::Inject(bytes) => { + let hex = alloy_primitives::hex::encode_prefixed(&bytes); + let tx_hash: serde_json::Value = self.raw("eth_sendRawTransaction", [hex]).await?; + println!("{}", "Transaction injected!".green()); + println!(" Hash: {}", tx_hash); + } + } + Ok(()) + } + + /// `peers` / `peers connect`. + async fn cmd_peers(&self, cmd: PeersCommand) -> eyre::Result<()> { + match cmd { + PeersCommand::List => { + let peers: serde_json::Value = self.raw("admin_peers", ()).await?; + println!("{}", "Connected Peers:".bold()); + println!("{}", serde_json::to_string_pretty(&peers)?); + + println!(); + let node_info: serde_json::Value = self.raw("admin_nodeInfo", ()).await?; + println!("{}", "Local Node Info:".bold()); + println!("{}", serde_json::to_string_pretty(&node_info)?); + } + PeersCommand::Connect(enode_url) => { + let result: bool = self.raw("admin_addPeer", [enode_url.as_str()]).await?; + if result { + println!("{}", format!("Peer add request sent: {}", enode_url).green()); + } else { + println!("{}", "admin_addPeer returned false".yellow()); + } + } + } + Ok(()) + } + + /// `events on` / `events off` — toggle head-block polling. + fn cmd_events(&mut self, cmd: EventsCommand) -> eyre::Result<()> { + match cmd { + EventsCommand::On => { + self.events_enabled = true; + println!("{}", "Head-block polling enabled (2s interval)".green()); + } + EventsCommand::Off => { + self.events_enabled = false; + println!("{}", "Head-block polling disabled".yellow()); + } + EventsCommand::Filter(_) | EventsCommand::History(_) => { + println!("{}", "events filter/history are only available in spawn mode.".yellow()); + } + } + Ok(()) + } + + /// `admin enable-seq` / `admin disable-seq` / `admin revert `. 
+ async fn cmd_admin(&self, cmd: AdminCommand) -> eyre::Result<()> { + match cmd { + AdminCommand::EnableSequencing => { + let result: bool = + self.raw("rollupNodeAdmin_enableAutomaticSequencing", ()).await?; + if result { + println!("{}", "Automatic sequencing enabled".green()); + } else { + println!("{}", "Enable sequencing returned false".yellow()); + } + } + AdminCommand::DisableSequencing => { + let result: bool = + self.raw("rollupNodeAdmin_disableAutomaticSequencing", ()).await?; + if result { + println!("{}", "Automatic sequencing disabled".yellow()); + } else { + println!("{}", "Disable sequencing returned false".yellow()); + } + } + AdminCommand::RevertToL1Block(block_number) => { + println!("{}", format!("Reverting to L1 block {}...", block_number).yellow()); + let result: bool = + self.raw("rollupNodeAdmin_revertToL1Block", [block_number]).await?; + if result { + println!("{}", format!("Reverted to L1 block {}", block_number).green()); + } else { + println!("{}", "Revert returned false".yellow()); + } + } + } + Ok(()) + } + + /// `rpc [params]` — call any JSON-RPC method and pretty-print the result. + async fn cmd_rpc(&self, method: &str, params: Option<&str>) -> eyre::Result<()> { + let raw_params = match params { + None => serde_json::value::to_raw_value(&())?, + Some(p) => { + // Try to parse as JSON first; fall back to treating as a string value + let val: serde_json::Value = serde_json::from_str(p) + .unwrap_or_else(|_| serde_json::Value::String(p.to_string())); + // Ensure always an array for JSON-RPC + let arr = if val.is_array() { val } else { serde_json::Value::Array(vec![val]) }; + serde_json::value::to_raw_value(&arr)? 
+ } + }; + + let result = self + .provider + .raw_request_dyn(Cow::Owned(method.to_string()), &raw_params) + .await + .map_err(|e| eyre::eyre!("{}: {}", method, e))?; + + // Pretty-print via serde_json::Value + let pretty: serde_json::Value = serde_json::from_str(result.get())?; + println!("{}", serde_json::to_string_pretty(&pretty)?); + Ok(()) + } + + /// `logs` — show log file path. + fn cmd_logs(&self) -> eyre::Result<()> { + println!("{}", "Log File:".bold()); + if let Some(path) = &self.log_path { + println!(" Path: {}", path.display()); + println!(); + println!("{}", "View logs in another terminal:".underline()); + println!(" tail -f {}", path.display()); + } else { + println!(" {}", "No log file configured (logs going to stdout)".dimmed()); + } + Ok(()) + } +} diff --git a/crates/node/src/debug_toolkit/cli.rs b/crates/node/src/debug_toolkit/cli.rs index 8e3601cb..3fe19d7d 100644 --- a/crates/node/src/debug_toolkit/cli.rs +++ b/crates/node/src/debug_toolkit/cli.rs @@ -11,8 +11,32 @@ use std::{path::PathBuf, str::FromStr}; /// Debug toolkit CLI arguments. #[derive(Debug, Parser)] -#[command(name = "scroll-debug", about = "Scroll Debug Toolkit - Interactive REPL for debugging")] +#[command( + name = "scroll-debug", + about = "Scroll Debug Toolkit - Interactive REPL for debugging.\n\ + \n\ + Two modes:\n \ + Spawn: scroll-debug --chain dev --sequencer (starts a local test network)\n \ + Attach: scroll-debug --attach http://localhost:8545 (connects to a running node)" +)] pub struct DebugArgs { + // ── Attach mode ────────────────────────────────────────────────────────── + /// Attach to an already-running node at this RPC URL instead of spawning a test network. + /// + /// Example: --attach http://localhost:8545 + #[arg( + long, + conflicts_with_all = ["chain", "sequencer", "followers", "l1_url", "bootnodes", "valid_signer"] + )] + pub attach: Option, + + /// Private key (hex, with or without 0x prefix) used for signing transactions in attach mode. 
+ /// + /// If omitted, tx send/inject commands will fail with an explanatory error. + #[arg(long, requires = "attach")] + pub private_key: Option, + + // ── Spawn mode ─────────────────────────────────────────────────────────── /// Chain to use (dev, scroll-sepolia, scroll-mainnet) or path to genesis file. #[arg(long, default_value = "dev")] pub chain: String, @@ -41,6 +65,7 @@ pub struct DebugArgs { #[arg(long)] pub valid_signer: Option
, + // ── Common ─────────────────────────────────────────────────────────────── /// Path to log file. Defaults to ./scroll-debug-.log #[arg(long)] pub log_file: Option, @@ -49,8 +74,18 @@ pub struct DebugArgs { impl DebugArgs { /// Run the debug toolkit with these arguments. pub async fn run(self, log_path: Option) -> eyre::Result<()> { - use super::DebugRepl; + use super::{AttachRepl, DebugRepl}; + // ── Attach mode ────────────────────────────────────────────────────── + if let Some(url) = self.attach { + let mut repl = AttachRepl::new(url, self.private_key).await?; + if let Some(path) = log_path { + repl.set_log_path(path); + } + return repl.run().await; + } + + // ── Spawn mode ─────────────────────────────────────────────────────── // Build the fixture let mut builder = TestFixtureBuilder::new().with_chain(&self.chain)?; diff --git a/crates/node/src/debug_toolkit/commands.rs b/crates/node/src/debug_toolkit/commands.rs index d69f99d6..b143dcaf 100644 --- a/crates/node/src/debug_toolkit/commands.rs +++ b/crates/node/src/debug_toolkit/commands.rs @@ -44,6 +44,15 @@ pub enum Command { Db, /// Show log file path. Logs, + /// Admin commands (attach mode only). + Admin(AdminCommand), + /// Execute a raw JSON-RPC call and print the result. + Rpc { + /// The RPC method name (e.g. `eth_blockNumber`). + method: String, + /// Raw JSON params string (e.g. `["latest", false]`). + params: Option, + }, /// Show help. Help, /// Exit the REPL. @@ -52,6 +61,17 @@ pub enum Command { Unknown(String), } +/// Admin commands (attach mode only). +#[derive(Debug, Clone)] +pub enum AdminCommand { + /// Enable automatic sequencing. + EnableSequencing, + /// Disable automatic sequencing. + DisableSequencing, + /// Revert the node state to a specified L1 block number. + RevertToL1Block(u64), +} + /// Run command variants. 
#[derive(Debug, Clone)] pub enum RunCommand { @@ -175,6 +195,8 @@ impl Command { "nodes" => Self::Nodes, "db" | "database" => Self::Db, "logs" | "log" => Self::Logs, + "admin" => Self::parse_admin(args), + "rpc" => Self::parse_rpc(args), "help" | "?" => Self::Help, "exit" | "quit" | "q" => Self::Exit, _ => Self::Unknown(cmd), @@ -334,6 +356,32 @@ impl Command { Self::Run(RunCommand::Execute { name, args: action_args }) } } + + fn parse_admin(args: &[&str]) -> Self { + let subcmd = args.first().copied().unwrap_or("help"); + let subargs = if args.len() > 1 { &args[1..] } else { &[] }; + + match subcmd { + "enable-seq" | "enable-sequencing" => Self::Admin(AdminCommand::EnableSequencing), + "disable-seq" | "disable-sequencing" => Self::Admin(AdminCommand::DisableSequencing), + "revert" | "revert-to-l1" => { + if let Some(n) = subargs.first().and_then(|s| s.parse::().ok()) { + Self::Admin(AdminCommand::RevertToL1Block(n)) + } else { + Self::Unknown("admin revert requires a block number".to_string()) + } + } + _ => Self::Unknown(format!("admin {}", subcmd)), + } + } + + fn parse_rpc(args: &[&str]) -> Self { + let Some(method) = args.first() else { + return Self::Unknown("rpc requires a method name".to_string()); + }; + let params = if args.len() > 1 { Some(args[1..].join(" ")) } else { None }; + Self::Rpc { method: method.to_string(), params } + } } /// Print the help message. 
@@ -389,6 +437,16 @@ pub fn print_help() { println!("{}", "Logs:".underline()); println!(" logs Show log file path and tail command"); println!(); + println!("{}", "Admin (attach mode only):".underline()); + println!(" admin enable-seq Enable automatic sequencing"); + println!(" admin disable-seq Disable automatic sequencing"); + println!(" admin revert Revert node state to L1 block number "); + println!(); + println!("{}", "Raw RPC (attach mode):".underline()); + println!(" rpc [params] Execute any JSON-RPC call and print result"); + println!(" rpc eth_blockNumber"); + println!(" rpc eth_getBlockByNumber [\"latest\",false]"); + println!(); println!("{}", "Other:".underline()); println!(" help Show this help message"); println!(" exit Exit the REPL"); diff --git a/crates/node/src/debug_toolkit/mod.rs b/crates/node/src/debug_toolkit/mod.rs index 492c4a43..8ce29878 100644 --- a/crates/node/src/debug_toolkit/mod.rs +++ b/crates/node/src/debug_toolkit/mod.rs @@ -55,11 +55,13 @@ //! ``` pub mod actions; +mod attach_repl; pub mod cli; mod commands; mod event_stream; mod repl; +pub use attach_repl::AttachRepl; pub use cli::DebugArgs; pub use commands::*; pub use event_stream::*; @@ -69,6 +71,6 @@ pub use repl::*; pub mod prelude { pub use super::{ actions::{Action, ActionRegistry}, - DebugRepl, EventStreamState, + AttachRepl, DebugRepl, EventStreamState, }; } diff --git a/crates/node/src/debug_toolkit/repl.rs b/crates/node/src/debug_toolkit/repl.rs index 69ecf6ed..fe1c6ef0 100644 --- a/crates/node/src/debug_toolkit/repl.rs +++ b/crates/node/src/debug_toolkit/repl.rs @@ -13,9 +13,9 @@ use alloy_consensus::{SignableTransaction, TxEip1559, TxLegacy}; use alloy_eips::{eip2718::Encodable2718, BlockNumberOrTag}; use alloy_network::{TransactionResponse, TxSignerSync}; use alloy_primitives::{address, Address, Bytes, TxKind, U256}; -use alloy_sol_types::{sol, SolCall}; use alloy_rpc_types_eth::TransactionRequest; use alloy_signer_local::PrivateKeySigner; +use 
alloy_sol_types::{sol, SolCall}; use colored::Colorize; use crossterm::{ event::{self, Event, KeyCode, KeyModifiers}, @@ -288,6 +288,24 @@ impl DebugRepl { self.running = false; Ok(()) } + Command::Admin(_) => { + println!( + "{}", + "admin commands are only available in attach mode (--attach ).".yellow() + ); + Ok(()) + } + Command::Rpc { method, params: _ } => { + println!( + "{}", + format!( + "rpc {} is only available in attach mode (--attach ). Use 'cast rpc {}' instead.", + method, method + ) + .yellow() + ); + Ok(()) + } Command::Unknown(s) => { if !s.is_empty() { println!("Unknown command: {}. Type 'help' for available commands.", s); @@ -640,10 +658,7 @@ impl DebugRepl { ); } Err(e) => { - println!( - "{}", - format!("Failed to get receipt: {}", e).yellow() - ); + println!("{}", format!("Failed to get receipt: {}", e).yellow()); } } } diff --git a/scroll-debug-15746.log b/scroll-debug-15746.log new file mode 100644 index 00000000..e69de29b diff --git a/scroll-debug-16292.log b/scroll-debug-16292.log new file mode 100644 index 00000000..648d1ac1 --- /dev/null +++ b/scroll-debug-16292.log @@ -0,0 +1,671 @@ +2026-03-02T12:33:25.812857Z INFO node{idx=0}: reth::cli: Saving prune config to toml file +2026-03-02T12:33:25.813638Z INFO node{idx=0}: reth::cli: Configuration loaded path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-BTYrGfAQ/reth.toml" +2026-03-02T12:33:25.816190Z INFO node{idx=0}: reth::cli: Database opened +2026-03-02T12:33:26.135386Z INFO node{idx=0}: reth::cli: +Pre-merge hard forks (block based): +- Homestead @0 +- Tangerine @0 +- SpuriousDragon @0 +- Byzantium @0 +- Constantinople @0 +- Petersburg @0 +- Istanbul @0 +- Berlin @0 +- London @0 +- Archimedes @0 +- Bernoulli @0 +- Curie @0 + +Post-merge hard forks (timestamp based): +- Shanghai @0 +- Darwin @0 +- DarwinV2 @0 +- Euclid @0 +- EuclidV2 @0 +- Feynman @0 +- Galileo @0 +- GalileoV2 @0 +2026-03-02T12:33:26.142528Z INFO node{idx=0}: reth::cli: Transaction pool initialized 
+2026-03-02T12:33:26.456201Z INFO node{idx=0}: reth::cli: P2P networking initialized enode=enode://68d342edd2921250b59d89277d689bdd07e0068ae80a20b75cf9d6c3f56991b0a46b0b717c925efe7d9f87771d6fd89fc099fded52bd6eebd754be8b882fdc1f@127.0.0.1:51351 +2026-03-02T12:33:26.458448Z INFO node{idx=0}: reth::cli: StaticFileProducer initialized +2026-03-02T12:33:26.460499Z INFO node{idx=0}: reth::cli: Verifying storage consistency. +2026-03-02T12:33:26.461291Z INFO node{idx=0}: reth::cli: Pruner initialized prune_config=PruneConfig { block_interval: 5, segments: PruneModes { sender_recovery: None, transaction_lookup: None, receipts: None, account_history: None, storage_history: None, bodies_history: None, receipts_log_filter: ReceiptsLogPruneConfig({}) } } +2026-03-02T12:33:26.461583Z INFO node{idx=0}: reth::cli: Creating JWT auth secret file path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-BTYrGfAQ/jwt.hex" +2026-03-02T12:33:26.464831Z INFO node{idx=0}: reth::cli: Consensus engine initialized +2026-03-02T12:33:26.465790Z INFO node{idx=0}: reth::cli: Engine API handler initialized +2026-03-02T12:33:26.471828Z INFO node{idx=0}: reth::cli: RPC auth server started url=127.0.0.1:51352 +2026-03-02T12:33:26.474075Z INFO node{idx=0}: reth::cli: RPC IPC server started path=/tmp/reth.ipc-zKynQEXO +2026-03-02T12:33:26.474445Z INFO node{idx=0}: reth::cli: RPC HTTP server started url=127.0.0.1:51353 +2026-03-02T12:33:26.474497Z INFO node{idx=0}: rollup_node::args: Building rollup node with config: +ScrollRollupNodeConfig { + test: true, + consensus_args: ConsensusArgs { + algorithm: Noop, + authorized_signer: None, + }, + database_args: RollupNodeDatabaseArgs { + rn_db_path: None, + }, + chain_orchestrator_args: ChainOrchestratorArgs { + optimistic_sync_trigger: 100, + chain_buffer_size: 100, + }, + engine_driver_args: EngineDriverArgs { + sync_at_startup: true, + }, + blob_provider_args: BlobProviderArgs { + beacon_node_urls: None, + s3_url: None, + anvil_url: None, + mock: 
true, + compute_units_per_second: 0, + max_retries: 0, + initial_backoff: 0, + }, + l1_provider_args: L1ProviderArgs { + url: None, + compute_units_per_second: 10000, + max_retries: 10, + initial_backoff: 100, + logs_query_block_range: 500, + cache_max_items: 100, + }, + sequencer_args: SequencerArgs { + sequencer_enabled: true, + auto_start: false, + block_time: 100, + payload_building_duration: 40, + fee_recipient: 0x0000000000000000000000000000000000000000, + l1_message_inclusion_mode: BlockDepth( + 0, + ), + allow_empty_blocks: true, + max_l1_messages: None, + }, + network_args: RollupNodeNetworkArgs { + enable_eth_scroll_wire_bridge: true, + enable_scroll_wire: true, + sequencer_url: None, + signer_address: None, + }, + rpc_args: RpcArgs { + basic_enabled: true, + admin_enabled: true, + }, + signer_args: SignerArgs { + key_file: None, + aws_kms_key_id: None, + private_key: None, + }, + gas_price_oracle_args: RollupNodeGasPriceOracleArgs { + default_suggested_priority_fee: 0, + }, + pprof_args: PprofArgs { + enabled: false, + addr: 0.0.0.0:6868, + default_duration: 30, + }, + database: Some( + Database { + database: Retry { + inner: DatabaseInner { + connection: SqlxSqlitePoolConnection, + write_lock: Mutex { + data: (), + }, + read_locks: Semaphore { + ll_sem: Semaphore { + permits: 5, + }, + resource_span: Span { + name: "runtime.resource", + level: Level( + Trace, + ), + target: "tokio::sync::semaphore", + disabled: true, + module_path: "tokio::sync::semaphore", + line: 461, + file: "/Users/yiweichi/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tokio-1.48.0/src/sync/semaphore.rs", + }, + }, + metrics: DatabaseMetrics, + tmp_dir: None, + }, + max_retries: None, + initial_delay_ms: 50, + exponential_backoff: false, + metrics: RetryMetrics, + }, + metrics: { + PrepareOnStartup: DatabaseOperationMetrics, + GetL1BlockInfo: DatabaseOperationMetrics, + RemoveL1BlockInfoLeq: DatabaseOperationMetrics, + UpdateL1MessagesWithL2Block: DatabaseOperationMetrics, + 
GetL2HeadBlockNumber: DatabaseOperationMetrics, + GetL2BlockAndBatchInfoByHash: DatabaseOperationMetrics, + GetLatestSafeL2Info: DatabaseOperationMetrics, + UpdateL1MessagesFromL2Blocks: DatabaseOperationMetrics, + DeleteBatchesGtBlockNumber: DatabaseOperationMetrics, + DeleteBatchesGtBatchIndex: DatabaseOperationMetrics, + InsertL1BlockInfo: DatabaseOperationMetrics, + RemoveL1BlockInfoGt: DatabaseOperationMetrics, + InsertBlocks: DatabaseOperationMetrics, + GetLatestL1BlockNumber: DatabaseOperationMetrics, + GetNL1Messages: DatabaseOperationMetrics, + InsertGenesisBlock: DatabaseOperationMetrics, + GetL2BlockInfoByNumber: DatabaseOperationMetrics, + GetSignature: DatabaseOperationMetrics, + GetNL2BlockDataHint: DatabaseOperationMetrics, + SetL2HeadBlockNumber: DatabaseOperationMetrics, + GetFinalizedL1BlockNumber: DatabaseOperationMetrics, + SetProcessedL1BlockNumber: DatabaseOperationMetrics, + DeleteL2BlocksGtBatchIndex: DatabaseOperationMetrics, + FinalizeConsolidatedBatches: DatabaseOperationMetrics, + Unwind: DatabaseOperationMetrics, + GetHighestBlockForBatchHash: DatabaseOperationMetrics, + SetBatchRevertBlockNumberForBatchRange: DatabaseOperationMetrics, + InsertBatchConsolidationOutcome: DatabaseOperationMetrics, + GetBatchByHash: DatabaseOperationMetrics, + GetBatchStatusByHash: DatabaseOperationMetrics, + FetchAndUpdateUnprocessedCommittedBatches: DatabaseOperationMetrics, + InsertL1Message: DatabaseOperationMetrics, + FetchAndUpdateUnprocessedFinalizedBatches: DatabaseOperationMetrics, + DeleteL1MessagesGt: DatabaseOperationMetrics, + InsertSignature: DatabaseOperationMetrics, + DeleteBatchFinalizationGtBlockNumber: DatabaseOperationMetrics, + GetBatchByIndex: DatabaseOperationMetrics, + InsertBlock: DatabaseOperationMetrics, + SetFinalizedL1BlockNumber: DatabaseOperationMetrics, + GetMaxBlockDataHintBlockNumber: DatabaseOperationMetrics, + ChangeBatchProcessingToCommittedStatus: DatabaseOperationMetrics, + FinalizeBatchesUpToIndex: 
DatabaseOperationMetrics, + GetHighestBlockForBatchIndex: DatabaseOperationMetrics, + UpdateSkippedL1Messages: DatabaseOperationMetrics, + GetProcessedL1BlockNumber: DatabaseOperationMetrics, + UpdateBatchStatus: DatabaseOperationMetrics, + PurgeL1MessageToL2BlockMappings: DatabaseOperationMetrics, + GetLatestIndexedEventL1BlockNumber: DatabaseOperationMetrics, + DeleteL2BlocksGtBlockNumber: DatabaseOperationMetrics, + DeleteBatchRevertGtBlockNumber: DatabaseOperationMetrics, + SetLatestL1BlockNumber: DatabaseOperationMetrics, + InsertBatch: DatabaseOperationMetrics, + InsertSignatures: DatabaseOperationMetrics, + }, + }, + ), +} +2026-03-02T12:33:26.484713Z INFO node{idx=0}: sea_orm_migration::migrator: Applying all pending migrations +2026-03-02T12:33:26.485730Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20220101_000001_create_batch_commit_table' +2026-03-02T12:33:26.487284Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20220101_000001_create_batch_commit_table' has been applied +2026-03-02T12:33:26.487479Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250304_125946_add_l1_msg_table' +2026-03-02T12:33:26.488095Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250304_125946_add_l1_msg_table' has been applied +2026-03-02T12:33:26.488290Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250408_132123_add_header_metadata' +2026-03-02T12:33:26.488425Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250408_132123_add_header_metadata' has been applied +2026-03-02T12:33:26.488615Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250408_150338_load_header_metadata' +2026-03-02T12:33:26.488625Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250408_150338_load_header_metadata' has been applied +2026-03-02T12:33:26.488708Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250411_072004_add_l2_block' 
+2026-03-02T12:33:26.489777Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250411_072004_add_l2_block' has been applied +2026-03-02T12:33:26.489971Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250616_223947_add_metadata' +2026-03-02T12:33:26.490274Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250616_223947_add_metadata' has been applied +2026-03-02T12:33:26.490350Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250904_175949_block_signature' +2026-03-02T12:33:26.490512Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250904_175949_block_signature' has been applied +2026-03-02T12:33:26.490709Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20251028_110719_add_l1_block_table' +2026-03-02T12:33:26.490863Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20251028_110719_add_l1_block_table' has been applied +2026-03-02T12:33:26.511883Z INFO node{idx=0}: scroll::node::args: Starting engine driver fcs=ForkchoiceState { head: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, safe: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, finalized: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 } } payload_building_duration=40 +2026-03-02T12:33:26.512175Z INFO node{idx=0}: scroll::providers: Running with mock blob provider - all other blob provider configurations are ignored +2026-03-02T12:33:26.513485Z INFO scroll::derivation_pipeline: Starting derivation pipeline worker +2026-03-02T12:33:26.513714Z INFO node{idx=0}: reth::cli: Starting consensus engine +2026-03-02T12:33:26.514886Z INFO node{idx=0}: reth_node_events::node: Forkchoice updated head_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 safe_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 
finalized_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 +2026-03-02T12:33:26.519080Z WARN node{idx=1}: reth_node_builder::launch::common: Failed to build global thread pool err=The global thread pool has already been initialized. +2026-03-02T12:33:26.519437Z INFO node{idx=1}: reth::cli: Saving prune config to toml file +2026-03-02T12:33:26.519810Z INFO node{idx=1}: reth::cli: Configuration loaded path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-ExVZKSsZ/reth.toml" +2026-03-02T12:33:26.520154Z INFO node{idx=1}: reth::cli: Database opened +2026-03-02T12:33:26.607383Z INFO node{idx=1}: reth::cli: +Pre-merge hard forks (block based): +- Homestead @0 +- Tangerine @0 +- SpuriousDragon @0 +- Byzantium @0 +- Constantinople @0 +- Petersburg @0 +- Istanbul @0 +- Berlin @0 +- London @0 +- Archimedes @0 +- Bernoulli @0 +- Curie @0 + +Post-merge hard forks (timestamp based): +- Shanghai @0 +- Darwin @0 +- DarwinV2 @0 +- Euclid @0 +- EuclidV2 @0 +- Feynman @0 +- Galileo @0 +- GalileoV2 @0 +2026-03-02T12:33:26.608014Z INFO node{idx=1}: reth::cli: Transaction pool initialized +2026-03-02T12:33:26.609673Z INFO node{idx=1}: reth::cli: P2P networking initialized enode=enode://1ff1c09c1caa8fa9327009d46565fa6871ffcce57b560c26b8ef453fa233a3b9a5a7aeeee9ece98d9c83c721950e960dce5cd36637fe73830adc9418af6bb88a@127.0.0.1:51358 +2026-03-02T12:33:26.609904Z INFO node{idx=1}: reth::cli: StaticFileProducer initialized +2026-03-02T12:33:26.610368Z INFO node{idx=1}: reth::cli: Verifying storage consistency. 
+2026-03-02T12:33:26.610702Z INFO node{idx=1}: reth::cli: Pruner initialized prune_config=PruneConfig { block_interval: 5, segments: PruneModes { sender_recovery: None, transaction_lookup: None, receipts: None, account_history: None, storage_history: None, bodies_history: None, receipts_log_filter: ReceiptsLogPruneConfig({}) } } +2026-03-02T12:33:26.610872Z INFO node{idx=1}: reth::cli: Creating JWT auth secret file path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-ExVZKSsZ/jwt.hex" +2026-03-02T12:33:26.611682Z INFO node{idx=1}: reth::cli: Consensus engine initialized +2026-03-02T12:33:26.611895Z INFO node{idx=1}: reth::cli: Engine API handler initialized +2026-03-02T12:33:26.614553Z INFO node{idx=1}: reth::cli: RPC auth server started url=127.0.0.1:51359 +2026-03-02T12:33:26.616312Z INFO node{idx=1}: reth::cli: RPC IPC server started path=/tmp/reth.ipc-iAtGUbRW +2026-03-02T12:33:26.616323Z INFO node{idx=1}: reth::cli: RPC HTTP server started url=127.0.0.1:51360 +2026-03-02T12:33:26.616345Z INFO node{idx=1}: rollup_node::args: Building rollup node with config: +ScrollRollupNodeConfig { + test: true, + consensus_args: ConsensusArgs { + algorithm: Noop, + authorized_signer: None, + }, + database_args: RollupNodeDatabaseArgs { + rn_db_path: None, + }, + chain_orchestrator_args: ChainOrchestratorArgs { + optimistic_sync_trigger: 100, + chain_buffer_size: 100, + }, + engine_driver_args: EngineDriverArgs { + sync_at_startup: true, + }, + blob_provider_args: BlobProviderArgs { + beacon_node_urls: None, + s3_url: None, + anvil_url: None, + mock: true, + compute_units_per_second: 0, + max_retries: 0, + initial_backoff: 0, + }, + l1_provider_args: L1ProviderArgs { + url: None, + compute_units_per_second: 10000, + max_retries: 10, + initial_backoff: 100, + logs_query_block_range: 500, + cache_max_items: 100, + }, + sequencer_args: SequencerArgs { + sequencer_enabled: false, + auto_start: false, + block_time: 100, + payload_building_duration: 40, + fee_recipient: 
0x0000000000000000000000000000000000000000, + l1_message_inclusion_mode: BlockDepth( + 0, + ), + allow_empty_blocks: true, + max_l1_messages: None, + }, + network_args: RollupNodeNetworkArgs { + enable_eth_scroll_wire_bridge: true, + enable_scroll_wire: true, + sequencer_url: None, + signer_address: None, + }, + rpc_args: RpcArgs { + basic_enabled: true, + admin_enabled: true, + }, + signer_args: SignerArgs { + key_file: None, + aws_kms_key_id: None, + private_key: None, + }, + gas_price_oracle_args: RollupNodeGasPriceOracleArgs { + default_suggested_priority_fee: 0, + }, + pprof_args: PprofArgs { + enabled: false, + addr: 0.0.0.0:6868, + default_duration: 30, + }, + database: Some( + Database { + database: Retry { + inner: DatabaseInner { + connection: SqlxSqlitePoolConnection, + write_lock: Mutex { + data: (), + }, + read_locks: Semaphore { + ll_sem: Semaphore { + permits: 5, + }, + resource_span: Span { + name: "runtime.resource", + level: Level( + Trace, + ), + target: "tokio::sync::semaphore", + disabled: true, + module_path: "tokio::sync::semaphore", + line: 461, + file: "/Users/yiweichi/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tokio-1.48.0/src/sync/semaphore.rs", + }, + }, + metrics: DatabaseMetrics, + tmp_dir: None, + }, + max_retries: None, + initial_delay_ms: 50, + exponential_backoff: false, + metrics: RetryMetrics, + }, + metrics: { + GetFinalizedL1BlockNumber: DatabaseOperationMetrics, + SetFinalizedL1BlockNumber: DatabaseOperationMetrics, + GetLatestSafeL2Info: DatabaseOperationMetrics, + FinalizeConsolidatedBatches: DatabaseOperationMetrics, + DeleteL2BlocksGtBlockNumber: DatabaseOperationMetrics, + DeleteL2BlocksGtBatchIndex: DatabaseOperationMetrics, + GetNL1Messages: DatabaseOperationMetrics, + GetLatestL1BlockNumber: DatabaseOperationMetrics, + ChangeBatchProcessingToCommittedStatus: DatabaseOperationMetrics, + GetBatchByIndex: DatabaseOperationMetrics, + InsertSignature: DatabaseOperationMetrics, + SetL2HeadBlockNumber: 
DatabaseOperationMetrics, + PurgeL1MessageToL2BlockMappings: DatabaseOperationMetrics, + FetchAndUpdateUnprocessedFinalizedBatches: DatabaseOperationMetrics, + DeleteBatchFinalizationGtBlockNumber: DatabaseOperationMetrics, + InsertBlocks: DatabaseOperationMetrics, + DeleteL1MessagesGt: DatabaseOperationMetrics, + RemoveL1BlockInfoLeq: DatabaseOperationMetrics, + InsertBatch: DatabaseOperationMetrics, + UpdateL1MessagesFromL2Blocks: DatabaseOperationMetrics, + GetBatchStatusByHash: DatabaseOperationMetrics, + UpdateSkippedL1Messages: DatabaseOperationMetrics, + GetL1BlockInfo: DatabaseOperationMetrics, + InsertGenesisBlock: DatabaseOperationMetrics, + GetBatchByHash: DatabaseOperationMetrics, + GetMaxBlockDataHintBlockNumber: DatabaseOperationMetrics, + DeleteBatchesGtBlockNumber: DatabaseOperationMetrics, + FinalizeBatchesUpToIndex: DatabaseOperationMetrics, + SetLatestL1BlockNumber: DatabaseOperationMetrics, + DeleteBatchesGtBatchIndex: DatabaseOperationMetrics, + InsertBatchConsolidationOutcome: DatabaseOperationMetrics, + SetBatchRevertBlockNumberForBatchRange: DatabaseOperationMetrics, + Unwind: DatabaseOperationMetrics, + GetProcessedL1BlockNumber: DatabaseOperationMetrics, + GetL2HeadBlockNumber: DatabaseOperationMetrics, + GetL2BlockInfoByNumber: DatabaseOperationMetrics, + SetProcessedL1BlockNumber: DatabaseOperationMetrics, + UpdateBatchStatus: DatabaseOperationMetrics, + InsertL1Message: DatabaseOperationMetrics, + GetHighestBlockForBatchIndex: DatabaseOperationMetrics, + GetSignature: DatabaseOperationMetrics, + GetLatestIndexedEventL1BlockNumber: DatabaseOperationMetrics, + PrepareOnStartup: DatabaseOperationMetrics, + GetNL2BlockDataHint: DatabaseOperationMetrics, + InsertL1BlockInfo: DatabaseOperationMetrics, + DeleteBatchRevertGtBlockNumber: DatabaseOperationMetrics, + UpdateL1MessagesWithL2Block: DatabaseOperationMetrics, + GetL2BlockAndBatchInfoByHash: DatabaseOperationMetrics, + InsertSignatures: DatabaseOperationMetrics, + RemoveL1BlockInfoGt: 
DatabaseOperationMetrics, + GetHighestBlockForBatchHash: DatabaseOperationMetrics, + FetchAndUpdateUnprocessedCommittedBatches: DatabaseOperationMetrics, + InsertBlock: DatabaseOperationMetrics, + }, + }, + ), +} +2026-03-02T12:33:26.617187Z INFO node{idx=1}: sea_orm_migration::migrator: Applying all pending migrations +2026-03-02T12:33:26.617603Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20220101_000001_create_batch_commit_table' +2026-03-02T12:33:26.619030Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20220101_000001_create_batch_commit_table' has been applied +2026-03-02T12:33:26.619205Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250304_125946_add_l1_msg_table' +2026-03-02T12:33:26.619782Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250304_125946_add_l1_msg_table' has been applied +2026-03-02T12:33:26.619967Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250408_132123_add_header_metadata' +2026-03-02T12:33:26.620108Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250408_132123_add_header_metadata' has been applied +2026-03-02T12:33:26.620287Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250408_150338_load_header_metadata' +2026-03-02T12:33:26.620297Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250408_150338_load_header_metadata' has been applied +2026-03-02T12:33:26.620380Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250411_072004_add_l2_block' +2026-03-02T12:33:26.621534Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250411_072004_add_l2_block' has been applied +2026-03-02T12:33:26.621763Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250616_223947_add_metadata' +2026-03-02T12:33:26.622098Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250616_223947_add_metadata' has been applied +2026-03-02T12:33:26.622187Z INFO 
node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250904_175949_block_signature' +2026-03-02T12:33:26.622357Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250904_175949_block_signature' has been applied +2026-03-02T12:33:26.622569Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20251028_110719_add_l1_block_table' +2026-03-02T12:33:26.622727Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20251028_110719_add_l1_block_table' has been applied +2026-03-02T12:33:26.629183Z INFO node{idx=1}: scroll::node::args: Starting engine driver fcs=ForkchoiceState { head: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, safe: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, finalized: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 } } payload_building_duration=40 +2026-03-02T12:33:26.629217Z INFO node{idx=1}: scroll::providers: Running with mock blob provider - all other blob provider configurations are ignored +2026-03-02T12:33:26.630065Z INFO node{idx=1}: reth::cli: Starting consensus engine +2026-03-02T12:33:26.630158Z INFO scroll::derivation_pipeline: Starting derivation pipeline worker +2026-03-02T12:33:26.630383Z INFO node{idx=1}: reth_node_events::node: Forkchoice updated head_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 safe_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 finalized_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 +2026-03-02T12:33:29.466851Z INFO node{idx=0}: reth::cli: Status connected_peers=0 latest_block=0 +2026-03-02T12:33:29.613432Z INFO node{idx=1}: reth::cli: Status connected_peers=0 latest_block=0 +2026-03-02T12:33:31.472193Z INFO node{idx=1}: reth_e2e_test_utils::network: Session established with peer: 
0x68d342edd2921250b59d89277d689bdd07e0068ae80a20b75cf9d6c3f56991b0a46b0b717c925efe7d9f87771d6fd89fc099fded52bd6eebd754be8b882fdc1f +2026-03-02T12:33:31.472748Z INFO node{idx=1}: reth_e2e_test_utils::network: Session established with peer: 0x1ff1c09c1caa8fa9327009d46565fa6871ffcce57b560c26b8ef453fa233a3b9a5a7aeeee9ece98d9c83c721950e960dce5cd36637fe73830adc9418af6bb88a +2026-03-02T12:33:31.478486Z WARN node{idx=2}: reth_node_builder::launch::common: Failed to build global thread pool err=The global thread pool has already been initialized. +2026-03-02T12:33:31.478981Z INFO node{idx=2}: reth::cli: Saving prune config to toml file +2026-03-02T12:33:31.479376Z INFO node{idx=2}: reth::cli: Configuration loaded path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-E87A3wU3/reth.toml" +2026-03-02T12:33:31.479791Z INFO node{idx=2}: reth::cli: Database opened +2026-03-02T12:33:31.568702Z INFO node{idx=2}: reth::cli: +Pre-merge hard forks (block based): +- Homestead @0 +- Tangerine @0 +- SpuriousDragon @0 +- Byzantium @0 +- Constantinople @0 +- Petersburg @0 +- Istanbul @0 +- Berlin @0 +- London @0 +- Archimedes @0 +- Bernoulli @0 +- Curie @0 + +Post-merge hard forks (timestamp based): +- Shanghai @0 +- Darwin @0 +- DarwinV2 @0 +- Euclid @0 +- EuclidV2 @0 +- Feynman @0 +- Galileo @0 +- GalileoV2 @0 +2026-03-02T12:33:31.569648Z INFO node{idx=2}: reth::cli: Transaction pool initialized +2026-03-02T12:33:31.571984Z INFO node{idx=2}: reth::cli: P2P networking initialized enode=enode://871a1b8b805b16f5d2bf85bbb4f144bb39b7175d725d42e8aa0e7903d936b1d9a0fdf6127430eb80df48fcdd749dd721fd87f1d4ade03e17ce932a79196aade3@127.0.0.1:51367 +2026-03-02T12:33:31.572301Z INFO node{idx=2}: reth::cli: StaticFileProducer initialized +2026-03-02T12:33:31.572919Z INFO node{idx=2}: reth::cli: Verifying storage consistency. 
+2026-03-02T12:33:31.573337Z INFO node{idx=2}: reth::cli: Pruner initialized prune_config=PruneConfig { block_interval: 5, segments: PruneModes { sender_recovery: None, transaction_lookup: None, receipts: None, account_history: None, storage_history: None, bodies_history: None, receipts_log_filter: ReceiptsLogPruneConfig({}) } } +2026-03-02T12:33:31.573561Z INFO node{idx=2}: reth::cli: Creating JWT auth secret file path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-E87A3wU3/jwt.hex" +2026-03-02T12:33:31.574721Z INFO node{idx=2}: reth::cli: Consensus engine initialized +2026-03-02T12:33:31.574976Z INFO node{idx=2}: reth::cli: Engine API handler initialized +2026-03-02T12:33:31.578376Z INFO node{idx=2}: reth::cli: RPC auth server started url=127.0.0.1:51368 +2026-03-02T12:33:31.580586Z INFO node{idx=2}: reth::cli: RPC IPC server started path=/tmp/reth.ipc-3xCrUmZj +2026-03-02T12:33:31.580599Z INFO node{idx=2}: reth::cli: RPC HTTP server started url=127.0.0.1:51369 +2026-03-02T12:33:31.580628Z INFO node{idx=2}: rollup_node::args: Building rollup node with config: +ScrollRollupNodeConfig { + test: true, + consensus_args: ConsensusArgs { + algorithm: Noop, + authorized_signer: None, + }, + database_args: RollupNodeDatabaseArgs { + rn_db_path: None, + }, + chain_orchestrator_args: ChainOrchestratorArgs { + optimistic_sync_trigger: 100, + chain_buffer_size: 100, + }, + engine_driver_args: EngineDriverArgs { + sync_at_startup: true, + }, + blob_provider_args: BlobProviderArgs { + beacon_node_urls: None, + s3_url: None, + anvil_url: None, + mock: true, + compute_units_per_second: 0, + max_retries: 0, + initial_backoff: 0, + }, + l1_provider_args: L1ProviderArgs { + url: None, + compute_units_per_second: 10000, + max_retries: 10, + initial_backoff: 100, + logs_query_block_range: 500, + cache_max_items: 100, + }, + sequencer_args: SequencerArgs { + sequencer_enabled: false, + auto_start: false, + block_time: 100, + payload_building_duration: 40, + fee_recipient: 
0x0000000000000000000000000000000000000000, + l1_message_inclusion_mode: BlockDepth( + 0, + ), + allow_empty_blocks: true, + max_l1_messages: None, + }, + network_args: RollupNodeNetworkArgs { + enable_eth_scroll_wire_bridge: true, + enable_scroll_wire: true, + sequencer_url: None, + signer_address: None, + }, + rpc_args: RpcArgs { + basic_enabled: true, + admin_enabled: true, + }, + signer_args: SignerArgs { + key_file: None, + aws_kms_key_id: None, + private_key: None, + }, + gas_price_oracle_args: RollupNodeGasPriceOracleArgs { + default_suggested_priority_fee: 0, + }, + pprof_args: PprofArgs { + enabled: false, + addr: 0.0.0.0:6868, + default_duration: 30, + }, + database: Some( + Database { + database: Retry { + inner: DatabaseInner { + connection: SqlxSqlitePoolConnection, + write_lock: Mutex { + data: (), + }, + read_locks: Semaphore { + ll_sem: Semaphore { + permits: 5, + }, + resource_span: Span { + name: "runtime.resource", + level: Level( + Trace, + ), + target: "tokio::sync::semaphore", + disabled: true, + module_path: "tokio::sync::semaphore", + line: 461, + file: "/Users/yiweichi/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tokio-1.48.0/src/sync/semaphore.rs", + }, + }, + metrics: DatabaseMetrics, + tmp_dir: None, + }, + max_retries: None, + initial_delay_ms: 50, + exponential_backoff: false, + metrics: RetryMetrics, + }, + metrics: { + GetL1BlockInfo: DatabaseOperationMetrics, + FetchAndUpdateUnprocessedFinalizedBatches: DatabaseOperationMetrics, + SetL2HeadBlockNumber: DatabaseOperationMetrics, + InsertGenesisBlock: DatabaseOperationMetrics, + ChangeBatchProcessingToCommittedStatus: DatabaseOperationMetrics, + SetFinalizedL1BlockNumber: DatabaseOperationMetrics, + InsertBlocks: DatabaseOperationMetrics, + Unwind: DatabaseOperationMetrics, + SetLatestL1BlockNumber: DatabaseOperationMetrics, + InsertSignature: DatabaseOperationMetrics, + GetBatchByHash: DatabaseOperationMetrics, + GetLatestL1BlockNumber: DatabaseOperationMetrics, + 
GetL2HeadBlockNumber: DatabaseOperationMetrics, + GetL2BlockInfoByNumber: DatabaseOperationMetrics, + DeleteL1MessagesGt: DatabaseOperationMetrics, + DeleteBatchFinalizationGtBlockNumber: DatabaseOperationMetrics, + PrepareOnStartup: DatabaseOperationMetrics, + DeleteL2BlocksGtBlockNumber: DatabaseOperationMetrics, + DeleteBatchRevertGtBlockNumber: DatabaseOperationMetrics, + DeleteL2BlocksGtBatchIndex: DatabaseOperationMetrics, + InsertBlock: DatabaseOperationMetrics, + DeleteBatchesGtBlockNumber: DatabaseOperationMetrics, + RemoveL1BlockInfoGt: DatabaseOperationMetrics, + GetFinalizedL1BlockNumber: DatabaseOperationMetrics, + UpdateBatchStatus: DatabaseOperationMetrics, + GetLatestSafeL2Info: DatabaseOperationMetrics, + PurgeL1MessageToL2BlockMappings: DatabaseOperationMetrics, + GetNL2BlockDataHint: DatabaseOperationMetrics, + GetHighestBlockForBatchHash: DatabaseOperationMetrics, + InsertL1BlockInfo: DatabaseOperationMetrics, + GetProcessedL1BlockNumber: DatabaseOperationMetrics, + GetHighestBlockForBatchIndex: DatabaseOperationMetrics, + UpdateL1MessagesFromL2Blocks: DatabaseOperationMetrics, + DeleteBatchesGtBatchIndex: DatabaseOperationMetrics, + UpdateL1MessagesWithL2Block: DatabaseOperationMetrics, + SetBatchRevertBlockNumberForBatchRange: DatabaseOperationMetrics, + GetNL1Messages: DatabaseOperationMetrics, + FinalizeConsolidatedBatches: DatabaseOperationMetrics, + GetMaxBlockDataHintBlockNumber: DatabaseOperationMetrics, + FetchAndUpdateUnprocessedCommittedBatches: DatabaseOperationMetrics, + InsertL1Message: DatabaseOperationMetrics, + InsertBatch: DatabaseOperationMetrics, + SetProcessedL1BlockNumber: DatabaseOperationMetrics, + GetBatchByIndex: DatabaseOperationMetrics, + InsertSignatures: DatabaseOperationMetrics, + GetBatchStatusByHash: DatabaseOperationMetrics, + UpdateSkippedL1Messages: DatabaseOperationMetrics, + GetLatestIndexedEventL1BlockNumber: DatabaseOperationMetrics, + GetL2BlockAndBatchInfoByHash: DatabaseOperationMetrics, + GetSignature: 
DatabaseOperationMetrics, + FinalizeBatchesUpToIndex: DatabaseOperationMetrics, + InsertBatchConsolidationOutcome: DatabaseOperationMetrics, + RemoveL1BlockInfoLeq: DatabaseOperationMetrics, + }, + }, + ), +} +2026-03-02T12:33:31.581813Z INFO node{idx=2}: sea_orm_migration::migrator: Applying all pending migrations +2026-03-02T12:33:31.582387Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20220101_000001_create_batch_commit_table' +2026-03-02T12:33:31.584272Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20220101_000001_create_batch_commit_table' has been applied +2026-03-02T12:33:31.584492Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250304_125946_add_l1_msg_table' +2026-03-02T12:33:31.585384Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250304_125946_add_l1_msg_table' has been applied +2026-03-02T12:33:31.585653Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250408_132123_add_header_metadata' +2026-03-02T12:33:31.585856Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250408_132123_add_header_metadata' has been applied +2026-03-02T12:33:31.586097Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250408_150338_load_header_metadata' +2026-03-02T12:33:31.586110Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250408_150338_load_header_metadata' has been applied +2026-03-02T12:33:31.586219Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250411_072004_add_l2_block' +2026-03-02T12:33:31.587769Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250411_072004_add_l2_block' has been applied +2026-03-02T12:33:31.588086Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250616_223947_add_metadata' +2026-03-02T12:33:31.588519Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250616_223947_add_metadata' has been applied +2026-03-02T12:33:31.588624Z INFO node{idx=2}: 
sea_orm_migration::migrator: Applying migration 'm20250904_175949_block_signature' +2026-03-02T12:33:31.588837Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250904_175949_block_signature' has been applied +2026-03-02T12:33:31.589095Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20251028_110719_add_l1_block_table' +2026-03-02T12:33:31.589287Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20251028_110719_add_l1_block_table' has been applied +2026-03-02T12:33:31.597237Z INFO node{idx=2}: scroll::node::args: Starting engine driver fcs=ForkchoiceState { head: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, safe: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, finalized: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 } } payload_building_duration=40 +2026-03-02T12:33:31.597270Z INFO node{idx=2}: scroll::providers: Running with mock blob provider - all other blob provider configurations are ignored +2026-03-02T12:33:31.598247Z INFO node{idx=2}: reth::cli: Starting consensus engine +2026-03-02T12:33:31.598321Z INFO scroll::derivation_pipeline: Starting derivation pipeline worker +2026-03-02T12:33:31.598603Z INFO node{idx=2}: reth_node_events::node: Forkchoice updated head_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 safe_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 finalized_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 +2026-03-02T12:33:31.617418Z INFO node{idx=2}: reth_e2e_test_utils::network: Session established with peer: 0x1ff1c09c1caa8fa9327009d46565fa6871ffcce57b560c26b8ef453fa233a3b9a5a7aeeee9ece98d9c83c721950e960dce5cd36637fe73830adc9418af6bb88a +2026-03-02T12:33:31.617845Z INFO node{idx=2}: reth_e2e_test_utils::network: Session established with peer: 
0x871a1b8b805b16f5d2bf85bbb4f144bb39b7175d725d42e8aa0e7903d936b1d9a0fdf6127430eb80df48fcdd749dd721fd87f1d4ade03e17ce932a79196aade3 +2026-03-02T12:33:34.576380Z INFO node{idx=2}: reth::cli: Status connected_peers=1 latest_block=0 +2026-03-02T12:33:36.582596Z INFO node{idx=2}: reth_e2e_test_utils::network: Session established with peer: 0x871a1b8b805b16f5d2bf85bbb4f144bb39b7175d725d42e8aa0e7903d936b1d9a0fdf6127430eb80df48fcdd749dd721fd87f1d4ade03e17ce932a79196aade3 +2026-03-02T12:33:36.583248Z INFO node{idx=2}: reth_e2e_test_utils::network: Session established with peer: 0x68d342edd2921250b59d89277d689bdd07e0068ae80a20b75cf9d6c3f56991b0a46b0b717c925efe7d9f87771d6fd89fc099fded52bd6eebd754be8b882fdc1f +2026-03-02T12:34:13.640892Z INFO node{idx=1}: reth::cli: Wrote network peers to file peers_file="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-ExVZKSsZ/known-peers.json" +2026-03-02T12:34:13.640899Z INFO node{idx=2}: reth::cli: Wrote network peers to file peers_file="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-E87A3wU3/known-peers.json" +2026-03-02T12:34:13.640974Z INFO node{idx=0}: reth::cli: Wrote network peers to file peers_file="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-BTYrGfAQ/known-peers.json" diff --git a/scroll-debug-47318.log b/scroll-debug-47318.log new file mode 100644 index 00000000..41c57b8a --- /dev/null +++ b/scroll-debug-47318.log @@ -0,0 +1,676 @@ +2026-03-02T12:43:23.941198Z INFO node{idx=0}: reth::cli: Saving prune config to toml file +2026-03-02T12:43:23.941691Z INFO node{idx=0}: reth::cli: Configuration loaded path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-KUYbLIZB/reth.toml" +2026-03-02T12:43:23.942422Z INFO node{idx=0}: reth::cli: Database opened +2026-03-02T12:43:24.245935Z INFO node{idx=0}: reth::cli: +Pre-merge hard forks (block based): +- Homestead @0 +- Tangerine @0 +- SpuriousDragon @0 +- Byzantium @0 +- Constantinople @0 +- Petersburg @0 +- Istanbul @0 +- Berlin @0 +- London @0 
+- Archimedes @0 +- Bernoulli @0 +- Curie @0 + +Post-merge hard forks (timestamp based): +- Shanghai @0 +- Darwin @0 +- DarwinV2 @0 +- Euclid @0 +- EuclidV2 @0 +- Feynman @0 +- Galileo @0 +- GalileoV2 @0 +2026-03-02T12:43:24.248691Z INFO node{idx=0}: reth::cli: Transaction pool initialized +2026-03-02T12:43:24.560959Z INFO node{idx=0}: reth::cli: P2P networking initialized enode=enode://2a854fec1913ab788092e38bc059988bd54406e53074eaf80662b3f243810b82477f0830b7739dd12a550405b721d790b8fdda008a89da71779a0ed90192e8b3@127.0.0.1:52288 +2026-03-02T12:43:24.561933Z INFO node{idx=0}: reth::cli: StaticFileProducer initialized +2026-03-02T12:43:24.562765Z INFO node{idx=0}: reth::cli: Verifying storage consistency. +2026-03-02T12:43:24.563206Z INFO node{idx=0}: reth::cli: Pruner initialized prune_config=PruneConfig { block_interval: 5, segments: PruneModes { sender_recovery: None, transaction_lookup: None, receipts: None, account_history: None, storage_history: None, bodies_history: None, receipts_log_filter: ReceiptsLogPruneConfig({}) } } +2026-03-02T12:43:24.563372Z INFO node{idx=0}: reth::cli: Creating JWT auth secret file path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-KUYbLIZB/jwt.hex" +2026-03-02T12:43:24.564552Z INFO node{idx=0}: reth::cli: Consensus engine initialized +2026-03-02T12:43:24.565074Z INFO node{idx=0}: reth::cli: Engine API handler initialized +2026-03-02T12:43:24.569199Z INFO node{idx=0}: reth::cli: RPC auth server started url=127.0.0.1:52289 +2026-03-02T12:43:24.570829Z INFO node{idx=0}: reth::cli: RPC IPC server started path=/tmp/reth.ipc-ZcdwhuVq +2026-03-02T12:43:24.570874Z INFO node{idx=0}: reth::cli: RPC HTTP server started url=127.0.0.1:52290 +2026-03-02T12:43:24.570927Z INFO node{idx=0}: rollup_node::args: Building rollup node with config: +ScrollRollupNodeConfig { + test: true, + consensus_args: ConsensusArgs { + algorithm: Noop, + authorized_signer: None, + }, + database_args: RollupNodeDatabaseArgs { + rn_db_path: None, + }, + 
chain_orchestrator_args: ChainOrchestratorArgs { + optimistic_sync_trigger: 100, + chain_buffer_size: 100, + }, + engine_driver_args: EngineDriverArgs { + sync_at_startup: true, + }, + blob_provider_args: BlobProviderArgs { + beacon_node_urls: None, + s3_url: None, + anvil_url: None, + mock: true, + compute_units_per_second: 0, + max_retries: 0, + initial_backoff: 0, + }, + l1_provider_args: L1ProviderArgs { + url: None, + compute_units_per_second: 10000, + max_retries: 10, + initial_backoff: 100, + logs_query_block_range: 500, + cache_max_items: 100, + }, + sequencer_args: SequencerArgs { + sequencer_enabled: true, + auto_start: false, + block_time: 100, + payload_building_duration: 40, + fee_recipient: 0x0000000000000000000000000000000000000000, + l1_message_inclusion_mode: BlockDepth( + 0, + ), + allow_empty_blocks: true, + max_l1_messages: None, + }, + network_args: RollupNodeNetworkArgs { + enable_eth_scroll_wire_bridge: true, + enable_scroll_wire: true, + sequencer_url: None, + signer_address: None, + }, + rpc_args: RpcArgs { + basic_enabled: true, + admin_enabled: true, + }, + signer_args: SignerArgs { + key_file: None, + aws_kms_key_id: None, + private_key: None, + }, + gas_price_oracle_args: RollupNodeGasPriceOracleArgs { + default_suggested_priority_fee: 0, + }, + pprof_args: PprofArgs { + enabled: false, + addr: 0.0.0.0:6868, + default_duration: 30, + }, + database: Some( + Database { + database: Retry { + inner: DatabaseInner { + connection: SqlxSqlitePoolConnection, + write_lock: Mutex { + data: (), + }, + read_locks: Semaphore { + ll_sem: Semaphore { + permits: 5, + }, + resource_span: Span { + name: "runtime.resource", + level: Level( + Trace, + ), + target: "tokio::sync::semaphore", + disabled: true, + module_path: "tokio::sync::semaphore", + line: 461, + file: "/Users/yiweichi/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tokio-1.48.0/src/sync/semaphore.rs", + }, + }, + metrics: DatabaseMetrics, + tmp_dir: None, + }, + max_retries: None, + 
initial_delay_ms: 50, + exponential_backoff: false, + metrics: RetryMetrics, + }, + metrics: { + RemoveL1BlockInfoLeq: DatabaseOperationMetrics, + SetBatchRevertBlockNumberForBatchRange: DatabaseOperationMetrics, + DeleteL2BlocksGtBlockNumber: DatabaseOperationMetrics, + InsertBlocks: DatabaseOperationMetrics, + DeleteL1MessagesGt: DatabaseOperationMetrics, + FetchAndUpdateUnprocessedFinalizedBatches: DatabaseOperationMetrics, + InsertGenesisBlock: DatabaseOperationMetrics, + UpdateL1MessagesFromL2Blocks: DatabaseOperationMetrics, + GetHighestBlockForBatchIndex: DatabaseOperationMetrics, + InsertSignatures: DatabaseOperationMetrics, + SetProcessedL1BlockNumber: DatabaseOperationMetrics, + DeleteBatchesGtBatchIndex: DatabaseOperationMetrics, + SetLatestL1BlockNumber: DatabaseOperationMetrics, + PurgeL1MessageToL2BlockMappings: DatabaseOperationMetrics, + InsertSignature: DatabaseOperationMetrics, + FetchAndUpdateUnprocessedCommittedBatches: DatabaseOperationMetrics, + GetL2BlockAndBatchInfoByHash: DatabaseOperationMetrics, + UpdateL1MessagesWithL2Block: DatabaseOperationMetrics, + GetFinalizedL1BlockNumber: DatabaseOperationMetrics, + GetLatestL1BlockNumber: DatabaseOperationMetrics, + DeleteBatchFinalizationGtBlockNumber: DatabaseOperationMetrics, + GetNL2BlockDataHint: DatabaseOperationMetrics, + GetLatestSafeL2Info: DatabaseOperationMetrics, + DeleteBatchesGtBlockNumber: DatabaseOperationMetrics, + RemoveL1BlockInfoGt: DatabaseOperationMetrics, + UpdateSkippedL1Messages: DatabaseOperationMetrics, + InsertBatch: DatabaseOperationMetrics, + SetL2HeadBlockNumber: DatabaseOperationMetrics, + InsertBatchConsolidationOutcome: DatabaseOperationMetrics, + GetBatchStatusByHash: DatabaseOperationMetrics, + GetL1BlockInfo: DatabaseOperationMetrics, + GetProcessedL1BlockNumber: DatabaseOperationMetrics, + GetL2HeadBlockNumber: DatabaseOperationMetrics, + SetFinalizedL1BlockNumber: DatabaseOperationMetrics, + GetL2BlockInfoByNumber: DatabaseOperationMetrics, + 
UpdateBatchStatus: DatabaseOperationMetrics, + InsertL1BlockInfo: DatabaseOperationMetrics, + Unwind: DatabaseOperationMetrics, + GetSignature: DatabaseOperationMetrics, + FinalizeConsolidatedBatches: DatabaseOperationMetrics, + ChangeBatchProcessingToCommittedStatus: DatabaseOperationMetrics, + GetHighestBlockForBatchHash: DatabaseOperationMetrics, + InsertL1Message: DatabaseOperationMetrics, + GetLatestIndexedEventL1BlockNumber: DatabaseOperationMetrics, + InsertBlock: DatabaseOperationMetrics, + GetBatchByIndex: DatabaseOperationMetrics, + DeleteBatchRevertGtBlockNumber: DatabaseOperationMetrics, + GetMaxBlockDataHintBlockNumber: DatabaseOperationMetrics, + PrepareOnStartup: DatabaseOperationMetrics, + DeleteL2BlocksGtBatchIndex: DatabaseOperationMetrics, + GetNL1Messages: DatabaseOperationMetrics, + FinalizeBatchesUpToIndex: DatabaseOperationMetrics, + GetBatchByHash: DatabaseOperationMetrics, + }, + }, + ), +} +2026-03-02T12:43:24.577126Z INFO node{idx=0}: sea_orm_migration::migrator: Applying all pending migrations +2026-03-02T12:43:24.577883Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20220101_000001_create_batch_commit_table' +2026-03-02T12:43:24.579501Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20220101_000001_create_batch_commit_table' has been applied +2026-03-02T12:43:24.579715Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250304_125946_add_l1_msg_table' +2026-03-02T12:43:24.580314Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250304_125946_add_l1_msg_table' has been applied +2026-03-02T12:43:24.580506Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250408_132123_add_header_metadata' +2026-03-02T12:43:24.580660Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250408_132123_add_header_metadata' has been applied +2026-03-02T12:43:24.580855Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 
'm20250408_150338_load_header_metadata' +2026-03-02T12:43:24.580868Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250408_150338_load_header_metadata' has been applied +2026-03-02T12:43:24.580975Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250411_072004_add_l2_block' +2026-03-02T12:43:24.582094Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250411_072004_add_l2_block' has been applied +2026-03-02T12:43:24.582322Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250616_223947_add_metadata' +2026-03-02T12:43:24.582676Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250616_223947_add_metadata' has been applied +2026-03-02T12:43:24.582789Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250904_175949_block_signature' +2026-03-02T12:43:24.582982Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250904_175949_block_signature' has been applied +2026-03-02T12:43:24.583192Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20251028_110719_add_l1_block_table' +2026-03-02T12:43:24.583360Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20251028_110719_add_l1_block_table' has been applied +2026-03-02T12:43:24.594810Z INFO node{idx=0}: scroll::node::args: Starting engine driver fcs=ForkchoiceState { head: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, safe: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, finalized: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 } } payload_building_duration=40 +2026-03-02T12:43:24.594898Z INFO node{idx=0}: scroll::providers: Running with mock blob provider - all other blob provider configurations are ignored +2026-03-02T12:43:24.596013Z INFO node{idx=0}: reth::cli: Starting consensus engine +2026-03-02T12:43:24.596116Z INFO 
scroll::derivation_pipeline: Starting derivation pipeline worker +2026-03-02T12:43:24.596611Z INFO node{idx=0}: reth_node_events::node: Forkchoice updated head_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 safe_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 finalized_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 +2026-03-02T12:43:24.601141Z WARN node{idx=1}: reth_node_builder::launch::common: Failed to build global thread pool err=The global thread pool has already been initialized. +2026-03-02T12:43:24.601466Z INFO node{idx=1}: reth::cli: Saving prune config to toml file +2026-03-02T12:43:24.601738Z INFO node{idx=1}: reth::cli: Configuration loaded path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-UXOFiwdf/reth.toml" +2026-03-02T12:43:24.602128Z INFO node{idx=1}: reth::cli: Database opened +2026-03-02T12:43:24.687078Z INFO node{idx=1}: reth::cli: +Pre-merge hard forks (block based): +- Homestead @0 +- Tangerine @0 +- SpuriousDragon @0 +- Byzantium @0 +- Constantinople @0 +- Petersburg @0 +- Istanbul @0 +- Berlin @0 +- London @0 +- Archimedes @0 +- Bernoulli @0 +- Curie @0 + +Post-merge hard forks (timestamp based): +- Shanghai @0 +- Darwin @0 +- DarwinV2 @0 +- Euclid @0 +- EuclidV2 @0 +- Feynman @0 +- Galileo @0 +- GalileoV2 @0 +2026-03-02T12:43:24.687735Z INFO node{idx=1}: reth::cli: Transaction pool initialized +2026-03-02T12:43:24.689263Z INFO node{idx=1}: reth::cli: P2P networking initialized enode=enode://be607dbc7586bc1ed99f31b81bbe1a132c1d46579aefcbfd1c3a5ef1c8517ed666ff81a5b8d7e6582c106517523e5f72b7e76d466415aa45f95327baadd0c214@127.0.0.1:52295 +2026-03-02T12:43:24.689478Z INFO node{idx=1}: reth::cli: StaticFileProducer initialized +2026-03-02T12:43:24.689877Z INFO node{idx=1}: reth::cli: Verifying storage consistency. 
+2026-03-02T12:43:24.690177Z INFO node{idx=1}: reth::cli: Pruner initialized prune_config=PruneConfig { block_interval: 5, segments: PruneModes { sender_recovery: None, transaction_lookup: None, receipts: None, account_history: None, storage_history: None, bodies_history: None, receipts_log_filter: ReceiptsLogPruneConfig({}) } } +2026-03-02T12:43:24.690328Z INFO node{idx=1}: reth::cli: Creating JWT auth secret file path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-UXOFiwdf/jwt.hex" +2026-03-02T12:43:24.691108Z INFO node{idx=1}: reth::cli: Consensus engine initialized +2026-03-02T12:43:24.691303Z INFO node{idx=1}: reth::cli: Engine API handler initialized +2026-03-02T12:43:24.693805Z INFO node{idx=1}: reth::cli: RPC auth server started url=127.0.0.1:52296 +2026-03-02T12:43:24.695396Z INFO node{idx=1}: reth::cli: RPC IPC server started path=/tmp/reth.ipc-anQ1yfgM +2026-03-02T12:43:24.695406Z INFO node{idx=1}: reth::cli: RPC HTTP server started url=127.0.0.1:52297 +2026-03-02T12:43:24.695432Z INFO node{idx=1}: rollup_node::args: Building rollup node with config: +ScrollRollupNodeConfig { + test: true, + consensus_args: ConsensusArgs { + algorithm: Noop, + authorized_signer: None, + }, + database_args: RollupNodeDatabaseArgs { + rn_db_path: None, + }, + chain_orchestrator_args: ChainOrchestratorArgs { + optimistic_sync_trigger: 100, + chain_buffer_size: 100, + }, + engine_driver_args: EngineDriverArgs { + sync_at_startup: true, + }, + blob_provider_args: BlobProviderArgs { + beacon_node_urls: None, + s3_url: None, + anvil_url: None, + mock: true, + compute_units_per_second: 0, + max_retries: 0, + initial_backoff: 0, + }, + l1_provider_args: L1ProviderArgs { + url: None, + compute_units_per_second: 10000, + max_retries: 10, + initial_backoff: 100, + logs_query_block_range: 500, + cache_max_items: 100, + }, + sequencer_args: SequencerArgs { + sequencer_enabled: false, + auto_start: false, + block_time: 100, + payload_building_duration: 40, + fee_recipient: 
0x0000000000000000000000000000000000000000, + l1_message_inclusion_mode: BlockDepth( + 0, + ), + allow_empty_blocks: true, + max_l1_messages: None, + }, + network_args: RollupNodeNetworkArgs { + enable_eth_scroll_wire_bridge: true, + enable_scroll_wire: true, + sequencer_url: None, + signer_address: None, + }, + rpc_args: RpcArgs { + basic_enabled: true, + admin_enabled: true, + }, + signer_args: SignerArgs { + key_file: None, + aws_kms_key_id: None, + private_key: None, + }, + gas_price_oracle_args: RollupNodeGasPriceOracleArgs { + default_suggested_priority_fee: 0, + }, + pprof_args: PprofArgs { + enabled: false, + addr: 0.0.0.0:6868, + default_duration: 30, + }, + database: Some( + Database { + database: Retry { + inner: DatabaseInner { + connection: SqlxSqlitePoolConnection, + write_lock: Mutex { + data: (), + }, + read_locks: Semaphore { + ll_sem: Semaphore { + permits: 5, + }, + resource_span: Span { + name: "runtime.resource", + level: Level( + Trace, + ), + target: "tokio::sync::semaphore", + disabled: true, + module_path: "tokio::sync::semaphore", + line: 461, + file: "/Users/yiweichi/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tokio-1.48.0/src/sync/semaphore.rs", + }, + }, + metrics: DatabaseMetrics, + tmp_dir: None, + }, + max_retries: None, + initial_delay_ms: 50, + exponential_backoff: false, + metrics: RetryMetrics, + }, + metrics: { + DeleteBatchFinalizationGtBlockNumber: DatabaseOperationMetrics, + RemoveL1BlockInfoGt: DatabaseOperationMetrics, + FetchAndUpdateUnprocessedCommittedBatches: DatabaseOperationMetrics, + DeleteBatchesGtBlockNumber: DatabaseOperationMetrics, + Unwind: DatabaseOperationMetrics, + GetBatchByHash: DatabaseOperationMetrics, + GetBatchStatusByHash: DatabaseOperationMetrics, + InsertBlocks: DatabaseOperationMetrics, + GetL2BlockInfoByNumber: DatabaseOperationMetrics, + GetHighestBlockForBatchIndex: DatabaseOperationMetrics, + FinalizeBatchesUpToIndex: DatabaseOperationMetrics, + InsertBatchConsolidationOutcome: 
DatabaseOperationMetrics, + SetLatestL1BlockNumber: DatabaseOperationMetrics, + UpdateL1MessagesFromL2Blocks: DatabaseOperationMetrics, + SetL2HeadBlockNumber: DatabaseOperationMetrics, + ChangeBatchProcessingToCommittedStatus: DatabaseOperationMetrics, + InsertBatch: DatabaseOperationMetrics, + FetchAndUpdateUnprocessedFinalizedBatches: DatabaseOperationMetrics, + InsertBlock: DatabaseOperationMetrics, + DeleteBatchesGtBatchIndex: DatabaseOperationMetrics, + InsertL1BlockInfo: DatabaseOperationMetrics, + SetBatchRevertBlockNumberForBatchRange: DatabaseOperationMetrics, + FinalizeConsolidatedBatches: DatabaseOperationMetrics, + UpdateL1MessagesWithL2Block: DatabaseOperationMetrics, + InsertSignatures: DatabaseOperationMetrics, + GetLatestL1BlockNumber: DatabaseOperationMetrics, + GetNL2BlockDataHint: DatabaseOperationMetrics, + SetFinalizedL1BlockNumber: DatabaseOperationMetrics, + PurgeL1MessageToL2BlockMappings: DatabaseOperationMetrics, + GetMaxBlockDataHintBlockNumber: DatabaseOperationMetrics, + GetHighestBlockForBatchHash: DatabaseOperationMetrics, + GetL2BlockAndBatchInfoByHash: DatabaseOperationMetrics, + GetBatchByIndex: DatabaseOperationMetrics, + GetL1BlockInfo: DatabaseOperationMetrics, + DeleteBatchRevertGtBlockNumber: DatabaseOperationMetrics, + PrepareOnStartup: DatabaseOperationMetrics, + InsertSignature: DatabaseOperationMetrics, + UpdateSkippedL1Messages: DatabaseOperationMetrics, + GetFinalizedL1BlockNumber: DatabaseOperationMetrics, + GetLatestIndexedEventL1BlockNumber: DatabaseOperationMetrics, + GetSignature: DatabaseOperationMetrics, + SetProcessedL1BlockNumber: DatabaseOperationMetrics, + GetNL1Messages: DatabaseOperationMetrics, + DeleteL2BlocksGtBatchIndex: DatabaseOperationMetrics, + GetLatestSafeL2Info: DatabaseOperationMetrics, + InsertGenesisBlock: DatabaseOperationMetrics, + GetProcessedL1BlockNumber: DatabaseOperationMetrics, + GetL2HeadBlockNumber: DatabaseOperationMetrics, + DeleteL2BlocksGtBlockNumber: DatabaseOperationMetrics, + 
InsertL1Message: DatabaseOperationMetrics, + RemoveL1BlockInfoLeq: DatabaseOperationMetrics, + UpdateBatchStatus: DatabaseOperationMetrics, + DeleteL1MessagesGt: DatabaseOperationMetrics, + }, + }, + ), +} +2026-03-02T12:43:24.696340Z INFO node{idx=1}: sea_orm_migration::migrator: Applying all pending migrations +2026-03-02T12:43:24.696770Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20220101_000001_create_batch_commit_table' +2026-03-02T12:43:24.698071Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20220101_000001_create_batch_commit_table' has been applied +2026-03-02T12:43:24.698239Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250304_125946_add_l1_msg_table' +2026-03-02T12:43:24.698820Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250304_125946_add_l1_msg_table' has been applied +2026-03-02T12:43:24.699019Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250408_132123_add_header_metadata' +2026-03-02T12:43:24.699158Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250408_132123_add_header_metadata' has been applied +2026-03-02T12:43:24.699328Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250408_150338_load_header_metadata' +2026-03-02T12:43:24.699337Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250408_150338_load_header_metadata' has been applied +2026-03-02T12:43:24.699413Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250411_072004_add_l2_block' +2026-03-02T12:43:24.700540Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250411_072004_add_l2_block' has been applied +2026-03-02T12:43:24.700745Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250616_223947_add_metadata' +2026-03-02T12:43:24.701060Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250616_223947_add_metadata' has been applied +2026-03-02T12:43:24.701148Z INFO node{idx=1}: 
sea_orm_migration::migrator: Applying migration 'm20250904_175949_block_signature' +2026-03-02T12:43:24.701315Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250904_175949_block_signature' has been applied +2026-03-02T12:43:24.701515Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20251028_110719_add_l1_block_table' +2026-03-02T12:43:24.701662Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20251028_110719_add_l1_block_table' has been applied +2026-03-02T12:43:24.708674Z INFO node{idx=1}: scroll::node::args: Starting engine driver fcs=ForkchoiceState { head: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, safe: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, finalized: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 } } payload_building_duration=40 +2026-03-02T12:43:24.708704Z INFO node{idx=1}: scroll::providers: Running with mock blob provider - all other blob provider configurations are ignored +2026-03-02T12:43:24.709503Z INFO node{idx=1}: reth::cli: Starting consensus engine +2026-03-02T12:43:24.709547Z INFO scroll::derivation_pipeline: Starting derivation pipeline worker +2026-03-02T12:43:24.709794Z INFO node{idx=1}: reth_node_events::node: Forkchoice updated head_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 safe_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 finalized_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 +2026-03-02T12:43:27.566777Z INFO node{idx=0}: reth::cli: Status connected_peers=0 latest_block=0 +2026-03-02T12:43:27.693300Z INFO node{idx=1}: reth::cli: Status connected_peers=0 latest_block=0 +2026-03-02T12:43:29.572654Z INFO node{idx=1}: reth_e2e_test_utils::network: Session established with peer: 
0x2a854fec1913ab788092e38bc059988bd54406e53074eaf80662b3f243810b82477f0830b7739dd12a550405b721d790b8fdda008a89da71779a0ed90192e8b3 +2026-03-02T12:43:29.573248Z INFO node{idx=1}: reth_e2e_test_utils::network: Session established with peer: 0xbe607dbc7586bc1ed99f31b81bbe1a132c1d46579aefcbfd1c3a5ef1c8517ed666ff81a5b8d7e6582c106517523e5f72b7e76d466415aa45f95327baadd0c214 +2026-03-02T12:43:29.580242Z WARN node{idx=2}: reth_node_builder::launch::common: Failed to build global thread pool err=The global thread pool has already been initialized. +2026-03-02T12:43:29.580775Z INFO node{idx=2}: reth::cli: Saving prune config to toml file +2026-03-02T12:43:29.581176Z INFO node{idx=2}: reth::cli: Configuration loaded path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-43IjIPFl/reth.toml" +2026-03-02T12:43:29.581597Z INFO node{idx=2}: reth::cli: Database opened +2026-03-02T12:43:29.674645Z INFO node{idx=2}: reth::cli: +Pre-merge hard forks (block based): +- Homestead @0 +- Tangerine @0 +- SpuriousDragon @0 +- Byzantium @0 +- Constantinople @0 +- Petersburg @0 +- Istanbul @0 +- Berlin @0 +- London @0 +- Archimedes @0 +- Bernoulli @0 +- Curie @0 + +Post-merge hard forks (timestamp based): +- Shanghai @0 +- Darwin @0 +- DarwinV2 @0 +- Euclid @0 +- EuclidV2 @0 +- Feynman @0 +- Galileo @0 +- GalileoV2 @0 +2026-03-02T12:43:29.675497Z INFO node{idx=2}: reth::cli: Transaction pool initialized +2026-03-02T12:43:29.677419Z INFO node{idx=2}: reth::cli: P2P networking initialized enode=enode://985963d1985d169e73e741aaa7af71eacad84c47e1247cabd01fe37de11086d5bbfb2301a1c49885981111067d9a950d180a99c2d38869cbd496306d7da9a2da@127.0.0.1:52305 +2026-03-02T12:43:29.677705Z INFO node{idx=2}: reth::cli: StaticFileProducer initialized +2026-03-02T12:43:29.678221Z INFO node{idx=2}: reth::cli: Verifying storage consistency. 
+2026-03-02T12:43:29.678596Z INFO node{idx=2}: reth::cli: Pruner initialized prune_config=PruneConfig { block_interval: 5, segments: PruneModes { sender_recovery: None, transaction_lookup: None, receipts: None, account_history: None, storage_history: None, bodies_history: None, receipts_log_filter: ReceiptsLogPruneConfig({}) } } +2026-03-02T12:43:29.678791Z INFO node{idx=2}: reth::cli: Creating JWT auth secret file path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-43IjIPFl/jwt.hex" +2026-03-02T12:43:29.679823Z INFO node{idx=2}: reth::cli: Consensus engine initialized +2026-03-02T12:43:29.680040Z INFO node{idx=2}: reth::cli: Engine API handler initialized +2026-03-02T12:43:29.682876Z INFO node{idx=2}: reth::cli: RPC auth server started url=127.0.0.1:52306 +2026-03-02T12:43:29.684728Z INFO node{idx=2}: reth::cli: RPC IPC server started path=/tmp/reth.ipc-joNGsKf3 +2026-03-02T12:43:29.684740Z INFO node{idx=2}: reth::cli: RPC HTTP server started url=127.0.0.1:52307 +2026-03-02T12:43:29.684769Z INFO node{idx=2}: rollup_node::args: Building rollup node with config: +ScrollRollupNodeConfig { + test: true, + consensus_args: ConsensusArgs { + algorithm: Noop, + authorized_signer: None, + }, + database_args: RollupNodeDatabaseArgs { + rn_db_path: None, + }, + chain_orchestrator_args: ChainOrchestratorArgs { + optimistic_sync_trigger: 100, + chain_buffer_size: 100, + }, + engine_driver_args: EngineDriverArgs { + sync_at_startup: true, + }, + blob_provider_args: BlobProviderArgs { + beacon_node_urls: None, + s3_url: None, + anvil_url: None, + mock: true, + compute_units_per_second: 0, + max_retries: 0, + initial_backoff: 0, + }, + l1_provider_args: L1ProviderArgs { + url: None, + compute_units_per_second: 10000, + max_retries: 10, + initial_backoff: 100, + logs_query_block_range: 500, + cache_max_items: 100, + }, + sequencer_args: SequencerArgs { + sequencer_enabled: false, + auto_start: false, + block_time: 100, + payload_building_duration: 40, + fee_recipient: 
0x0000000000000000000000000000000000000000, + l1_message_inclusion_mode: BlockDepth( + 0, + ), + allow_empty_blocks: true, + max_l1_messages: None, + }, + network_args: RollupNodeNetworkArgs { + enable_eth_scroll_wire_bridge: true, + enable_scroll_wire: true, + sequencer_url: None, + signer_address: None, + }, + rpc_args: RpcArgs { + basic_enabled: true, + admin_enabled: true, + }, + signer_args: SignerArgs { + key_file: None, + aws_kms_key_id: None, + private_key: None, + }, + gas_price_oracle_args: RollupNodeGasPriceOracleArgs { + default_suggested_priority_fee: 0, + }, + pprof_args: PprofArgs { + enabled: false, + addr: 0.0.0.0:6868, + default_duration: 30, + }, + database: Some( + Database { + database: Retry { + inner: DatabaseInner { + connection: SqlxSqlitePoolConnection, + write_lock: Mutex { + data: (), + }, + read_locks: Semaphore { + ll_sem: Semaphore { + permits: 5, + }, + resource_span: Span { + name: "runtime.resource", + level: Level( + Trace, + ), + target: "tokio::sync::semaphore", + disabled: true, + module_path: "tokio::sync::semaphore", + line: 461, + file: "/Users/yiweichi/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tokio-1.48.0/src/sync/semaphore.rs", + }, + }, + metrics: DatabaseMetrics, + tmp_dir: None, + }, + max_retries: None, + initial_delay_ms: 50, + exponential_backoff: false, + metrics: RetryMetrics, + }, + metrics: { + SetProcessedL1BlockNumber: DatabaseOperationMetrics, + UpdateL1MessagesFromL2Blocks: DatabaseOperationMetrics, + FetchAndUpdateUnprocessedFinalizedBatches: DatabaseOperationMetrics, + Unwind: DatabaseOperationMetrics, + GetNL2BlockDataHint: DatabaseOperationMetrics, + GetHighestBlockForBatchIndex: DatabaseOperationMetrics, + GetNL1Messages: DatabaseOperationMetrics, + DeleteL2BlocksGtBatchIndex: DatabaseOperationMetrics, + GetL1BlockInfo: DatabaseOperationMetrics, + SetBatchRevertBlockNumberForBatchRange: DatabaseOperationMetrics, + InsertBatchConsolidationOutcome: DatabaseOperationMetrics, + 
UpdateL1MessagesWithL2Block: DatabaseOperationMetrics, + GetBatchByHash: DatabaseOperationMetrics, + FetchAndUpdateUnprocessedCommittedBatches: DatabaseOperationMetrics, + GetMaxBlockDataHintBlockNumber: DatabaseOperationMetrics, + DeleteL1MessagesGt: DatabaseOperationMetrics, + UpdateBatchStatus: DatabaseOperationMetrics, + RemoveL1BlockInfoGt: DatabaseOperationMetrics, + GetLatestL1BlockNumber: DatabaseOperationMetrics, + InsertBlock: DatabaseOperationMetrics, + GetL2HeadBlockNumber: DatabaseOperationMetrics, + GetLatestSafeL2Info: DatabaseOperationMetrics, + GetHighestBlockForBatchHash: DatabaseOperationMetrics, + GetSignature: DatabaseOperationMetrics, + InsertGenesisBlock: DatabaseOperationMetrics, + InsertBlocks: DatabaseOperationMetrics, + DeleteBatchesGtBatchIndex: DatabaseOperationMetrics, + SetLatestL1BlockNumber: DatabaseOperationMetrics, + InsertL1Message: DatabaseOperationMetrics, + UpdateSkippedL1Messages: DatabaseOperationMetrics, + InsertSignature: DatabaseOperationMetrics, + DeleteBatchFinalizationGtBlockNumber: DatabaseOperationMetrics, + GetBatchStatusByHash: DatabaseOperationMetrics, + GetFinalizedL1BlockNumber: DatabaseOperationMetrics, + GetBatchByIndex: DatabaseOperationMetrics, + FinalizeBatchesUpToIndex: DatabaseOperationMetrics, + RemoveL1BlockInfoLeq: DatabaseOperationMetrics, + ChangeBatchProcessingToCommittedStatus: DatabaseOperationMetrics, + SetL2HeadBlockNumber: DatabaseOperationMetrics, + InsertL1BlockInfo: DatabaseOperationMetrics, + PrepareOnStartup: DatabaseOperationMetrics, + DeleteL2BlocksGtBlockNumber: DatabaseOperationMetrics, + InsertSignatures: DatabaseOperationMetrics, + InsertBatch: DatabaseOperationMetrics, + DeleteBatchesGtBlockNumber: DatabaseOperationMetrics, + GetLatestIndexedEventL1BlockNumber: DatabaseOperationMetrics, + GetProcessedL1BlockNumber: DatabaseOperationMetrics, + GetL2BlockAndBatchInfoByHash: DatabaseOperationMetrics, + PurgeL1MessageToL2BlockMappings: DatabaseOperationMetrics, + GetL2BlockInfoByNumber: 
DatabaseOperationMetrics, + FinalizeConsolidatedBatches: DatabaseOperationMetrics, + DeleteBatchRevertGtBlockNumber: DatabaseOperationMetrics, + SetFinalizedL1BlockNumber: DatabaseOperationMetrics, + }, + }, + ), +} +2026-03-02T12:43:29.685918Z INFO node{idx=2}: sea_orm_migration::migrator: Applying all pending migrations +2026-03-02T12:43:29.686437Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20220101_000001_create_batch_commit_table' +2026-03-02T12:43:29.687907Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20220101_000001_create_batch_commit_table' has been applied +2026-03-02T12:43:29.688169Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250304_125946_add_l1_msg_table' +2026-03-02T12:43:29.688907Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250304_125946_add_l1_msg_table' has been applied +2026-03-02T12:43:29.689124Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250408_132123_add_header_metadata' +2026-03-02T12:43:29.689278Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250408_132123_add_header_metadata' has been applied +2026-03-02T12:43:29.689477Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250408_150338_load_header_metadata' +2026-03-02T12:43:29.689487Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250408_150338_load_header_metadata' has been applied +2026-03-02T12:43:29.689607Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250411_072004_add_l2_block' +2026-03-02T12:43:29.691046Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250411_072004_add_l2_block' has been applied +2026-03-02T12:43:29.691309Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250616_223947_add_metadata' +2026-03-02T12:43:29.691681Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250616_223947_add_metadata' has been applied +2026-03-02T12:43:29.691777Z INFO 
node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250904_175949_block_signature' +2026-03-02T12:43:29.692007Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250904_175949_block_signature' has been applied +2026-03-02T12:43:29.692244Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20251028_110719_add_l1_block_table' +2026-03-02T12:43:29.692428Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20251028_110719_add_l1_block_table' has been applied +2026-03-02T12:43:29.700481Z INFO node{idx=2}: scroll::node::args: Starting engine driver fcs=ForkchoiceState { head: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, safe: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, finalized: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 } } payload_building_duration=40 +2026-03-02T12:43:29.700532Z INFO node{idx=2}: scroll::providers: Running with mock blob provider - all other blob provider configurations are ignored +2026-03-02T12:43:29.701494Z INFO node{idx=2}: reth::cli: Starting consensus engine +2026-03-02T12:43:29.701557Z INFO scroll::derivation_pipeline: Starting derivation pipeline worker +2026-03-02T12:43:29.701941Z INFO node{idx=2}: reth_node_events::node: Forkchoice updated head_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 safe_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 finalized_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 +2026-03-02T12:43:32.682331Z INFO node{idx=2}: reth::cli: Status connected_peers=0 latest_block=0 +2026-03-02T12:43:34.696510Z INFO node{idx=2}: reth_e2e_test_utils::network: Session established with peer: 0xbe607dbc7586bc1ed99f31b81bbe1a132c1d46579aefcbfd1c3a5ef1c8517ed666ff81a5b8d7e6582c106517523e5f72b7e76d466415aa45f95327baadd0c214 
+2026-03-02T12:43:34.696936Z INFO node{idx=2}: reth_e2e_test_utils::network: Session established with peer: 0x985963d1985d169e73e741aaa7af71eacad84c47e1247cabd01fe37de11086d5bbfb2301a1c49885981111067d9a950d180a99c2d38869cbd496306d7da9a2da +2026-03-02T12:43:39.687164Z INFO node{idx=2}: reth_e2e_test_utils::network: Session established with peer: 0x985963d1985d169e73e741aaa7af71eacad84c47e1247cabd01fe37de11086d5bbfb2301a1c49885981111067d9a950d180a99c2d38869cbd496306d7da9a2da +2026-03-02T12:43:39.687776Z INFO node{idx=2}: reth_e2e_test_utils::network: Session established with peer: 0x2a854fec1913ab788092e38bc059988bd54406e53074eaf80662b3f243810b82477f0830b7739dd12a550405b721d790b8fdda008a89da71779a0ed90192e8b3 +2026-03-02T12:44:42.565673Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-03-02T12:44:42.692899Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-03-02T12:44:47.681405Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-03-02T12:45:57.565270Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-03-02T12:45:57.691842Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-03-02T12:46:02.679649Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-03-02T12:46:59.731446Z INFO node{idx=0}: reth::cli: Wrote network peers to file peers_file="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-KUYbLIZB/known-peers.json" +2026-03-02T12:46:59.733156Z INFO node{idx=1}: reth::cli: Wrote network peers to file peers_file="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-UXOFiwdf/known-peers.json" diff --git a/scroll-debug-51668.log b/scroll-debug-51668.log new file mode 100644 index 00000000..e69de29b diff --git a/scroll-debug-93283.log b/scroll-debug-93283.log new file mode 100644 index 00000000..48bbb5da --- /dev/null +++ b/scroll-debug-93283.log @@ -0,0 +1,862 @@ +2026-02-28T21:43:50.406970Z INFO node{idx=0}: 
reth::cli: Saving prune config to toml file +2026-02-28T21:43:50.407491Z INFO node{idx=0}: reth::cli: Configuration loaded path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-iZaMJRZv/reth.toml" +2026-02-28T21:43:50.408368Z INFO node{idx=0}: reth::cli: Database opened +2026-02-28T21:43:50.709233Z INFO node{idx=0}: reth::cli: +Pre-merge hard forks (block based): +- Homestead @0 +- Tangerine @0 +- SpuriousDragon @0 +- Byzantium @0 +- Constantinople @0 +- Petersburg @0 +- Istanbul @0 +- Berlin @0 +- London @0 +- Archimedes @0 +- Bernoulli @0 +- Curie @0 + +Post-merge hard forks (timestamp based): +- Shanghai @0 +- Darwin @0 +- DarwinV2 @0 +- Euclid @0 +- EuclidV2 @0 +- Feynman @0 +- Galileo @0 +- GalileoV2 @0 +2026-02-28T21:43:50.711367Z INFO node{idx=0}: reth::cli: Transaction pool initialized +2026-02-28T21:43:51.042445Z INFO node{idx=0}: reth::cli: P2P networking initialized enode=enode://2c7946ba72f3c7890775cb525fd39dea7e107cdbe4c34d70909b19aeecf2866b48271d2c07b28d8e9420b954e40a137d66ccefbf3f8253a623a64dbf8c236b9d@127.0.0.1:54050 +2026-02-28T21:43:51.043328Z INFO node{idx=0}: reth::cli: StaticFileProducer initialized +2026-02-28T21:43:51.044182Z INFO node{idx=0}: reth::cli: Verifying storage consistency. 
+2026-02-28T21:43:51.044675Z INFO node{idx=0}: reth::cli: Pruner initialized prune_config=PruneConfig { block_interval: 5, segments: PruneModes { sender_recovery: None, transaction_lookup: None, receipts: None, account_history: None, storage_history: None, bodies_history: None, receipts_log_filter: ReceiptsLogPruneConfig({}) } } +2026-02-28T21:43:51.044846Z INFO node{idx=0}: reth::cli: Creating JWT auth secret file path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-iZaMJRZv/jwt.hex" +2026-02-28T21:43:51.046173Z INFO node{idx=0}: reth::cli: Consensus engine initialized +2026-02-28T21:43:51.046732Z INFO node{idx=0}: reth::cli: Engine API handler initialized +2026-02-28T21:43:51.051093Z INFO node{idx=0}: reth::cli: RPC auth server started url=127.0.0.1:54051 +2026-02-28T21:43:51.052733Z INFO node{idx=0}: reth::cli: RPC IPC server started path=/tmp/reth.ipc-XtAHMyDd +2026-02-28T21:43:51.052777Z INFO node{idx=0}: reth::cli: RPC HTTP server started url=127.0.0.1:54052 +2026-02-28T21:43:51.052844Z INFO node{idx=0}: rollup_node::args: Building rollup node with config: +ScrollRollupNodeConfig { + test: true, + consensus_args: ConsensusArgs { + algorithm: Noop, + authorized_signer: None, + }, + database_args: RollupNodeDatabaseArgs { + rn_db_path: None, + }, + chain_orchestrator_args: ChainOrchestratorArgs { + optimistic_sync_trigger: 100, + chain_buffer_size: 100, + }, + engine_driver_args: EngineDriverArgs { + sync_at_startup: true, + }, + blob_provider_args: BlobProviderArgs { + beacon_node_urls: None, + s3_url: None, + anvil_url: None, + mock: true, + compute_units_per_second: 0, + max_retries: 0, + initial_backoff: 0, + }, + l1_provider_args: L1ProviderArgs { + url: None, + compute_units_per_second: 10000, + max_retries: 10, + initial_backoff: 100, + logs_query_block_range: 500, + cache_max_items: 100, + }, + sequencer_args: SequencerArgs { + sequencer_enabled: true, + auto_start: false, + block_time: 100, + payload_building_duration: 40, + fee_recipient: 
0x0000000000000000000000000000000000000000, + l1_message_inclusion_mode: BlockDepth( + 0, + ), + allow_empty_blocks: true, + max_l1_messages: None, + }, + network_args: RollupNodeNetworkArgs { + enable_eth_scroll_wire_bridge: true, + enable_scroll_wire: true, + sequencer_url: None, + signer_address: None, + }, + rpc_args: RpcArgs { + basic_enabled: true, + admin_enabled: true, + }, + signer_args: SignerArgs { + key_file: None, + aws_kms_key_id: None, + private_key: None, + }, + gas_price_oracle_args: RollupNodeGasPriceOracleArgs { + default_suggested_priority_fee: 0, + }, + pprof_args: PprofArgs { + enabled: false, + addr: 0.0.0.0:6868, + default_duration: 30, + }, + database: Some( + Database { + database: Retry { + inner: DatabaseInner { + connection: SqlxSqlitePoolConnection, + write_lock: Mutex { + data: (), + }, + read_locks: Semaphore { + ll_sem: Semaphore { + permits: 5, + }, + resource_span: Span { + name: "runtime.resource", + level: Level( + Trace, + ), + target: "tokio::sync::semaphore", + disabled: true, + module_path: "tokio::sync::semaphore", + line: 461, + file: "/Users/yiweichi/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tokio-1.48.0/src/sync/semaphore.rs", + }, + }, + metrics: DatabaseMetrics, + tmp_dir: None, + }, + max_retries: None, + initial_delay_ms: 50, + exponential_backoff: false, + metrics: RetryMetrics, + }, + metrics: { + SetLatestL1BlockNumber: DatabaseOperationMetrics, + InsertSignature: DatabaseOperationMetrics, + DeleteBatchesGtBlockNumber: DatabaseOperationMetrics, + GetL2BlockAndBatchInfoByHash: DatabaseOperationMetrics, + GetLatestSafeL2Info: DatabaseOperationMetrics, + RemoveL1BlockInfoLeq: DatabaseOperationMetrics, + InsertL1Message: DatabaseOperationMetrics, + DeleteL2BlocksGtBlockNumber: DatabaseOperationMetrics, + GetBatchByIndex: DatabaseOperationMetrics, + GetBatchByHash: DatabaseOperationMetrics, + UpdateL1MessagesFromL2Blocks: DatabaseOperationMetrics, + GetL2BlockInfoByNumber: DatabaseOperationMetrics, + 
UpdateL1MessagesWithL2Block: DatabaseOperationMetrics, + GetLatestL1BlockNumber: DatabaseOperationMetrics, + PrepareOnStartup: DatabaseOperationMetrics, + GetProcessedL1BlockNumber: DatabaseOperationMetrics, + FinalizeConsolidatedBatches: DatabaseOperationMetrics, + InsertBatch: DatabaseOperationMetrics, + DeleteBatchesGtBatchIndex: DatabaseOperationMetrics, + ChangeBatchProcessingToCommittedStatus: DatabaseOperationMetrics, + GetLatestIndexedEventL1BlockNumber: DatabaseOperationMetrics, + SetBatchRevertBlockNumberForBatchRange: DatabaseOperationMetrics, + RemoveL1BlockInfoGt: DatabaseOperationMetrics, + GetNL2BlockDataHint: DatabaseOperationMetrics, + InsertBlocks: DatabaseOperationMetrics, + GetHighestBlockForBatchHash: DatabaseOperationMetrics, + GetHighestBlockForBatchIndex: DatabaseOperationMetrics, + FetchAndUpdateUnprocessedFinalizedBatches: DatabaseOperationMetrics, + DeleteBatchFinalizationGtBlockNumber: DatabaseOperationMetrics, + GetBatchStatusByHash: DatabaseOperationMetrics, + SetL2HeadBlockNumber: DatabaseOperationMetrics, + GetFinalizedL1BlockNumber: DatabaseOperationMetrics, + GetL2HeadBlockNumber: DatabaseOperationMetrics, + GetMaxBlockDataHintBlockNumber: DatabaseOperationMetrics, + DeleteL1MessagesGt: DatabaseOperationMetrics, + SetFinalizedL1BlockNumber: DatabaseOperationMetrics, + FinalizeBatchesUpToIndex: DatabaseOperationMetrics, + SetProcessedL1BlockNumber: DatabaseOperationMetrics, + DeleteBatchRevertGtBlockNumber: DatabaseOperationMetrics, + UpdateSkippedL1Messages: DatabaseOperationMetrics, + InsertBlock: DatabaseOperationMetrics, + InsertGenesisBlock: DatabaseOperationMetrics, + InsertBatchConsolidationOutcome: DatabaseOperationMetrics, + Unwind: DatabaseOperationMetrics, + PurgeL1MessageToL2BlockMappings: DatabaseOperationMetrics, + InsertSignatures: DatabaseOperationMetrics, + DeleteL2BlocksGtBatchIndex: DatabaseOperationMetrics, + GetL1BlockInfo: DatabaseOperationMetrics, + GetNL1Messages: DatabaseOperationMetrics, + 
FetchAndUpdateUnprocessedCommittedBatches: DatabaseOperationMetrics, + InsertL1BlockInfo: DatabaseOperationMetrics, + UpdateBatchStatus: DatabaseOperationMetrics, + GetSignature: DatabaseOperationMetrics, + }, + }, + ), +} +2026-02-28T21:43:51.064554Z INFO node{idx=0}: sea_orm_migration::migrator: Applying all pending migrations +2026-02-28T21:43:51.065361Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20220101_000001_create_batch_commit_table' +2026-02-28T21:43:51.067186Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20220101_000001_create_batch_commit_table' has been applied +2026-02-28T21:43:51.067516Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250304_125946_add_l1_msg_table' +2026-02-28T21:43:51.068230Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250304_125946_add_l1_msg_table' has been applied +2026-02-28T21:43:51.068443Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250408_132123_add_header_metadata' +2026-02-28T21:43:51.068589Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250408_132123_add_header_metadata' has been applied +2026-02-28T21:43:51.068836Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250408_150338_load_header_metadata' +2026-02-28T21:43:51.068857Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250408_150338_load_header_metadata' has been applied +2026-02-28T21:43:51.068947Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250411_072004_add_l2_block' +2026-02-28T21:43:51.070227Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250411_072004_add_l2_block' has been applied +2026-02-28T21:43:51.070456Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250616_223947_add_metadata' +2026-02-28T21:43:51.070832Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250616_223947_add_metadata' has been applied +2026-02-28T21:43:51.070949Z 
INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250904_175949_block_signature' +2026-02-28T21:43:51.071137Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250904_175949_block_signature' has been applied +2026-02-28T21:43:51.071353Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20251028_110719_add_l1_block_table' +2026-02-28T21:43:51.071539Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20251028_110719_add_l1_block_table' has been applied +2026-02-28T21:43:51.084817Z INFO node{idx=0}: scroll::node::args: Starting engine driver fcs=ForkchoiceState { head: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, safe: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, finalized: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 } } payload_building_duration=40 +2026-02-28T21:43:51.084930Z INFO node{idx=0}: scroll::providers: Running with mock blob provider - all other blob provider configurations are ignored +2026-02-28T21:43:51.086201Z INFO scroll::derivation_pipeline: Starting derivation pipeline worker +2026-02-28T21:43:51.086241Z INFO node{idx=0}: reth::cli: Starting consensus engine +2026-02-28T21:43:51.086783Z INFO node{idx=0}: reth_node_events::node: Forkchoice updated head_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 safe_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 finalized_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 +2026-02-28T21:43:51.091336Z WARN node{idx=1}: reth_node_builder::launch::common: Failed to build global thread pool err=The global thread pool has already been initialized. 
+2026-02-28T21:43:51.091716Z INFO node{idx=1}: reth::cli: Saving prune config to toml file +2026-02-28T21:43:51.092009Z INFO node{idx=1}: reth::cli: Configuration loaded path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-cUHZXmTP/reth.toml" +2026-02-28T21:43:51.092404Z INFO node{idx=1}: reth::cli: Database opened +2026-02-28T21:43:51.191253Z INFO node{idx=1}: reth::cli: +Pre-merge hard forks (block based): +- Homestead @0 +- Tangerine @0 +- SpuriousDragon @0 +- Byzantium @0 +- Constantinople @0 +- Petersburg @0 +- Istanbul @0 +- Berlin @0 +- London @0 +- Archimedes @0 +- Bernoulli @0 +- Curie @0 + +Post-merge hard forks (timestamp based): +- Shanghai @0 +- Darwin @0 +- DarwinV2 @0 +- Euclid @0 +- EuclidV2 @0 +- Feynman @0 +- Galileo @0 +- GalileoV2 @0 +2026-02-28T21:43:51.192016Z INFO node{idx=1}: reth::cli: Transaction pool initialized +2026-02-28T21:43:51.193456Z INFO node{idx=1}: reth::cli: P2P networking initialized enode=enode://cfa09a330a1c50e7327a855a0a5074aa1591f3937c9d326d6d9d9e069a841edcedce392343a810c02306a28f0246202a5606f76ef740ee61259c3985e14bc7ed@127.0.0.1:54057 +2026-02-28T21:43:51.193668Z INFO node{idx=1}: reth::cli: StaticFileProducer initialized +2026-02-28T21:43:51.194044Z INFO node{idx=1}: reth::cli: Verifying storage consistency. 
+2026-02-28T21:43:51.194351Z INFO node{idx=1}: reth::cli: Pruner initialized prune_config=PruneConfig { block_interval: 5, segments: PruneModes { sender_recovery: None, transaction_lookup: None, receipts: None, account_history: None, storage_history: None, bodies_history: None, receipts_log_filter: ReceiptsLogPruneConfig({}) } } +2026-02-28T21:43:51.194465Z INFO node{idx=1}: reth::cli: Creating JWT auth secret file path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-cUHZXmTP/jwt.hex" +2026-02-28T21:43:51.195206Z INFO node{idx=1}: reth::cli: Consensus engine initialized +2026-02-28T21:43:51.195371Z INFO node{idx=1}: reth::cli: Engine API handler initialized +2026-02-28T21:43:51.197639Z INFO node{idx=1}: reth::cli: RPC auth server started url=127.0.0.1:54058 +2026-02-28T21:43:51.199146Z INFO node{idx=1}: reth::cli: RPC IPC server started path=/tmp/reth.ipc-zgzTRK9I +2026-02-28T21:43:51.199156Z INFO node{idx=1}: reth::cli: RPC HTTP server started url=127.0.0.1:54059 +2026-02-28T21:43:51.199182Z INFO node{idx=1}: rollup_node::args: Building rollup node with config: +ScrollRollupNodeConfig { + test: true, + consensus_args: ConsensusArgs { + algorithm: Noop, + authorized_signer: None, + }, + database_args: RollupNodeDatabaseArgs { + rn_db_path: None, + }, + chain_orchestrator_args: ChainOrchestratorArgs { + optimistic_sync_trigger: 100, + chain_buffer_size: 100, + }, + engine_driver_args: EngineDriverArgs { + sync_at_startup: true, + }, + blob_provider_args: BlobProviderArgs { + beacon_node_urls: None, + s3_url: None, + anvil_url: None, + mock: true, + compute_units_per_second: 0, + max_retries: 0, + initial_backoff: 0, + }, + l1_provider_args: L1ProviderArgs { + url: None, + compute_units_per_second: 10000, + max_retries: 10, + initial_backoff: 100, + logs_query_block_range: 500, + cache_max_items: 100, + }, + sequencer_args: SequencerArgs { + sequencer_enabled: false, + auto_start: false, + block_time: 100, + payload_building_duration: 40, + fee_recipient: 
0x0000000000000000000000000000000000000000, + l1_message_inclusion_mode: BlockDepth( + 0, + ), + allow_empty_blocks: true, + max_l1_messages: None, + }, + network_args: RollupNodeNetworkArgs { + enable_eth_scroll_wire_bridge: true, + enable_scroll_wire: true, + sequencer_url: None, + signer_address: None, + }, + rpc_args: RpcArgs { + basic_enabled: true, + admin_enabled: true, + }, + signer_args: SignerArgs { + key_file: None, + aws_kms_key_id: None, + private_key: None, + }, + gas_price_oracle_args: RollupNodeGasPriceOracleArgs { + default_suggested_priority_fee: 0, + }, + pprof_args: PprofArgs { + enabled: false, + addr: 0.0.0.0:6868, + default_duration: 30, + }, + database: Some( + Database { + database: Retry { + inner: DatabaseInner { + connection: SqlxSqlitePoolConnection, + write_lock: Mutex { + data: (), + }, + read_locks: Semaphore { + ll_sem: Semaphore { + permits: 5, + }, + resource_span: Span { + name: "runtime.resource", + level: Level( + Trace, + ), + target: "tokio::sync::semaphore", + disabled: true, + module_path: "tokio::sync::semaphore", + line: 461, + file: "/Users/yiweichi/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tokio-1.48.0/src/sync/semaphore.rs", + }, + }, + metrics: DatabaseMetrics, + tmp_dir: None, + }, + max_retries: None, + initial_delay_ms: 50, + exponential_backoff: false, + metrics: RetryMetrics, + }, + metrics: { + RemoveL1BlockInfoGt: DatabaseOperationMetrics, + InsertBatchConsolidationOutcome: DatabaseOperationMetrics, + DeleteBatchFinalizationGtBlockNumber: DatabaseOperationMetrics, + ChangeBatchProcessingToCommittedStatus: DatabaseOperationMetrics, + GetBatchByHash: DatabaseOperationMetrics, + GetLatestIndexedEventL1BlockNumber: DatabaseOperationMetrics, + GetFinalizedL1BlockNumber: DatabaseOperationMetrics, + FetchAndUpdateUnprocessedCommittedBatches: DatabaseOperationMetrics, + GetL1BlockInfo: DatabaseOperationMetrics, + GetLatestL1BlockNumber: DatabaseOperationMetrics, + GetNL1Messages: DatabaseOperationMetrics, + 
PrepareOnStartup: DatabaseOperationMetrics, + GetLatestSafeL2Info: DatabaseOperationMetrics, + DeleteBatchRevertGtBlockNumber: DatabaseOperationMetrics, + GetL2BlockAndBatchInfoByHash: DatabaseOperationMetrics, + GetMaxBlockDataHintBlockNumber: DatabaseOperationMetrics, + RemoveL1BlockInfoLeq: DatabaseOperationMetrics, + SetL2HeadBlockNumber: DatabaseOperationMetrics, + SetLatestL1BlockNumber: DatabaseOperationMetrics, + DeleteL2BlocksGtBatchIndex: DatabaseOperationMetrics, + InsertL1BlockInfo: DatabaseOperationMetrics, + InsertBlocks: DatabaseOperationMetrics, + GetProcessedL1BlockNumber: DatabaseOperationMetrics, + GetL2HeadBlockNumber: DatabaseOperationMetrics, + PurgeL1MessageToL2BlockMappings: DatabaseOperationMetrics, + GetL2BlockInfoByNumber: DatabaseOperationMetrics, + GetHighestBlockForBatchHash: DatabaseOperationMetrics, + UpdateBatchStatus: DatabaseOperationMetrics, + FinalizeConsolidatedBatches: DatabaseOperationMetrics, + DeleteL2BlocksGtBlockNumber: DatabaseOperationMetrics, + GetBatchStatusByHash: DatabaseOperationMetrics, + GetNL2BlockDataHint: DatabaseOperationMetrics, + Unwind: DatabaseOperationMetrics, + InsertBatch: DatabaseOperationMetrics, + UpdateL1MessagesWithL2Block: DatabaseOperationMetrics, + InsertBlock: DatabaseOperationMetrics, + InsertSignature: DatabaseOperationMetrics, + InsertL1Message: DatabaseOperationMetrics, + SetBatchRevertBlockNumberForBatchRange: DatabaseOperationMetrics, + FetchAndUpdateUnprocessedFinalizedBatches: DatabaseOperationMetrics, + InsertGenesisBlock: DatabaseOperationMetrics, + DeleteBatchesGtBlockNumber: DatabaseOperationMetrics, + GetSignature: DatabaseOperationMetrics, + GetHighestBlockForBatchIndex: DatabaseOperationMetrics, + UpdateSkippedL1Messages: DatabaseOperationMetrics, + FinalizeBatchesUpToIndex: DatabaseOperationMetrics, + DeleteBatchesGtBatchIndex: DatabaseOperationMetrics, + DeleteL1MessagesGt: DatabaseOperationMetrics, + InsertSignatures: DatabaseOperationMetrics, + UpdateL1MessagesFromL2Blocks: 
DatabaseOperationMetrics, + SetFinalizedL1BlockNumber: DatabaseOperationMetrics, + SetProcessedL1BlockNumber: DatabaseOperationMetrics, + GetBatchByIndex: DatabaseOperationMetrics, + }, + }, + ), +} +2026-02-28T21:43:51.200044Z INFO node{idx=1}: sea_orm_migration::migrator: Applying all pending migrations +2026-02-28T21:43:51.200464Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20220101_000001_create_batch_commit_table' +2026-02-28T21:43:51.201783Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20220101_000001_create_batch_commit_table' has been applied +2026-02-28T21:43:51.201950Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250304_125946_add_l1_msg_table' +2026-02-28T21:43:51.202505Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250304_125946_add_l1_msg_table' has been applied +2026-02-28T21:43:51.202685Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250408_132123_add_header_metadata' +2026-02-28T21:43:51.202825Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250408_132123_add_header_metadata' has been applied +2026-02-28T21:43:51.202995Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250408_150338_load_header_metadata' +2026-02-28T21:43:51.203004Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250408_150338_load_header_metadata' has been applied +2026-02-28T21:43:51.203080Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250411_072004_add_l2_block' +2026-02-28T21:43:51.204135Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250411_072004_add_l2_block' has been applied +2026-02-28T21:43:51.204345Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250616_223947_add_metadata' +2026-02-28T21:43:51.204660Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250616_223947_add_metadata' has been applied +2026-02-28T21:43:51.204786Z INFO node{idx=1}: 
sea_orm_migration::migrator: Applying migration 'm20250904_175949_block_signature' +2026-02-28T21:43:51.204997Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250904_175949_block_signature' has been applied +2026-02-28T21:43:51.205234Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20251028_110719_add_l1_block_table' +2026-02-28T21:43:51.205397Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20251028_110719_add_l1_block_table' has been applied +2026-02-28T21:43:51.217263Z INFO node{idx=1}: scroll::node::args: Starting engine driver fcs=ForkchoiceState { head: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, safe: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, finalized: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 } } payload_building_duration=40 +2026-02-28T21:43:51.217301Z INFO node{idx=1}: scroll::providers: Running with mock blob provider - all other blob provider configurations are ignored +2026-02-28T21:43:51.218104Z INFO node{idx=1}: reth::cli: Starting consensus engine +2026-02-28T21:43:51.218184Z INFO scroll::derivation_pipeline: Starting derivation pipeline worker +2026-02-28T21:43:51.218451Z INFO node{idx=1}: reth_node_events::node: Forkchoice updated head_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 safe_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 finalized_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 +2026-02-28T21:43:54.049010Z INFO node{idx=0}: reth::cli: Status connected_peers=0 latest_block=0 +2026-02-28T21:43:54.197887Z INFO node{idx=1}: reth::cli: Status connected_peers=0 latest_block=0 +2026-02-28T21:43:56.048860Z INFO node{idx=1}: reth_e2e_test_utils::network: Session established with peer: 
0x2c7946ba72f3c7890775cb525fd39dea7e107cdbe4c34d70909b19aeecf2866b48271d2c07b28d8e9420b954e40a137d66ccefbf3f8253a623a64dbf8c236b9d +2026-02-28T21:43:56.049279Z INFO node{idx=1}: reth_e2e_test_utils::network: Session established with peer: 0xcfa09a330a1c50e7327a855a0a5074aa1591f3937c9d326d6d9d9e069a841edcedce392343a810c02306a28f0246202a5606f76ef740ee61259c3985e14bc7ed +2026-02-28T21:43:56.054420Z WARN node{idx=2}: reth_node_builder::launch::common: Failed to build global thread pool err=The global thread pool has already been initialized. +2026-02-28T21:43:56.054829Z INFO node{idx=2}: reth::cli: Saving prune config to toml file +2026-02-28T21:43:56.055120Z INFO node{idx=2}: reth::cli: Configuration loaded path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-8ZDUdK4C/reth.toml" +2026-02-28T21:43:56.055462Z INFO node{idx=2}: reth::cli: Database opened +2026-02-28T21:43:56.150611Z INFO node{idx=2}: reth::cli: +Pre-merge hard forks (block based): +- Homestead @0 +- Tangerine @0 +- SpuriousDragon @0 +- Byzantium @0 +- Constantinople @0 +- Petersburg @0 +- Istanbul @0 +- Berlin @0 +- London @0 +- Archimedes @0 +- Bernoulli @0 +- Curie @0 + +Post-merge hard forks (timestamp based): +- Shanghai @0 +- Darwin @0 +- DarwinV2 @0 +- Euclid @0 +- EuclidV2 @0 +- Feynman @0 +- Galileo @0 +- GalileoV2 @0 +2026-02-28T21:43:56.151341Z INFO node{idx=2}: reth::cli: Transaction pool initialized +2026-02-28T21:43:56.152842Z INFO node{idx=2}: reth::cli: P2P networking initialized enode=enode://ea6b5b34ec929761f5c81c9fc4a6e4ffb08788e8f63b8b98a44dbcb59273169ca325ed44bed4abaf754ceac1bb3d1d9fde4bb409a09e260be3dbc75f74c9cc1b@127.0.0.1:54091 +2026-02-28T21:43:56.153069Z INFO node{idx=2}: reth::cli: StaticFileProducer initialized +2026-02-28T21:43:56.153465Z INFO node{idx=2}: reth::cli: Verifying storage consistency. 
+2026-02-28T21:43:56.153750Z INFO node{idx=2}: reth::cli: Pruner initialized prune_config=PruneConfig { block_interval: 5, segments: PruneModes { sender_recovery: None, transaction_lookup: None, receipts: None, account_history: None, storage_history: None, bodies_history: None, receipts_log_filter: ReceiptsLogPruneConfig({}) } } +2026-02-28T21:43:56.153868Z INFO node{idx=2}: reth::cli: Creating JWT auth secret file path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-8ZDUdK4C/jwt.hex" +2026-02-28T21:43:56.154645Z INFO node{idx=2}: reth::cli: Consensus engine initialized +2026-02-28T21:43:56.154823Z INFO node{idx=2}: reth::cli: Engine API handler initialized +2026-02-28T21:43:56.157167Z INFO node{idx=2}: reth::cli: RPC auth server started url=127.0.0.1:54092 +2026-02-28T21:43:56.158703Z INFO node{idx=2}: reth::cli: RPC IPC server started path=/tmp/reth.ipc-qrCb89S4 +2026-02-28T21:43:56.158716Z INFO node{idx=2}: reth::cli: RPC HTTP server started url=127.0.0.1:54093 +2026-02-28T21:43:56.158750Z INFO node{idx=2}: rollup_node::args: Building rollup node with config: +ScrollRollupNodeConfig { + test: true, + consensus_args: ConsensusArgs { + algorithm: Noop, + authorized_signer: None, + }, + database_args: RollupNodeDatabaseArgs { + rn_db_path: None, + }, + chain_orchestrator_args: ChainOrchestratorArgs { + optimistic_sync_trigger: 100, + chain_buffer_size: 100, + }, + engine_driver_args: EngineDriverArgs { + sync_at_startup: true, + }, + blob_provider_args: BlobProviderArgs { + beacon_node_urls: None, + s3_url: None, + anvil_url: None, + mock: true, + compute_units_per_second: 0, + max_retries: 0, + initial_backoff: 0, + }, + l1_provider_args: L1ProviderArgs { + url: None, + compute_units_per_second: 10000, + max_retries: 10, + initial_backoff: 100, + logs_query_block_range: 500, + cache_max_items: 100, + }, + sequencer_args: SequencerArgs { + sequencer_enabled: false, + auto_start: false, + block_time: 100, + payload_building_duration: 40, + fee_recipient: 
0x0000000000000000000000000000000000000000, + l1_message_inclusion_mode: BlockDepth( + 0, + ), + allow_empty_blocks: true, + max_l1_messages: None, + }, + network_args: RollupNodeNetworkArgs { + enable_eth_scroll_wire_bridge: true, + enable_scroll_wire: true, + sequencer_url: None, + signer_address: None, + }, + rpc_args: RpcArgs { + basic_enabled: true, + admin_enabled: true, + }, + signer_args: SignerArgs { + key_file: None, + aws_kms_key_id: None, + private_key: None, + }, + gas_price_oracle_args: RollupNodeGasPriceOracleArgs { + default_suggested_priority_fee: 0, + }, + pprof_args: PprofArgs { + enabled: false, + addr: 0.0.0.0:6868, + default_duration: 30, + }, + database: Some( + Database { + database: Retry { + inner: DatabaseInner { + connection: SqlxSqlitePoolConnection, + write_lock: Mutex { + data: (), + }, + read_locks: Semaphore { + ll_sem: Semaphore { + permits: 5, + }, + resource_span: Span { + name: "runtime.resource", + level: Level( + Trace, + ), + target: "tokio::sync::semaphore", + disabled: true, + module_path: "tokio::sync::semaphore", + line: 461, + file: "/Users/yiweichi/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tokio-1.48.0/src/sync/semaphore.rs", + }, + }, + metrics: DatabaseMetrics, + tmp_dir: None, + }, + max_retries: None, + initial_delay_ms: 50, + exponential_backoff: false, + metrics: RetryMetrics, + }, + metrics: { + DeleteL1MessagesGt: DatabaseOperationMetrics, + DeleteBatchesGtBlockNumber: DatabaseOperationMetrics, + DeleteBatchesGtBatchIndex: DatabaseOperationMetrics, + DeleteL2BlocksGtBlockNumber: DatabaseOperationMetrics, + RemoveL1BlockInfoGt: DatabaseOperationMetrics, + FinalizeBatchesUpToIndex: DatabaseOperationMetrics, + UpdateBatchStatus: DatabaseOperationMetrics, + SetLatestL1BlockNumber: DatabaseOperationMetrics, + PrepareOnStartup: DatabaseOperationMetrics, + UpdateL1MessagesWithL2Block: DatabaseOperationMetrics, + InsertBatch: DatabaseOperationMetrics, + InsertBatchConsolidationOutcome: 
DatabaseOperationMetrics, + UpdateSkippedL1Messages: DatabaseOperationMetrics, + UpdateL1MessagesFromL2Blocks: DatabaseOperationMetrics, + GetBatchByIndex: DatabaseOperationMetrics, + GetLatestIndexedEventL1BlockNumber: DatabaseOperationMetrics, + DeleteL2BlocksGtBatchIndex: DatabaseOperationMetrics, + GetL1BlockInfo: DatabaseOperationMetrics, + GetLatestL1BlockNumber: DatabaseOperationMetrics, + InsertBlocks: DatabaseOperationMetrics, + GetL2HeadBlockNumber: DatabaseOperationMetrics, + SetFinalizedL1BlockNumber: DatabaseOperationMetrics, + GetLatestSafeL2Info: DatabaseOperationMetrics, + ChangeBatchProcessingToCommittedStatus: DatabaseOperationMetrics, + InsertL1Message: DatabaseOperationMetrics, + SetL2HeadBlockNumber: DatabaseOperationMetrics, + FetchAndUpdateUnprocessedCommittedBatches: DatabaseOperationMetrics, + SetBatchRevertBlockNumberForBatchRange: DatabaseOperationMetrics, + PurgeL1MessageToL2BlockMappings: DatabaseOperationMetrics, + SetProcessedL1BlockNumber: DatabaseOperationMetrics, + Unwind: DatabaseOperationMetrics, + InsertSignatures: DatabaseOperationMetrics, + InsertBlock: DatabaseOperationMetrics, + GetBatchStatusByHash: DatabaseOperationMetrics, + FetchAndUpdateUnprocessedFinalizedBatches: DatabaseOperationMetrics, + DeleteBatchFinalizationGtBlockNumber: DatabaseOperationMetrics, + GetProcessedL1BlockNumber: DatabaseOperationMetrics, + InsertSignature: DatabaseOperationMetrics, + GetNL1Messages: DatabaseOperationMetrics, + GetHighestBlockForBatchHash: DatabaseOperationMetrics, + GetSignature: DatabaseOperationMetrics, + GetBatchByHash: DatabaseOperationMetrics, + GetL2BlockAndBatchInfoByHash: DatabaseOperationMetrics, + DeleteBatchRevertGtBlockNumber: DatabaseOperationMetrics, + GetHighestBlockForBatchIndex: DatabaseOperationMetrics, + GetL2BlockInfoByNumber: DatabaseOperationMetrics, + RemoveL1BlockInfoLeq: DatabaseOperationMetrics, + InsertGenesisBlock: DatabaseOperationMetrics, + FinalizeConsolidatedBatches: DatabaseOperationMetrics, + 
GetFinalizedL1BlockNumber: DatabaseOperationMetrics, + GetNL2BlockDataHint: DatabaseOperationMetrics, + GetMaxBlockDataHintBlockNumber: DatabaseOperationMetrics, + InsertL1BlockInfo: DatabaseOperationMetrics, + }, + }, + ), +} +2026-02-28T21:43:56.159884Z INFO node{idx=2}: sea_orm_migration::migrator: Applying all pending migrations +2026-02-28T21:43:56.160342Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20220101_000001_create_batch_commit_table' +2026-02-28T21:43:56.161800Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20220101_000001_create_batch_commit_table' has been applied +2026-02-28T21:43:56.161976Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250304_125946_add_l1_msg_table' +2026-02-28T21:43:56.162509Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250304_125946_add_l1_msg_table' has been applied +2026-02-28T21:43:56.162675Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250408_132123_add_header_metadata' +2026-02-28T21:43:56.162811Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250408_132123_add_header_metadata' has been applied +2026-02-28T21:43:56.162992Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250408_150338_load_header_metadata' +2026-02-28T21:43:56.163002Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250408_150338_load_header_metadata' has been applied +2026-02-28T21:43:56.163086Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250411_072004_add_l2_block' +2026-02-28T21:43:56.164158Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250411_072004_add_l2_block' has been applied +2026-02-28T21:43:56.164355Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250616_223947_add_metadata' +2026-02-28T21:43:56.164677Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250616_223947_add_metadata' has been applied +2026-02-28T21:43:56.164779Z 
INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250904_175949_block_signature' +2026-02-28T21:43:56.164951Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250904_175949_block_signature' has been applied +2026-02-28T21:43:56.165157Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20251028_110719_add_l1_block_table' +2026-02-28T21:43:56.165315Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20251028_110719_add_l1_block_table' has been applied +2026-02-28T21:43:56.172044Z INFO node{idx=2}: scroll::node::args: Starting engine driver fcs=ForkchoiceState { head: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, safe: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, finalized: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 } } payload_building_duration=40 +2026-02-28T21:43:56.172099Z INFO node{idx=2}: scroll::providers: Running with mock blob provider - all other blob provider configurations are ignored +2026-02-28T21:43:56.172971Z INFO node{idx=2}: reth::cli: Starting consensus engine +2026-02-28T21:43:56.173156Z INFO scroll::derivation_pipeline: Starting derivation pipeline worker +2026-02-28T21:43:56.173369Z INFO node{idx=2}: reth_node_events::node: Forkchoice updated head_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 safe_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 finalized_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 +2026-02-28T21:43:56.200224Z INFO node{idx=2}: reth_e2e_test_utils::network: Session established with peer: 0xcfa09a330a1c50e7327a855a0a5074aa1591f3937c9d326d6d9d9e069a841edcedce392343a810c02306a28f0246202a5606f76ef740ee61259c3985e14bc7ed +2026-02-28T21:43:56.200583Z INFO node{idx=2}: reth_e2e_test_utils::network: Session established with peer: 
0xea6b5b34ec929761f5c81c9fc4a6e4ffb08788e8f63b8b98a44dbcb59273169ca325ed44bed4abaf754ceac1bb3d1d9fde4bb409a09e260be3dbc75f74c9cc1b +2026-02-28T21:43:59.155517Z INFO node{idx=2}: reth::cli: Status connected_peers=1 latest_block=0 +2026-02-28T21:44:01.171492Z INFO node{idx=2}: reth_e2e_test_utils::network: Session established with peer: 0xea6b5b34ec929761f5c81c9fc4a6e4ffb08788e8f63b8b98a44dbcb59273169ca325ed44bed4abaf754ceac1bb3d1d9fde4bb409a09e260be3dbc75f74c9cc1b +2026-02-28T21:44:01.172460Z INFO node{idx=2}: reth_e2e_test_utils::network: Session established with peer: 0x2c7946ba72f3c7890775cb525fd39dea7e107cdbe4c34d70909b19aeecf2866b48271d2c07b28d8e9420b954e40a137d66ccefbf3f8253a623a64dbf8c236b9d +2026-02-28T21:45:09.048451Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:45:09.196777Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:45:14.155828Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:46:24.047569Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:46:24.197183Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:46:29.157143Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:47:39.049111Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:47:39.197265Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:47:44.156893Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:48:51.047833Z WARN node{idx=0}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! 
period=299.960894208s +2026-02-28T21:48:51.197405Z WARN node{idx=1}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=299.978783875s +2026-02-28T21:48:54.048042Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:48:54.197587Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:48:56.155756Z WARN node{idx=2}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=299.982226708s +2026-02-28T21:48:59.156789Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:50:09.048829Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:50:09.197745Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:50:14.156482Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:51:24.048875Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:51:24.198359Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:51:29.156899Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:52:39.059531Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:52:39.207785Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:52:44.166739Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:53:51.059649Z WARN node{idx=0}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! 
Please investigate reth and beacon client logs! period=599.961285583s +2026-02-28T21:53:51.077389Z INFO node{idx=0}: scroll::db::maintenance: running periodic PRAGMA optimize... +2026-02-28T21:53:51.082726Z INFO node{idx=0}: scroll::db::maintenance: periodic PRAGMA optimize complete. +2026-02-28T21:53:51.208471Z WARN node{idx=1}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=599.978363416s +2026-02-28T21:53:51.213024Z INFO node{idx=1}: scroll::db::maintenance: running periodic PRAGMA optimize... +2026-02-28T21:53:51.218303Z INFO node{idx=1}: scroll::db::maintenance: periodic PRAGMA optimize complete. +2026-02-28T21:53:54.059664Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:53:54.209226Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:53:56.167599Z WARN node{idx=2}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=599.982586458s +2026-02-28T21:53:56.172224Z INFO node{idx=2}: scroll::db::maintenance: running periodic PRAGMA optimize... +2026-02-28T21:53:56.173228Z INFO node{idx=2}: scroll::db::maintenance: periodic PRAGMA optimize complete. 
+2026-02-28T21:53:59.167584Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:55:09.060113Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:55:09.209947Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:55:14.168964Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:56:24.060128Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:56:24.209790Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:56:29.169254Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:57:39.061349Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:57:39.210343Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:57:44.168842Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:58:51.060516Z WARN node{idx=0}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=899.961098583s +2026-02-28T21:58:51.209873Z WARN node{idx=1}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=899.978732791s +2026-02-28T21:58:54.061222Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:58:54.209690Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T21:58:56.168647Z WARN node{idx=2}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! 
period=899.982594875s +2026-02-28T21:58:59.169032Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:00:09.061569Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:00:09.210622Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:00:14.170030Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:01:24.062891Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:01:24.212286Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:01:29.168763Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:02:39.061769Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:02:39.210756Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:02:44.169653Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:03:51.061413Z WARN node{idx=0}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=1199.961018041s +2026-02-28T22:03:51.086987Z INFO node{idx=0}: scroll::db::maintenance: running periodic PRAGMA optimize... +2026-02-28T22:03:51.088233Z INFO node{idx=0}: scroll::db::maintenance: periodic PRAGMA optimize complete. +2026-02-28T22:03:51.210410Z WARN node{idx=1}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=1199.978263916s +2026-02-28T22:03:51.222668Z INFO node{idx=1}: scroll::db::maintenance: running periodic PRAGMA optimize... +2026-02-28T22:03:51.225178Z INFO node{idx=1}: scroll::db::maintenance: periodic PRAGMA optimize complete. 
+2026-02-28T22:03:54.062590Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:03:54.211394Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:03:56.169613Z WARN node{idx=2}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=1199.982559458s +2026-02-28T22:03:56.176544Z INFO node{idx=2}: scroll::db::maintenance: running periodic PRAGMA optimize... +2026-02-28T22:03:56.177380Z INFO node{idx=2}: scroll::db::maintenance: periodic PRAGMA optimize complete. +2026-02-28T22:03:59.170127Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:05:09.062486Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:05:09.211903Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:05:14.170299Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:06:24.061903Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:06:24.211886Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:06:29.170980Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:07:39.061707Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:07:39.212472Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:07:44.170640Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:08:51.062600Z WARN node{idx=0}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! 
period=1499.961128958s +2026-02-28T22:08:51.211571Z WARN node{idx=1}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=1499.97840675s +2026-02-28T22:08:54.063021Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:08:54.211364Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:08:56.170417Z WARN node{idx=2}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=1499.982343833s +2026-02-28T22:08:59.170985Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:10:09.063705Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:10:09.212789Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:10:14.172080Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:11:24.062679Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:11:24.211510Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:11:29.172009Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:12:39.064411Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:12:39.212779Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:12:44.171608Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:13:51.064166Z WARN node{idx=0}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! 
Please investigate reth and beacon client logs! period=1799.961742041s +2026-02-28T22:13:51.092260Z INFO node{idx=0}: scroll::db::maintenance: running periodic PRAGMA optimize... +2026-02-28T22:13:51.094329Z INFO node{idx=0}: scroll::db::maintenance: periodic PRAGMA optimize complete. +2026-02-28T22:13:51.212373Z WARN node{idx=1}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=1799.978200416s +2026-02-28T22:13:51.228928Z INFO node{idx=1}: scroll::db::maintenance: running periodic PRAGMA optimize... +2026-02-28T22:13:51.232176Z INFO node{idx=1}: scroll::db::maintenance: periodic PRAGMA optimize complete. +2026-02-28T22:13:54.064007Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:13:54.212978Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:13:56.171567Z WARN node{idx=2}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=1799.982483958s +2026-02-28T22:13:56.179766Z INFO node{idx=2}: scroll::db::maintenance: running periodic PRAGMA optimize... +2026-02-28T22:13:56.180689Z INFO node{idx=2}: scroll::db::maintenance: periodic PRAGMA optimize complete. 
+2026-02-28T22:13:59.171819Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:15:09.064284Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:15:09.213263Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:15:14.172119Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:16:24.064452Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:16:24.212999Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:16:29.173080Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:17:39.064191Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:17:39.213953Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:17:44.173206Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:18:51.065927Z WARN node{idx=0}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=2099.962479625s +2026-02-28T22:18:51.213691Z WARN node{idx=1}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=2099.978500708s +2026-02-28T22:18:54.064186Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:18:54.214267Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:18:56.172613Z WARN node{idx=2}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! 
period=2099.98251125s +2026-02-28T22:18:59.174318Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:20:09.065339Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:20:09.214196Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:20:14.173501Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:21:24.066164Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:21:24.214686Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:21:29.173429Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:22:39.066245Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:22:39.214645Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:22:44.174519Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:23:51.065306Z WARN node{idx=0}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=2399.960828083s +2026-02-28T22:23:51.097898Z INFO node{idx=0}: scroll::db::maintenance: running periodic PRAGMA optimize... +2026-02-28T22:23:51.099240Z INFO node{idx=0}: scroll::db::maintenance: periodic PRAGMA optimize complete. +2026-02-28T22:23:51.214703Z WARN node{idx=1}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=2399.978484291s +2026-02-28T22:23:51.235936Z INFO node{idx=1}: scroll::db::maintenance: running periodic PRAGMA optimize... +2026-02-28T22:23:51.237115Z INFO node{idx=1}: scroll::db::maintenance: periodic PRAGMA optimize complete. 
+2026-02-28T22:23:54.066642Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:23:54.215715Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:23:56.174020Z WARN node{idx=2}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=2399.982891208s +2026-02-28T22:23:56.185168Z INFO node{idx=2}: scroll::db::maintenance: running periodic PRAGMA optimize... +2026-02-28T22:23:56.186160Z INFO node{idx=2}: scroll::db::maintenance: periodic PRAGMA optimize complete. +2026-02-28T22:23:59.173427Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:25:09.066823Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:25:09.215059Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:25:14.174187Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:26:24.065683Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:26:24.215148Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:26:29.175384Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:27:39.067354Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:27:39.215021Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:27:44.175364Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:28:51.067531Z WARN node{idx=0}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! 
period=2699.962020166s +2026-02-28T22:28:51.215793Z WARN node{idx=1}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=2699.978548708s +2026-02-28T22:28:54.067838Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:28:54.216653Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:28:56.173812Z WARN node{idx=2}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=2699.981664375s +2026-02-28T22:28:59.175213Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:30:09.071711Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:30:09.220714Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:30:14.180045Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:31:24.073024Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:31:24.221960Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:31:29.180724Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:32:39.073101Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:32:39.221421Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:32:44.180458Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:33:51.072896Z WARN node{idx=0}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! 
Please investigate reth and beacon client logs! period=2999.961677333s +2026-02-28T22:33:51.107994Z INFO node{idx=0}: scroll::db::maintenance: running periodic PRAGMA optimize... +2026-02-28T22:33:51.110253Z INFO node{idx=0}: scroll::db::maintenance: periodic PRAGMA optimize complete. +2026-02-28T22:33:51.221746Z WARN node{idx=1}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=2999.978780333s +2026-02-28T22:33:51.245429Z INFO node{idx=1}: scroll::db::maintenance: running periodic PRAGMA optimize... +2026-02-28T22:33:51.248516Z INFO node{idx=1}: scroll::db::maintenance: periodic PRAGMA optimize complete. +2026-02-28T22:33:54.072921Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:33:54.221375Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:33:56.180559Z WARN node{idx=2}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=2999.982685125s +2026-02-28T22:33:56.194635Z INFO node{idx=2}: scroll::db::maintenance: running periodic PRAGMA optimize... +2026-02-28T22:33:56.195454Z INFO node{idx=2}: scroll::db::maintenance: periodic PRAGMA optimize complete. 
+2026-02-28T22:33:59.180980Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:35:09.073625Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:35:09.221676Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:35:14.181435Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:36:24.072692Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:36:24.222895Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:36:29.182104Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:37:39.073136Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:37:39.223322Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:37:44.182252Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 +2026-02-28T22:38:51.074750Z WARN node{idx=0}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=3299.962292166s +2026-02-28T22:38:51.222887Z WARN node{idx=1}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! 
period=3299.978703166s +2026-02-28T22:38:52.425759Z INFO node{idx=0}: reth::cli: Wrote network peers to file peers_file="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-iZaMJRZv/known-peers.json" +2026-02-28T22:38:52.425764Z INFO node{idx=2}: reth::cli: Wrote network peers to file peers_file="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-8ZDUdK4C/known-peers.json" +2026-02-28T22:38:52.425629Z INFO node{idx=1}: reth::cli: Wrote network peers to file peers_file="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-cUHZXmTP/known-peers.json" From 143c032b1c499dd692863472e8f59e445480101a Mon Sep 17 00:00:00 2001 From: Morty Date: Tue, 3 Mar 2026 08:54:40 +0800 Subject: [PATCH 7/9] refactor: improve debug toolkit structure --- book/src/debug-toolkit.md | 47 +- crates/node/src/bin/scroll_debug.rs | 6 +- crates/node/src/debug_toolkit/cli.rs | 2 +- crates/node/src/debug_toolkit/commands.rs | 10 +- crates/node/src/debug_toolkit/event/mod.rs | 3 + .../{event_stream.rs => event/stream.rs} | 3 +- crates/node/src/debug_toolkit/mod.rs | 9 +- .../{attach_repl.rs => repl/attach.rs} | 324 ++----- .../debug_toolkit/{repl.rs => repl/local.rs} | 288 ++---- crates/node/src/debug_toolkit/repl/mod.rs | 6 + .../node/src/debug_toolkit/repl/terminal.rs | 81 ++ crates/node/src/debug_toolkit/shared/mod.rs | 3 + .../node/src/debug_toolkit/shared/output.rs | 71 ++ crates/node/src/debug_toolkit/shared/rpc.rs | 46 + .../node/src/debug_toolkit/shared/status.rs | 102 +++ crates/node/src/test_utils/fixture.rs | 6 +- scroll-debug-15746.log | 0 scroll-debug-16292.log | 671 -------------- scroll-debug-47318.log | 676 -------------- scroll-debug-51668.log | 0 scroll-debug-93283.log | 862 ------------------ 21 files changed, 545 insertions(+), 2671 deletions(-) create mode 100644 crates/node/src/debug_toolkit/event/mod.rs rename crates/node/src/debug_toolkit/{event_stream.rs => event/stream.rs} (99%) rename crates/node/src/debug_toolkit/{attach_repl.rs => repl/attach.rs} 
(64%) rename crates/node/src/debug_toolkit/{repl.rs => repl/local.rs} (79%) create mode 100644 crates/node/src/debug_toolkit/repl/mod.rs create mode 100644 crates/node/src/debug_toolkit/repl/terminal.rs create mode 100644 crates/node/src/debug_toolkit/shared/mod.rs create mode 100644 crates/node/src/debug_toolkit/shared/output.rs create mode 100644 crates/node/src/debug_toolkit/shared/rpc.rs create mode 100644 crates/node/src/debug_toolkit/shared/status.rs delete mode 100644 scroll-debug-15746.log delete mode 100644 scroll-debug-16292.log delete mode 100644 scroll-debug-47318.log delete mode 100644 scroll-debug-51668.log delete mode 100644 scroll-debug-93283.log diff --git a/book/src/debug-toolkit.md b/book/src/debug-toolkit.md index ca98f6c4..7ca17d95 100644 --- a/book/src/debug-toolkit.md +++ b/book/src/debug-toolkit.md @@ -1,6 +1,11 @@ # Debug Toolkit -The Debug Toolkit is an interactive REPL (Read-Eval-Print Loop) for debugging, development, and hackathon scenarios. It allows you to spin up local follower nodes that connect to a remote sequencer and L1, run tests, execute scripts, and inspect chain state. +The Debug Toolkit is an interactive REPL (Read-Eval-Print Loop) for debugging, development, and hackathon scenarios. + +It supports two modes: + +- **Attach mode**: connect to an already-running node over JSON-RPC. +- **Local (spawn) mode**: spin up a local test network and interact with in-process nodes. ## Getting Started @@ -24,7 +29,22 @@ Build with the `debug-toolkit` feature flag: cargo build -p rollup-node --features debug-toolkit --release ``` -## Connecting to a Remote Network +## Attach Mode + +Use attach mode when you want to inspect or control an already-running node: + +```bash +cargo run --features debug-toolkit --bin scroll-debug -- \ + --attach http://localhost:8545 \ + --private-key +``` + +Notes: + +- `--private-key` is optional, but required for `tx send` and `tx inject`. 
+- Commands that depend on local fixtures (`build`, `run`, `node`, `db`, L1 mock injection) are only available in local (spawn) mode. + +## Connecting to a Remote Network (Local (spawn) Mode) The primary use case is connecting local follower nodes to a remote sequencer and L1. This allows you to run tests and scripts against a live network. @@ -68,7 +88,7 @@ This creates local follower nodes that: | `--valid-signer ` | Authorized block signer address for consensus validation | | `--log-file ` | Path to log file (default: `./scroll-debug-.log`) | -## Local-Only Mode +## Local (Spawn) Mode You can also run a fully local environment with a mock L1 and local sequencer for offline development: @@ -314,6 +334,27 @@ View logs in another terminal: tail -f ./scroll-debug-12345.log ``` +### Admin + +| Command | Description | +|---------|-------------| +| `admin enable-seq` | Enable automatic sequencing | +| `admin disable-seq` | Disable automatic sequencing | +| `admin revert ` | Revert node state to L1 block number `n` | + +### Raw RPC + +| Command | Description | +|---------|-------------| +| `rpc [params]` | Execute any JSON-RPC call and print result | + +**Examples:** + +```bash +rpc eth_blockNumber +rpc eth_getBlockByNumber ["latest",false] +``` + ### Other | Command | Description | diff --git a/crates/node/src/bin/scroll_debug.rs b/crates/node/src/bin/scroll_debug.rs index 4c7e56e4..edb27057 100644 --- a/crates/node/src/bin/scroll_debug.rs +++ b/crates/node/src/bin/scroll_debug.rs @@ -50,7 +50,11 @@ fn main() -> eyre::Result<()> { eprintln!("Logs: {}", log_path.display()); eprintln!("Tail: tail -f {}", log_path.display()); eprintln!(); - eprintln!("Starting nodes (this may take a moment)..."); + if let Some(url) = &args.attach { + eprintln!("Attaching to node at {}...", url); + } else { + eprintln!("Starting nodes (this may take a moment)..."); + } // Create tokio runtime and run tokio::runtime::Builder::new_multi_thread() diff --git 
a/crates/node/src/debug_toolkit/cli.rs b/crates/node/src/debug_toolkit/cli.rs index 3fe19d7d..ed4ef710 100644 --- a/crates/node/src/debug_toolkit/cli.rs +++ b/crates/node/src/debug_toolkit/cli.rs @@ -23,7 +23,7 @@ pub struct DebugArgs { // ── Attach mode ────────────────────────────────────────────────────────── /// Attach to an already-running node at this RPC URL instead of spawning a test network. /// - /// Example: --attach http://localhost:8545 + /// Example: `--attach ` #[arg( long, conflicts_with_all = ["chain", "sequencer", "followers", "l1_url", "bootnodes", "valid_signer"] diff --git a/crates/node/src/debug_toolkit/commands.rs b/crates/node/src/debug_toolkit/commands.rs index b143dcaf..07768c5b 100644 --- a/crates/node/src/debug_toolkit/commands.rs +++ b/crates/node/src/debug_toolkit/commands.rs @@ -44,7 +44,7 @@ pub enum Command { Db, /// Show log file path. Logs, - /// Admin commands (attach mode only). + /// Admin commands. Admin(AdminCommand), /// Execute a raw JSON-RPC call and print the result. Rpc { @@ -61,7 +61,7 @@ pub enum Command { Unknown(String), } -/// Admin commands (attach mode only). +/// Admin commands. #[derive(Debug, Clone)] pub enum AdminCommand { /// Enable automatic sequencing. 
@@ -379,7 +379,7 @@ impl Command { let Some(method) = args.first() else { return Self::Unknown("rpc requires a method name".to_string()); }; - let params = if args.len() > 1 { Some(args[1..].join(" ")) } else { None }; + let params = (args.len() > 1).then(|| args[1..].join(" ")); Self::Rpc { method: method.to_string(), params } } } @@ -437,12 +437,12 @@ pub fn print_help() { println!("{}", "Logs:".underline()); println!(" logs Show log file path and tail command"); println!(); - println!("{}", "Admin (attach mode only):".underline()); + println!("{}", "Admin:".underline()); println!(" admin enable-seq Enable automatic sequencing"); println!(" admin disable-seq Disable automatic sequencing"); println!(" admin revert Revert node state to L1 block number "); println!(); - println!("{}", "Raw RPC (attach mode):".underline()); + println!("{}", "Raw RPC:".underline()); println!(" rpc [params] Execute any JSON-RPC call and print result"); println!(" rpc eth_blockNumber"); println!(" rpc eth_getBlockByNumber [\"latest\",false]"); diff --git a/crates/node/src/debug_toolkit/event/mod.rs b/crates/node/src/debug_toolkit/event/mod.rs new file mode 100644 index 00000000..f1d68c65 --- /dev/null +++ b/crates/node/src/debug_toolkit/event/mod.rs @@ -0,0 +1,3 @@ +pub(crate) mod stream; + +pub use stream::*; diff --git a/crates/node/src/debug_toolkit/event_stream.rs b/crates/node/src/debug_toolkit/event/stream.rs similarity index 99% rename from crates/node/src/debug_toolkit/event_stream.rs rename to crates/node/src/debug_toolkit/event/stream.rs index b15713a1..057e9a39 100644 --- a/crates/node/src/debug_toolkit/event_stream.rs +++ b/crates/node/src/debug_toolkit/event/stream.rs @@ -1,5 +1,4 @@ -//! Background event streaming for the debug REPL. - +/// Background event streaming for the debug REPL. 
use colored::Colorize; use rollup_node_chain_orchestrator::ChainOrchestratorEvent; use std::{ diff --git a/crates/node/src/debug_toolkit/mod.rs b/crates/node/src/debug_toolkit/mod.rs index 8ce29878..22794875 100644 --- a/crates/node/src/debug_toolkit/mod.rs +++ b/crates/node/src/debug_toolkit/mod.rs @@ -55,17 +55,16 @@ //! ``` pub mod actions; -mod attach_repl; pub mod cli; mod commands; -mod event_stream; +mod event; mod repl; +mod shared; -pub use attach_repl::AttachRepl; pub use cli::DebugArgs; pub use commands::*; -pub use event_stream::*; -pub use repl::*; +pub use event::*; +pub use repl::{AttachRepl, DebugRepl}; /// Prelude for convenient imports. pub mod prelude { diff --git a/crates/node/src/debug_toolkit/attach_repl.rs b/crates/node/src/debug_toolkit/repl/attach.rs similarity index 64% rename from crates/node/src/debug_toolkit/attach_repl.rs rename to crates/node/src/debug_toolkit/repl/attach.rs index 667e2c7d..f909d49e 100644 --- a/crates/node/src/debug_toolkit/attach_repl.rs +++ b/crates/node/src/debug_toolkit/repl/attach.rs @@ -1,33 +1,19 @@ -//! REPL for attaching to an already-running scroll node via JSON-RPC. -//! -//! Unlike [`super::DebugRepl`] which wraps an in-process [`TestFixture`], this REPL -//! connects to an existing node entirely over JSON-RPC using an alloy HTTP provider. -//! All namespaces — `eth_*`, `txpool_*`, `admin_*`, `rollupNode_*`, `rollupNodeAdmin_*` — -//! are accessed via `Provider::raw_request`, keeping the dependency surface minimal. -//! -//! # Usage -//! ```bash -//! scroll-debug --attach http://localhost:8545 -//! scroll-debug --attach http://localhost:8545 --private-key 0xac0974... -//! ``` - -use super::commands::{ +/// REPL for attaching to an already-running scroll node via JSON-RPC. 
+use crate::debug_toolkit::commands::{ print_help, AdminCommand, BlockArg, Command, EventsCommand, L1Command, PeersCommand, TxCommand, }; use alloy_consensus::{SignableTransaction, TxEip1559}; use alloy_eips::{eip2718::Encodable2718, BlockId, BlockNumberOrTag}; -use alloy_network::{Ethereum, TxSignerSync}; +use alloy_network::TxSignerSync; use alloy_primitives::TxKind; use alloy_provider::{Provider, ProviderBuilder}; use alloy_signer_local::PrivateKeySigner; use colored::Colorize; -use crossterm::{ - event::{self, Event, KeyCode, KeyModifiers}, - terminal::{disable_raw_mode, enable_raw_mode}, -}; +use crossterm::terminal::{disable_raw_mode, enable_raw_mode}; use reqwest::Url; use rollup_node_chain_orchestrator::ChainOrchestratorStatus; -use std::{borrow::Cow, io::Write, path::PathBuf, time::Duration}; +use scroll_alloy_network::Scroll; +use std::{io::Write, path::PathBuf, time::Duration}; /// Interactive REPL that attaches to a running node via JSON-RPC. #[derive(Debug)] @@ -35,7 +21,7 @@ pub struct AttachRepl { /// The RPC URL of the target node. url: Url, /// Alloy provider — all RPC calls including custom namespaces go through `raw_request`. - provider: alloy_provider::RootProvider, + provider: alloy_provider::RootProvider, /// Optional private key for signing transactions locally. signer: Option, /// Whether the REPL is running. 
@@ -96,14 +82,7 @@ impl AttachRepl { pub async fn run(&mut self) -> eyre::Result<()> { self.running = true; - enable_raw_mode()?; - struct RawModeGuard; - impl Drop for RawModeGuard { - fn drop(&mut self) { - let _ = disable_raw_mode(); - } - } - let _guard = RawModeGuard; + let _guard = super::terminal::RawModeGuard::new()?; let _ = disable_raw_mode(); println!(); @@ -112,7 +91,7 @@ impl AttachRepl { if let Some(signer) = &self.signer { println!("Signer: {:?}", signer.address()); } else { - println!("{}", "No signer – tx send/inject require --private-key".yellow()); + println!("{}", "No signer, tx send/inject require --private-key".yellow()); } println!("Type 'help' for available commands, 'exit' to quit."); println!(); @@ -153,58 +132,22 @@ impl AttachRepl { } } - // Keyboard input (50 ms poll). + // Check for keyboard input (non-blocking) _ = tokio::time::sleep(Duration::from_millis(50)) => { - while event::poll(Duration::from_millis(0))? { - if let Event::Key(key_event) = event::read()? 
{ - match key_event.code { - KeyCode::Enter => { - print!("\r\n"); - let _ = stdout.flush(); - let line = input_buffer.trim().to_string(); - input_buffer.clear(); - - if !line.is_empty() { - let _ = disable_raw_mode(); - if let Err(e) = self.execute_command(&line).await { - println!("{}: {}", "Error".red(), e); - } - let _ = enable_raw_mode(); - } - - if self.running { - print!("{}", self.get_prompt()); - let _ = stdout.flush(); - } - } - KeyCode::Backspace => { - if !input_buffer.is_empty() { - input_buffer.pop(); - print!("\x08 \x08"); - let _ = stdout.flush(); - } - } - KeyCode::Char(c) => { - if key_event.modifiers.contains(KeyModifiers::CONTROL) && c == 'c' { - print!("\r\nUse 'exit' to quit\r\n{}{}", self.get_prompt(), input_buffer); - let _ = stdout.flush(); - } else if key_event.modifiers.contains(KeyModifiers::CONTROL) && c == 'd' { - print!("\r\n"); - self.running = false; - } else { - input_buffer.push(c); - print!("{}", c); - let _ = stdout.flush(); - } - } - KeyCode::Esc => { - print!("\r\x1b[K{}", self.get_prompt()); - let _ = stdout.flush(); - input_buffer.clear(); - } - _ => {} + match super::terminal::poll_keyboard(&mut input_buffer, &self.get_prompt())? { + super::terminal::InputAction::Command(line) => { + let _ = disable_raw_mode(); + if let Err(e) = self.execute_command(&line).await { + println!("{}: {}", "Error".red(), e); + } + let _ = enable_raw_mode(); + if self.running { + print!("{}", self.get_prompt()); + let _ = stdout.flush(); } } + super::terminal::InputAction::Quit => self.running = false, + super::terminal::InputAction::None => {} } } } @@ -284,22 +227,14 @@ impl AttachRepl { /// Call a custom-namespace JSON-RPC method and deserialize the response. /// - /// Uses `raw_request_dyn` (no trait bounds on P/R) combined with serde_json for + /// Uses `raw_request_dyn` (no trait bounds on P/R) combined with `serde_json` for /// maximum compatibility regardless of the provider's network/transport generics. 
async fn raw( &self, method: &'static str, params: impl serde::Serialize, ) -> eyre::Result { - let raw_params = serde_json::value::to_raw_value(¶ms) - .map_err(|e| eyre::eyre!("Failed to serialize params for {}: {}", method, e))?; - let raw_result = self - .provider - .raw_request_dyn(Cow::Borrowed(method), &raw_params) - .await - .map_err(|e| eyre::eyre!("{}: {}", method, e))?; - serde_json::from_str(raw_result.get()) - .map_err(|e| eyre::eyre!("Failed to deserialize response from {}: {}", method, e)) + crate::debug_toolkit::shared::rpc::raw_typed(&self.provider, method, params).await } // ------------------------------------------------------------------------- @@ -309,7 +244,6 @@ impl AttachRepl { /// `status` — show node status via `rollupNode_status`. async fn cmd_status(&self) -> eyre::Result<()> { let status: ChainOrchestratorStatus = self.raw("rollupNode_status", ()).await?; - let fcs = &status.l2.fcs; println!("{}", "=== Node Status ===".bold()); println!("{}", "Node:".underline()); @@ -317,36 +251,7 @@ impl AttachRepl { if let Some(signer) = &self.signer { println!(" From: {:?}", signer.address()); } - - println!("{}", "L2:".underline()); - println!( - " Head: #{} ({:.12}...)", - fcs.head_block_info().number.to_string().green(), - format!("{:?}", fcs.head_block_info().hash) - ); - println!( - " Safe: #{} ({:.12}...)", - fcs.safe_block_info().number.to_string().yellow(), - format!("{:?}", fcs.safe_block_info().hash) - ); - println!( - " Finalized: #{} ({:.12}...)", - fcs.finalized_block_info().number.to_string().blue(), - format!("{:?}", fcs.finalized_block_info().hash) - ); - println!( - " Synced: {}", - if status.l2.status.is_synced() { "true".green() } else { "false".red() } - ); - - println!("{}", "L1:".underline()); - println!(" Head: #{}", status.l1.latest.to_string().cyan()); - println!(" Finalized: #{}", status.l1.finalized); - println!(" Processed: #{}", status.l1.processed); - println!( - " Synced: {}", - if status.l1.status.is_synced() { 
"true".green() } else { "false".red() } - ); + crate::debug_toolkit::shared::status::print_status_overview(&status); Ok(()) } @@ -354,69 +259,14 @@ impl AttachRepl { /// `sync-status` — detailed sync status. async fn cmd_sync_status(&self) -> eyre::Result<()> { let status: ChainOrchestratorStatus = self.raw("rollupNode_status", ()).await?; - - println!("{}", "Sync Status:".bold()); - println!(); - println!("{}", "L1 Sync:".underline()); - println!( - " Status: {}", - if status.l1.status.is_synced() { - "SYNCED".green() - } else { - format!("{:?}", status.l1.status).yellow().to_string().into() - } - ); - println!(" Latest: #{}", status.l1.latest.to_string().cyan()); - println!(" Finalized: #{}", status.l1.finalized); - println!(" Processed: #{}", status.l1.processed); - println!(); - println!("{}", "L2 Sync:".underline()); - println!( - " Status: {}", - if status.l2.status.is_synced() { - "SYNCED".green() - } else { - format!("{:?}", status.l2.status).yellow().to_string().into() - } - ); - println!(); - let fcs = &status.l2.fcs; - println!("{}", "Forkchoice:".underline()); - println!( - " Head: #{} ({:.12}...)", - fcs.head_block_info().number.to_string().green(), - format!("{:?}", fcs.head_block_info().hash) - ); - println!( - " Safe: #{} ({:.12}...)", - fcs.safe_block_info().number.to_string().yellow(), - format!("{:?}", fcs.safe_block_info().hash) - ); - println!( - " Finalized: #{} ({:.12}...)", - fcs.finalized_block_info().number.to_string().blue(), - format!("{:?}", fcs.finalized_block_info().hash) - ); - + crate::debug_toolkit::shared::status::print_sync_status(&status); Ok(()) } /// `fcs` — show forkchoice state. 
async fn cmd_fcs(&self) -> eyre::Result<()> { let status: ChainOrchestratorStatus = self.raw("rollupNode_status", ()).await?; - let fcs = &status.l2.fcs; - - println!("{}", "Forkchoice State:".bold()); - println!(" Head:"); - println!(" Number: {}", fcs.head_block_info().number); - println!(" Hash: {:?}", fcs.head_block_info().hash); - println!(" Safe:"); - println!(" Number: {}", fcs.safe_block_info().number); - println!(" Hash: {:?}", fcs.safe_block_info().hash); - println!(" Finalized:"); - println!(" Number: {}", fcs.finalized_block_info().number); - println!(" Hash: {:?}", fcs.finalized_block_info().hash); - + crate::debug_toolkit::shared::status::print_forkchoice(&status); Ok(()) } @@ -522,7 +372,7 @@ impl AttachRepl { } /// `tx pending` / `tx send` / `tx inject`. - async fn cmd_tx(&mut self, cmd: TxCommand) -> eyre::Result<()> { + async fn cmd_tx(&self, cmd: TxCommand) -> eyre::Result<()> { match cmd { TxCommand::Pending => { let result: serde_json::Value = self.raw("txpool_content", ()).await?; @@ -556,13 +406,48 @@ impl AttachRepl { .as_str() .and_then(|s| u64::from_str_radix(s.trim_start_matches("0x"), 16).ok()) .unwrap_or(1_000_000_000); + let base_fee_u128 = base_fee as u128; + // Keep priority tip conservative on low-fee chains and always satisfy: + // max_fee_per_gas >= max_priority_fee_per_gas. + let max_priority_fee_per_gas = (base_fee_u128 / 2).max(1); + let max_fee_per_gas = (base_fee_u128 * 2).max(max_priority_fee_per_gas); + let gas_limit = match self + .raw::( + "eth_estimateGas", + [serde_json::json!({ + "from": format!("{:#x}", from_address), + "to": format!("{:#x}", to), + "value": format!("0x{value:x}"), + })], + ) + .await + { + Ok(v) => v + .as_str() + .and_then(|s| u64::from_str_radix(s.trim_start_matches("0x"), 16).ok()) + // Add a small safety buffer on top of estimate. 
+ .map(|g| g.saturating_mul(12) / 10) + .filter(|g| *g > 0) + .unwrap_or(21_000), + Err(e) => { + println!( + "{}", + format!( + "Warning: eth_estimateGas failed ({}), falling back to 21000", + e + ) + .yellow() + ); + 21_000 + } + }; let mut tx = TxEip1559 { chain_id, nonce, - gas_limit: 21000, - max_fee_per_gas: base_fee as u128 * 2, - max_priority_fee_per_gas: 1_000_000_000, + gas_limit, + max_fee_per_gas, + max_priority_fee_per_gas, to: TxKind::Call(to), value, access_list: Default::default(), @@ -575,17 +460,22 @@ impl AttachRepl { let tx_hash: serde_json::Value = self.raw("eth_sendRawTransaction", [raw_tx]).await?; - println!("{}", "Transaction sent!".green()); - println!(" Hash: {}", tx_hash); - println!(" From: {:?}", from_address); - println!(" To: {:?}", to); - println!(" Value: {} wei", value); + let tx_hash_str = + tx_hash.as_str().map(ToOwned::to_owned).unwrap_or_else(|| tx_hash.to_string()); + crate::debug_toolkit::shared::output::print_tx_sent( + &tx_hash_str, + &format!("{:?}", from_address), + &format!("{:?}", to), + value, + false, + ); } TxCommand::Inject(bytes) => { let hex = alloy_primitives::hex::encode_prefixed(&bytes); let tx_hash: serde_json::Value = self.raw("eth_sendRawTransaction", [hex]).await?; - println!("{}", "Transaction injected!".green()); - println!(" Hash: {}", tx_hash); + let tx_hash_str = + tx_hash.as_str().map(ToOwned::to_owned).unwrap_or_else(|| tx_hash.to_string()); + crate::debug_toolkit::shared::output::print_tx_injected(&tx_hash_str); } } Ok(()) @@ -640,30 +530,21 @@ impl AttachRepl { AdminCommand::EnableSequencing => { let result: bool = self.raw("rollupNodeAdmin_enableAutomaticSequencing", ()).await?; - if result { - println!("{}", "Automatic sequencing enabled".green()); - } else { - println!("{}", "Enable sequencing returned false".yellow()); - } + crate::debug_toolkit::shared::output::print_admin_enable_result(result); } AdminCommand::DisableSequencing => { let result: bool = 
self.raw("rollupNodeAdmin_disableAutomaticSequencing", ()).await?; - if result { - println!("{}", "Automatic sequencing disabled".yellow()); - } else { - println!("{}", "Disable sequencing returned false".yellow()); - } + crate::debug_toolkit::shared::output::print_admin_disable_result(result); } AdminCommand::RevertToL1Block(block_number) => { - println!("{}", format!("Reverting to L1 block {}...", block_number).yellow()); + crate::debug_toolkit::shared::output::print_admin_revert_start(block_number); let result: bool = self.raw("rollupNodeAdmin_revertToL1Block", [block_number]).await?; - if result { - println!("{}", format!("Reverted to L1 block {}", block_number).green()); - } else { - println!("{}", "Revert returned false".yellow()); - } + crate::debug_toolkit::shared::output::print_admin_revert_result( + block_number, + result, + ); } } Ok(()) @@ -671,41 +552,14 @@ impl AttachRepl { /// `rpc [params]` — call any JSON-RPC method and pretty-print the result. async fn cmd_rpc(&self, method: &str, params: Option<&str>) -> eyre::Result<()> { - let raw_params = match params { - None => serde_json::value::to_raw_value(&())?, - Some(p) => { - // Try to parse as JSON first; fall back to treating as a string value - let val: serde_json::Value = serde_json::from_str(p) - .unwrap_or_else(|_| serde_json::Value::String(p.to_string())); - // Ensure always an array for JSON-RPC - let arr = if val.is_array() { val } else { serde_json::Value::Array(vec![val]) }; - serde_json::value::to_raw_value(&arr)? 
- } - }; - - let result = self - .provider - .raw_request_dyn(Cow::Owned(method.to_string()), &raw_params) - .await - .map_err(|e| eyre::eyre!("{}: {}", method, e))?; - - // Pretty-print via serde_json::Value - let pretty: serde_json::Value = serde_json::from_str(result.get())?; - println!("{}", serde_json::to_string_pretty(&pretty)?); - Ok(()) + let pretty = + crate::debug_toolkit::shared::rpc::raw_value(&self.provider, method, params).await?; + crate::debug_toolkit::shared::output::print_pretty_json(&pretty) } /// `logs` — show log file path. fn cmd_logs(&self) -> eyre::Result<()> { - println!("{}", "Log File:".bold()); - if let Some(path) = &self.log_path { - println!(" Path: {}", path.display()); - println!(); - println!("{}", "View logs in another terminal:".underline()); - println!(" tail -f {}", path.display()); - } else { - println!(" {}", "No log file configured (logs going to stdout)".dimmed()); - } + crate::debug_toolkit::shared::output::print_log_file(&self.log_path); Ok(()) } } diff --git a/crates/node/src/debug_toolkit/repl.rs b/crates/node/src/debug_toolkit/repl/local.rs similarity index 79% rename from crates/node/src/debug_toolkit/repl.rs rename to crates/node/src/debug_toolkit/repl/local.rs index fe1c6ef0..dd10a81f 100644 --- a/crates/node/src/debug_toolkit/repl.rs +++ b/crates/node/src/debug_toolkit/repl/local.rs @@ -1,32 +1,30 @@ -//! Interactive REPL for debugging rollup nodes. - -use super::{ +/// Interactive REPL for debugging rollup nodes. 
+use crate::debug_toolkit::{ actions::ActionRegistry, commands::{ - print_help, BlockArg, Command, EventsCommand, L1Command, PeersCommand, RunCommand, - TxCommand, WalletCommand, + print_help, AdminCommand, BlockArg, Command, EventsCommand, L1Command, PeersCommand, + RunCommand, TxCommand, WalletCommand, }, - event_stream::EventStreamState, + event::stream::EventStreamState, }; use crate::test_utils::{fixture::NodeType, TestFixture}; use alloy_consensus::{SignableTransaction, TxEip1559, TxLegacy}; use alloy_eips::{eip2718::Encodable2718, BlockNumberOrTag}; use alloy_network::{TransactionResponse, TxSignerSync}; use alloy_primitives::{address, Address, Bytes, TxKind, U256}; +use alloy_provider::ProviderBuilder; use alloy_rpc_types_eth::TransactionRequest; use alloy_signer_local::PrivateKeySigner; use alloy_sol_types::{sol, SolCall}; use colored::Colorize; -use crossterm::{ - event::{self, Event, KeyCode, KeyModifiers}, - terminal::{disable_raw_mode, enable_raw_mode}, -}; +use crossterm::terminal::{disable_raw_mode, enable_raw_mode}; use futures::StreamExt; use reth_network::PeersInfo; use reth_network_api::Peers; use reth_network_peers::NodeRecord; use reth_rpc_api::EthApiServer; use reth_transaction_pool::TransactionPool; +use scroll_alloy_network::Scroll; use std::{io::Write, path::PathBuf, str::FromStr, time::Duration}; // L1 contract addresses @@ -135,17 +133,7 @@ impl DebugRepl { pub async fn run(&mut self) -> eyre::Result<()> { self.running = true; - // Enable raw mode for proper terminal control - enable_raw_mode()?; - - // Guard to ensure raw mode is disabled on exit - struct RawModeGuard; - impl Drop for RawModeGuard { - fn drop(&mut self) { - let _ = disable_raw_mode(); - } - } - let _guard = RawModeGuard; + let _guard = super::terminal::RawModeGuard::new()?; // Print welcome message and initial status // Disable raw mode temporarily so println! 
works correctly @@ -186,61 +174,20 @@ impl DebugRepl { // Check for keyboard input (non-blocking) _ = tokio::time::sleep(Duration::from_millis(50)) => { - // Poll for keyboard events - while event::poll(Duration::from_millis(0))? { - if let Event::Key(key_event) = event::read()? { - match key_event.code { - KeyCode::Enter => { - print!("\r\n"); - let _ = stdout.flush(); - let line = input_buffer.trim().to_string(); - input_buffer.clear(); - - if !line.is_empty() { - // Disable raw mode for command output (println! works normally) - let _ = disable_raw_mode(); - if let Err(e) = self.execute_command(&line).await { - println!("{}: {}", "Error".red(), e); - } - // Re-enable raw mode for input - let _ = enable_raw_mode(); - } - - if self.running { - print!("{}", self.get_prompt()); - let _ = stdout.flush(); - } - } - KeyCode::Backspace => { - if !input_buffer.is_empty() { - input_buffer.pop(); - print!("\x08 \x08"); // Move back, overwrite, move back - let _ = stdout.flush(); - } - } - KeyCode::Char(c) => { - if key_event.modifiers.contains(KeyModifiers::CONTROL) && c == 'c' { - print!("\r\nUse 'exit' to quit\r\n"); - print!("{}{}", self.get_prompt(), input_buffer); - let _ = stdout.flush(); - } else if key_event.modifiers.contains(KeyModifiers::CONTROL) && c == 'd' { - print!("\r\n"); - self.running = false; - } else { - input_buffer.push(c); - print!("{}", c); - let _ = stdout.flush(); - } - } - KeyCode::Esc => { - // Clear current input - print!("\r\x1b[K{}", self.get_prompt()); - let _ = stdout.flush(); - input_buffer.clear(); - } - _ => {} + match super::terminal::poll_keyboard(&mut input_buffer, &self.get_prompt())? 
{ + super::terminal::InputAction::Command(line) => { + let _ = disable_raw_mode(); + if let Err(e) = self.execute_command(&line).await { + println!("{}: {}", "Error".red(), e); + } + let _ = enable_raw_mode(); + if self.running { + print!("{}", self.get_prompt()); + let _ = stdout.flush(); } } + super::terminal::InputAction::Quit => self.running = false, + super::terminal::InputAction::None => {} } } } @@ -280,6 +227,8 @@ impl DebugRepl { Command::Nodes => self.cmd_list_nodes(), Command::Db => self.cmd_db(), Command::Logs => self.cmd_logs(), + Command::Admin(admin_cmd) => self.cmd_admin(admin_cmd).await, + Command::Rpc { method, params } => self.cmd_rpc(&method, params.as_deref()).await, Command::Help => { print_help(); Ok(()) @@ -288,24 +237,6 @@ impl DebugRepl { self.running = false; Ok(()) } - Command::Admin(_) => { - println!( - "{}", - "admin commands are only available in attach mode (--attach ).".yellow() - ); - Ok(()) - } - Command::Rpc { method, params: _ } => { - println!( - "{}", - format!( - "rpc {} is only available in attach mode (--attach ). Use 'cast rpc {}' instead.", - method, method - ) - .yellow() - ); - Ok(()) - } Command::Unknown(s) => { if !s.is_empty() { println!("Unknown command: {}. 
Type 'help' for available commands.", s); @@ -324,7 +255,6 @@ impl DebugRepl { }; let status = node.rollup_manager_handle.status().await?; - let fcs = &status.l2.fcs; println!("{}", format!("=== Node {} ({}) ===", self.active_node, node_type).bold()); @@ -336,96 +266,16 @@ impl DebugRepl { if let Some(addr) = http_addr { println!(" HTTP RPC: http://{}", addr); } - - // L2 Status - println!("{}", "L2:".underline()); - println!( - " Head: #{} ({:.12}...)", - fcs.head_block_info().number.to_string().green(), - format!("{:?}", fcs.head_block_info().hash) - ); - println!( - " Safe: #{} ({:.12}...)", - fcs.safe_block_info().number.to_string().yellow(), - format!("{:?}", fcs.safe_block_info().hash) - ); - println!( - " Finalized: #{} ({:.12}...)", - fcs.finalized_block_info().number.to_string().blue(), - format!("{:?}", fcs.finalized_block_info().hash) - ); - println!( - " Synced: {}", - if status.l2.status.is_synced() { "true".green() } else { "false".red() } - ); - - // L1 Status - println!("{}", "L1:".underline()); - println!(" Head: #{}", status.l1.latest.to_string().cyan()); - println!(" Finalized: #{}", status.l1.finalized); - println!(" Processed: #{}", status.l1.processed); - println!( - " Synced: {}", - if status.l1.status.is_synced() { "true".green() } else { "false".red() } - ); + crate::debug_toolkit::shared::status::print_status_overview(&status); Ok(()) } - /// Show detailed sync status (rollupNode_status RPC equivalent). + /// Show detailed sync status (`rollupNode_status` RPC equivalent). 
async fn cmd_sync_status(&self) -> eyre::Result<()> { let node = &self.fixture.nodes[self.active_node]; let status = node.rollup_manager_handle.status().await?; - - println!("{}", "Sync Status:".bold()); - println!(); - - // L1 Sync Status - println!("{}", "L1 Sync:".underline()); - println!( - " Status: {}", - if status.l1.status.is_synced() { - "SYNCED".green() - } else { - format!("{:?}", status.l1.status).yellow().to_string().into() - } - ); - println!(" Latest: #{}", status.l1.latest.to_string().cyan()); - println!(" Finalized: #{}", status.l1.finalized); - println!(" Processed: #{}", status.l1.processed); - println!(); - - // L2 Sync Status - println!("{}", "L2 Sync:".underline()); - println!( - " Status: {}", - if status.l2.status.is_synced() { - "SYNCED".green() - } else { - format!("{:?}", status.l2.status).yellow().to_string().into() - } - ); - println!(); - - // Forkchoice State - let fcs = &status.l2.fcs; - println!("{}", "Forkchoice:".underline()); - println!( - " Head: #{} ({:.12}...)", - fcs.head_block_info().number.to_string().green(), - format!("{:?}", fcs.head_block_info().hash) - ); - println!( - " Safe: #{} ({:.12}...)", - fcs.safe_block_info().number.to_string().yellow(), - format!("{:?}", fcs.safe_block_info().hash) - ); - println!( - " Finalized: #{} ({:.12}...)", - fcs.finalized_block_info().number.to_string().blue(), - format!("{:?}", fcs.finalized_block_info().hash) - ); - + crate::debug_toolkit::shared::status::print_sync_status(&status); Ok(()) } @@ -499,19 +349,7 @@ impl DebugRepl { async fn cmd_fcs(&self) -> eyre::Result<()> { let node = &self.fixture.nodes[self.active_node]; let status = node.rollup_manager_handle.status().await?; - let fcs = &status.l2.fcs; - - println!("{}", "Forkchoice State:".bold()); - println!(" Head:"); - println!(" Number: {}", fcs.head_block_info().number); - println!(" Hash: {:?}", fcs.head_block_info().hash); - println!(" Safe:"); - println!(" Number: {}", fcs.safe_block_info().number); - println!(" Hash: 
{:?}", fcs.safe_block_info().hash); - println!(" Finalized:"); - println!(" Number: {}", fcs.finalized_block_info().number); - println!(" Hash: {:?}", fcs.finalized_block_info().hash); - + crate::debug_toolkit::shared::status::print_forkchoice(&status); Ok(()) } @@ -797,16 +635,17 @@ impl DebugRepl { let node = &self.fixture.nodes[self.active_node]; let tx_hash = node.node.rpc.inject_tx(raw_tx.clone()).await?; - println!("{}", "Transaction sent!".green()); - println!(" Hash: {:?}", tx_hash); - println!(" From: {:?}", from_address); - println!(" To: {:?}", to); - println!(" Value: {} wei", value); - println!("{}", "Note: Run 'build' to include in a block (sequencer mode)".dimmed()); + crate::debug_toolkit::shared::output::print_tx_sent( + &format!("{:?}", tx_hash), + &format!("{:?}", from_address), + &format!("{:?}", to), + value, + true, + ); } TxCommand::Inject(bytes) => { - self.fixture.inject_tx_on(self.active_node, bytes.clone()).await?; - println!("{}", "Transaction injected".green()); + let tx_hash = self.fixture.inject_tx_on(self.active_node, bytes.clone()).await?; + crate::debug_toolkit::shared::output::print_tx_injected(&format!("{:?}", tx_hash)); } } Ok(()) @@ -1063,6 +902,50 @@ impl DebugRepl { Ok(()) } + /// Call any JSON-RPC method against the active node and pretty-print result. 
+ async fn cmd_rpc(&self, method: &str, params: Option<&str>) -> eyre::Result<()> { + let node = &self.fixture.nodes[self.active_node]; + let http_addr = node + .node + .inner + .rpc_server_handle() + .http_local_addr() + .ok_or_else(|| eyre::eyre!("HTTP RPC is not available on active node"))?; + let rpc_url = format!("http://{}", http_addr); + + let provider: alloy_provider::RootProvider = ProviderBuilder::default() + .connect(rpc_url.as_str()) + .await + .map_err(|e| eyre::eyre!("Failed to connect to node RPC {}: {}", rpc_url, e))?; + let pretty = + crate::debug_toolkit::shared::rpc::raw_value(&provider, method, params).await?; + crate::debug_toolkit::shared::output::print_pretty_json(&pretty) + } + + /// Handle admin commands directly through the in-process rollup manager handle. + async fn cmd_admin(&self, cmd: AdminCommand) -> eyre::Result<()> { + let handle = &self.fixture.nodes[self.active_node].rollup_manager_handle; + match cmd { + AdminCommand::EnableSequencing => { + let result = handle.enable_automatic_sequencing().await?; + crate::debug_toolkit::shared::output::print_admin_enable_result(result); + } + AdminCommand::DisableSequencing => { + let result = handle.disable_automatic_sequencing().await?; + crate::debug_toolkit::shared::output::print_admin_disable_result(result); + } + AdminCommand::RevertToL1Block(block_number) => { + crate::debug_toolkit::shared::output::print_admin_revert_start(block_number); + let result = handle.revert_to_l1_block(block_number).await?; + crate::debug_toolkit::shared::output::print_admin_revert_result( + block_number, + result, + ); + } + } + Ok(()) + } + /// Show database path and access command. fn cmd_db(&self) -> eyre::Result<()> { let node = &self.fixture.nodes[self.active_node]; @@ -1086,16 +969,7 @@ impl DebugRepl { /// Show log file path and tail command. 
fn cmd_logs(&self) -> eyre::Result<()> { - println!("{}", "Log File:".bold()); - if let Some(path) = &self.log_path { - println!(" Path: {}", path.display()); - println!(); - println!("{}", "View logs in another terminal:".underline()); - println!(" tail -f {}", path.display()); - } else { - println!(" {}", "No log file configured (logs going to stdout)".dimmed()); - } - + crate::debug_toolkit::shared::output::print_log_file(&self.log_path); Ok(()) } } diff --git a/crates/node/src/debug_toolkit/repl/mod.rs b/crates/node/src/debug_toolkit/repl/mod.rs new file mode 100644 index 00000000..aa61a0bf --- /dev/null +++ b/crates/node/src/debug_toolkit/repl/mod.rs @@ -0,0 +1,6 @@ +pub(crate) mod attach; +pub(crate) mod local; +pub(crate) mod terminal; + +pub use attach::AttachRepl; +pub use local::DebugRepl; diff --git a/crates/node/src/debug_toolkit/repl/terminal.rs b/crates/node/src/debug_toolkit/repl/terminal.rs new file mode 100644 index 00000000..ad9caa50 --- /dev/null +++ b/crates/node/src/debug_toolkit/repl/terminal.rs @@ -0,0 +1,81 @@ +//! Shared terminal utilities for the debug REPLs. + +use crossterm::{ + event::{self, Event, KeyCode, KeyModifiers}, + terminal::{disable_raw_mode, enable_raw_mode}, +}; +use std::{io::Write, time::Duration}; + +/// RAII guard: enable raw mode on create, disable on drop. +pub(super) struct RawModeGuard; + +impl RawModeGuard { + pub(super) fn new() -> eyre::Result { + enable_raw_mode()?; + Ok(Self) + } +} + +impl Drop for RawModeGuard { + fn drop(&mut self) { + let _ = disable_raw_mode(); + } +} + +pub(super) enum InputAction { + Command(String), + Quit, + None, +} + +/// Drain pending key events and return the next user action. +pub(super) fn poll_keyboard(input_buffer: &mut String, prompt: &str) -> eyre::Result { + let mut stdout = std::io::stdout(); + + while event::poll(Duration::from_millis(0))? { + if let Event::Key(key_event) = event::read()? 
{ + match key_event.code { + KeyCode::Enter => { + print!("\r\n"); + let _ = stdout.flush(); + let line = input_buffer.trim().to_string(); + input_buffer.clear(); + if !line.is_empty() { + return Ok(InputAction::Command(line)); + } + print!("{}", prompt); + let _ = stdout.flush(); + } + KeyCode::Backspace => { + if !input_buffer.is_empty() { + input_buffer.pop(); + print!("\x08 \x08"); + let _ = stdout.flush(); + } + } + KeyCode::Char(c) => { + if key_event.modifiers.contains(KeyModifiers::CONTROL) && c == 'c' { + print!("\r\nUse 'exit' to quit\r\n{}{}", prompt, input_buffer); + let _ = stdout.flush(); + } else if key_event.modifiers.contains(KeyModifiers::CONTROL) && c == 'd' { + print!("\r\n"); + let _ = stdout.flush(); + return Ok(InputAction::Quit); + } else { + input_buffer.push(c); + print!("{}", c); + let _ = stdout.flush(); + } + } + KeyCode::Esc => { + input_buffer.clear(); + print!("\r\x1b[K{}", prompt); + let _ = stdout.flush(); + } + _ => {} + } + } + } + + Ok(InputAction::None) +} diff --git a/crates/node/src/debug_toolkit/shared/mod.rs b/crates/node/src/debug_toolkit/shared/mod.rs new file mode 100644 index 00000000..3714a96b --- /dev/null +++ b/crates/node/src/debug_toolkit/shared/mod.rs @@ -0,0 +1,3 @@ +pub(crate) mod output; +pub(crate) mod rpc; +pub(crate) mod status; diff --git a/crates/node/src/debug_toolkit/shared/output.rs b/crates/node/src/debug_toolkit/shared/output.rs new file mode 100644 index 00000000..2ebfc1b0 --- /dev/null +++ b/crates/node/src/debug_toolkit/shared/output.rs @@ -0,0 +1,71 @@ +//! Shared terminal output helpers for REPL commands. 
+ +use colored::Colorize; +use std::{fmt::Display, path::PathBuf}; + +pub(crate) fn print_admin_enable_result(result: bool) { + if result { + println!("{}", "Automatic sequencing enabled".green()); + } else { + println!("{}", "Enable sequencing returned false".yellow()); + } +} + +pub(crate) fn print_admin_disable_result(result: bool) { + if result { + println!("{}", "Automatic sequencing disabled".yellow()); + } else { + println!("{}", "Disable sequencing returned false".yellow()); + } +} + +pub(crate) fn print_admin_revert_start(block_number: u64) { + println!("{}", format!("Reverting to L1 block {}...", block_number).yellow()); +} + +pub(crate) fn print_admin_revert_result(block_number: u64, result: bool) { + if result { + println!("{}", format!("Reverted to L1 block {}", block_number).green()); + } else { + println!("{}", "Revert returned false".yellow()); + } +} + +pub(crate) fn print_pretty_json(value: &serde_json::Value) -> eyre::Result<()> { + println!("{}", serde_json::to_string_pretty(value)?); + Ok(()) +} + +pub(crate) fn print_log_file(log_path: &Option) { + println!("{}", "Log File:".bold()); + if let Some(path) = log_path { + println!(" Path: {}", path.display()); + println!(); + println!("{}", "View logs in another terminal:".underline()); + println!(" tail -f {}", path.display()); + } else { + println!(" {}", "No log file configured (logs going to stdout)".dimmed()); + } +} + +pub(crate) fn print_tx_sent( + tx_hash: &str, + from: &str, + to: &str, + value_wei: impl Display, + include_build_hint: bool, +) { + println!("{}", "Transaction sent!".green()); + println!(" Hash: {}", tx_hash); + println!(" From: {}", from); + println!(" To: {}", to); + println!(" Value: {} wei", value_wei); + if include_build_hint { + println!("{}", "Note: Run 'build' to include in a block (sequencer mode)".dimmed()); + } +} + +pub(crate) fn print_tx_injected(tx_hash: &str) { + println!("{}", "Transaction injected!".green()); + println!(" Hash: {}", tx_hash); +} diff --git 
a/crates/node/src/debug_toolkit/shared/rpc.rs b/crates/node/src/debug_toolkit/shared/rpc.rs new file mode 100644 index 00000000..fba00113 --- /dev/null +++ b/crates/node/src/debug_toolkit/shared/rpc.rs @@ -0,0 +1,46 @@ +//! Shared JSON-RPC helpers for debug toolkit REPLs. + +use alloy_provider::{Provider, RootProvider}; +use scroll_alloy_network::Scroll; +use std::borrow::Cow; + +/// Call a typed JSON-RPC method and deserialize into `R`. +pub(crate) async fn raw_typed( + provider: &RootProvider, + method: &'static str, + params: impl serde::Serialize, +) -> eyre::Result { + let raw_params = serde_json::value::to_raw_value(¶ms) + .map_err(|e| eyre::eyre!("Failed to serialize params for {}: {}", method, e))?; + let raw_result = provider + .raw_request_dyn(Cow::Borrowed(method), &raw_params) + .await + .map_err(|e| eyre::eyre!("{}: {}", method, e))?; + serde_json::from_str(raw_result.get()) + .map_err(|e| eyre::eyre!("Failed to deserialize response from {}: {}", method, e)) +} + +/// Call any JSON-RPC method and return the response as a JSON value. +pub(crate) async fn raw_value( + provider: &RootProvider, + method: &str, + params: Option<&str>, +) -> eyre::Result { + let raw_params = match params { + None => serde_json::value::to_raw_value(&())?, + Some(p) => { + let value: serde_json::Value = serde_json::from_str(p) + .unwrap_or_else(|_| serde_json::Value::String(p.to_string())); + let array = + if value.is_array() { value } else { serde_json::Value::Array(vec![value]) }; + serde_json::value::to_raw_value(&array)? 
+ } + }; + + let result = provider + .raw_request_dyn(Cow::Owned(method.to_string()), &raw_params) + .await + .map_err(|e| eyre::eyre!("{}: {}", method, e))?; + serde_json::from_str(result.get()) + .map_err(|e| eyre::eyre!("Failed to deserialize response from {}: {}", method, e)) +} diff --git a/crates/node/src/debug_toolkit/shared/status.rs b/crates/node/src/debug_toolkit/shared/status.rs new file mode 100644 index 00000000..749fff0c --- /dev/null +++ b/crates/node/src/debug_toolkit/shared/status.rs @@ -0,0 +1,102 @@ +//! Shared rendering for rollup node status outputs. + +use colored::Colorize; +use rollup_node_chain_orchestrator::ChainOrchestratorStatus; + +/// Print L2/L1 overview sections used by `status`. +pub(crate) fn print_status_overview(status: &ChainOrchestratorStatus) { + let fcs = &status.l2.fcs; + + println!("{}", "L2:".underline()); + println!( + " Head: #{} ({:.12}...)", + fcs.head_block_info().number.to_string().green(), + format!("{:?}", fcs.head_block_info().hash) + ); + println!( + " Safe: #{} ({:.12}...)", + fcs.safe_block_info().number.to_string().yellow(), + format!("{:?}", fcs.safe_block_info().hash) + ); + println!( + " Finalized: #{} ({:.12}...)", + fcs.finalized_block_info().number.to_string().blue(), + format!("{:?}", fcs.finalized_block_info().hash) + ); + println!( + " Synced: {}", + if status.l2.status.is_synced() { "true".green() } else { "false".red() } + ); + + println!("{}", "L1:".underline()); + println!(" Head: #{}", status.l1.latest.to_string().cyan()); + println!(" Finalized: #{}", status.l1.finalized); + println!(" Processed: #{}", status.l1.processed); + println!( + " Synced: {}", + if status.l1.status.is_synced() { "true".green() } else { "false".red() } + ); +} + +/// Print detailed sync status used by `sync-status`. 
+pub(crate) fn print_sync_status(status: &ChainOrchestratorStatus) { + println!("{}", "Sync Status:".bold()); + println!(); + println!("{}", "L1 Sync:".underline()); + println!( + " Status: {}", + if status.l1.status.is_synced() { + "SYNCED".green() + } else { + format!("{:?}", status.l1.status).yellow().to_string().into() + } + ); + println!(" Latest: #{}", status.l1.latest.to_string().cyan()); + println!(" Finalized: #{}", status.l1.finalized); + println!(" Processed: #{}", status.l1.processed); + println!(); + + println!("{}", "L2 Sync:".underline()); + println!( + " Status: {}", + if status.l2.status.is_synced() { + "SYNCED".green() + } else { + format!("{:?}", status.l2.status).yellow().to_string().into() + } + ); + println!(); + println!("{}", "Forkchoice:".underline()); + + let fcs = &status.l2.fcs; + println!( + " Head: #{} ({:.12}...)", + fcs.head_block_info().number.to_string().green(), + format!("{:?}", fcs.head_block_info().hash) + ); + println!( + " Safe: #{} ({:.12}...)", + fcs.safe_block_info().number.to_string().yellow(), + format!("{:?}", fcs.safe_block_info().hash) + ); + println!( + " Finalized: #{} ({:.12}...)", + fcs.finalized_block_info().number.to_string().blue(), + format!("{:?}", fcs.finalized_block_info().hash) + ); +} + +/// Print forkchoice section used by `fcs`. 
+pub(crate) fn print_forkchoice(status: &ChainOrchestratorStatus) { + let fcs = &status.l2.fcs; + println!("{}", "Forkchoice State:".bold()); + println!(" Head:"); + println!(" Number: {}", fcs.head_block_info().number); + println!(" Hash: {:?}", fcs.head_block_info().hash); + println!(" Safe:"); + println!(" Number: {}", fcs.safe_block_info().number); + println!(" Hash: {:?}", fcs.safe_block_info().hash); + println!(" Finalized:"); + println!(" Number: {}", fcs.finalized_block_info().number); + println!(" Hash: {:?}", fcs.finalized_block_info().hash); +} diff --git a/crates/node/src/test_utils/fixture.rs b/crates/node/src/test_utils/fixture.rs index b2355132..e9bf839f 100644 --- a/crates/node/src/test_utils/fixture.rs +++ b/crates/node/src/test_utils/fixture.rs @@ -176,9 +176,9 @@ impl TestFixture { &mut self, node_index: usize, tx: impl Into, - ) -> eyre::Result<()> { - self.nodes[node_index].node.rpc.inject_tx(tx.into()).await?; - Ok(()) + ) -> eyre::Result { + let tx_hash = self.nodes[node_index].node.rpc.inject_tx(tx.into()).await?; + Ok(tx_hash) } /// Get the current (latest) block from a specific node. 
diff --git a/scroll-debug-15746.log b/scroll-debug-15746.log deleted file mode 100644 index e69de29b..00000000 diff --git a/scroll-debug-16292.log b/scroll-debug-16292.log deleted file mode 100644 index 648d1ac1..00000000 --- a/scroll-debug-16292.log +++ /dev/null @@ -1,671 +0,0 @@ -2026-03-02T12:33:25.812857Z INFO node{idx=0}: reth::cli: Saving prune config to toml file -2026-03-02T12:33:25.813638Z INFO node{idx=0}: reth::cli: Configuration loaded path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-BTYrGfAQ/reth.toml" -2026-03-02T12:33:25.816190Z INFO node{idx=0}: reth::cli: Database opened -2026-03-02T12:33:26.135386Z INFO node{idx=0}: reth::cli: -Pre-merge hard forks (block based): -- Homestead @0 -- Tangerine @0 -- SpuriousDragon @0 -- Byzantium @0 -- Constantinople @0 -- Petersburg @0 -- Istanbul @0 -- Berlin @0 -- London @0 -- Archimedes @0 -- Bernoulli @0 -- Curie @0 - -Post-merge hard forks (timestamp based): -- Shanghai @0 -- Darwin @0 -- DarwinV2 @0 -- Euclid @0 -- EuclidV2 @0 -- Feynman @0 -- Galileo @0 -- GalileoV2 @0 -2026-03-02T12:33:26.142528Z INFO node{idx=0}: reth::cli: Transaction pool initialized -2026-03-02T12:33:26.456201Z INFO node{idx=0}: reth::cli: P2P networking initialized enode=enode://68d342edd2921250b59d89277d689bdd07e0068ae80a20b75cf9d6c3f56991b0a46b0b717c925efe7d9f87771d6fd89fc099fded52bd6eebd754be8b882fdc1f@127.0.0.1:51351 -2026-03-02T12:33:26.458448Z INFO node{idx=0}: reth::cli: StaticFileProducer initialized -2026-03-02T12:33:26.460499Z INFO node{idx=0}: reth::cli: Verifying storage consistency. 
-2026-03-02T12:33:26.461291Z INFO node{idx=0}: reth::cli: Pruner initialized prune_config=PruneConfig { block_interval: 5, segments: PruneModes { sender_recovery: None, transaction_lookup: None, receipts: None, account_history: None, storage_history: None, bodies_history: None, receipts_log_filter: ReceiptsLogPruneConfig({}) } } -2026-03-02T12:33:26.461583Z INFO node{idx=0}: reth::cli: Creating JWT auth secret file path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-BTYrGfAQ/jwt.hex" -2026-03-02T12:33:26.464831Z INFO node{idx=0}: reth::cli: Consensus engine initialized -2026-03-02T12:33:26.465790Z INFO node{idx=0}: reth::cli: Engine API handler initialized -2026-03-02T12:33:26.471828Z INFO node{idx=0}: reth::cli: RPC auth server started url=127.0.0.1:51352 -2026-03-02T12:33:26.474075Z INFO node{idx=0}: reth::cli: RPC IPC server started path=/tmp/reth.ipc-zKynQEXO -2026-03-02T12:33:26.474445Z INFO node{idx=0}: reth::cli: RPC HTTP server started url=127.0.0.1:51353 -2026-03-02T12:33:26.474497Z INFO node{idx=0}: rollup_node::args: Building rollup node with config: -ScrollRollupNodeConfig { - test: true, - consensus_args: ConsensusArgs { - algorithm: Noop, - authorized_signer: None, - }, - database_args: RollupNodeDatabaseArgs { - rn_db_path: None, - }, - chain_orchestrator_args: ChainOrchestratorArgs { - optimistic_sync_trigger: 100, - chain_buffer_size: 100, - }, - engine_driver_args: EngineDriverArgs { - sync_at_startup: true, - }, - blob_provider_args: BlobProviderArgs { - beacon_node_urls: None, - s3_url: None, - anvil_url: None, - mock: true, - compute_units_per_second: 0, - max_retries: 0, - initial_backoff: 0, - }, - l1_provider_args: L1ProviderArgs { - url: None, - compute_units_per_second: 10000, - max_retries: 10, - initial_backoff: 100, - logs_query_block_range: 500, - cache_max_items: 100, - }, - sequencer_args: SequencerArgs { - sequencer_enabled: true, - auto_start: false, - block_time: 100, - payload_building_duration: 40, - fee_recipient: 
0x0000000000000000000000000000000000000000, - l1_message_inclusion_mode: BlockDepth( - 0, - ), - allow_empty_blocks: true, - max_l1_messages: None, - }, - network_args: RollupNodeNetworkArgs { - enable_eth_scroll_wire_bridge: true, - enable_scroll_wire: true, - sequencer_url: None, - signer_address: None, - }, - rpc_args: RpcArgs { - basic_enabled: true, - admin_enabled: true, - }, - signer_args: SignerArgs { - key_file: None, - aws_kms_key_id: None, - private_key: None, - }, - gas_price_oracle_args: RollupNodeGasPriceOracleArgs { - default_suggested_priority_fee: 0, - }, - pprof_args: PprofArgs { - enabled: false, - addr: 0.0.0.0:6868, - default_duration: 30, - }, - database: Some( - Database { - database: Retry { - inner: DatabaseInner { - connection: SqlxSqlitePoolConnection, - write_lock: Mutex { - data: (), - }, - read_locks: Semaphore { - ll_sem: Semaphore { - permits: 5, - }, - resource_span: Span { - name: "runtime.resource", - level: Level( - Trace, - ), - target: "tokio::sync::semaphore", - disabled: true, - module_path: "tokio::sync::semaphore", - line: 461, - file: "/Users/yiweichi/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tokio-1.48.0/src/sync/semaphore.rs", - }, - }, - metrics: DatabaseMetrics, - tmp_dir: None, - }, - max_retries: None, - initial_delay_ms: 50, - exponential_backoff: false, - metrics: RetryMetrics, - }, - metrics: { - PrepareOnStartup: DatabaseOperationMetrics, - GetL1BlockInfo: DatabaseOperationMetrics, - RemoveL1BlockInfoLeq: DatabaseOperationMetrics, - UpdateL1MessagesWithL2Block: DatabaseOperationMetrics, - GetL2HeadBlockNumber: DatabaseOperationMetrics, - GetL2BlockAndBatchInfoByHash: DatabaseOperationMetrics, - GetLatestSafeL2Info: DatabaseOperationMetrics, - UpdateL1MessagesFromL2Blocks: DatabaseOperationMetrics, - DeleteBatchesGtBlockNumber: DatabaseOperationMetrics, - DeleteBatchesGtBatchIndex: DatabaseOperationMetrics, - InsertL1BlockInfo: DatabaseOperationMetrics, - RemoveL1BlockInfoGt: DatabaseOperationMetrics, - 
InsertBlocks: DatabaseOperationMetrics, - GetLatestL1BlockNumber: DatabaseOperationMetrics, - GetNL1Messages: DatabaseOperationMetrics, - InsertGenesisBlock: DatabaseOperationMetrics, - GetL2BlockInfoByNumber: DatabaseOperationMetrics, - GetSignature: DatabaseOperationMetrics, - GetNL2BlockDataHint: DatabaseOperationMetrics, - SetL2HeadBlockNumber: DatabaseOperationMetrics, - GetFinalizedL1BlockNumber: DatabaseOperationMetrics, - SetProcessedL1BlockNumber: DatabaseOperationMetrics, - DeleteL2BlocksGtBatchIndex: DatabaseOperationMetrics, - FinalizeConsolidatedBatches: DatabaseOperationMetrics, - Unwind: DatabaseOperationMetrics, - GetHighestBlockForBatchHash: DatabaseOperationMetrics, - SetBatchRevertBlockNumberForBatchRange: DatabaseOperationMetrics, - InsertBatchConsolidationOutcome: DatabaseOperationMetrics, - GetBatchByHash: DatabaseOperationMetrics, - GetBatchStatusByHash: DatabaseOperationMetrics, - FetchAndUpdateUnprocessedCommittedBatches: DatabaseOperationMetrics, - InsertL1Message: DatabaseOperationMetrics, - FetchAndUpdateUnprocessedFinalizedBatches: DatabaseOperationMetrics, - DeleteL1MessagesGt: DatabaseOperationMetrics, - InsertSignature: DatabaseOperationMetrics, - DeleteBatchFinalizationGtBlockNumber: DatabaseOperationMetrics, - GetBatchByIndex: DatabaseOperationMetrics, - InsertBlock: DatabaseOperationMetrics, - SetFinalizedL1BlockNumber: DatabaseOperationMetrics, - GetMaxBlockDataHintBlockNumber: DatabaseOperationMetrics, - ChangeBatchProcessingToCommittedStatus: DatabaseOperationMetrics, - FinalizeBatchesUpToIndex: DatabaseOperationMetrics, - GetHighestBlockForBatchIndex: DatabaseOperationMetrics, - UpdateSkippedL1Messages: DatabaseOperationMetrics, - GetProcessedL1BlockNumber: DatabaseOperationMetrics, - UpdateBatchStatus: DatabaseOperationMetrics, - PurgeL1MessageToL2BlockMappings: DatabaseOperationMetrics, - GetLatestIndexedEventL1BlockNumber: DatabaseOperationMetrics, - DeleteL2BlocksGtBlockNumber: DatabaseOperationMetrics, - 
DeleteBatchRevertGtBlockNumber: DatabaseOperationMetrics, - SetLatestL1BlockNumber: DatabaseOperationMetrics, - InsertBatch: DatabaseOperationMetrics, - InsertSignatures: DatabaseOperationMetrics, - }, - }, - ), -} -2026-03-02T12:33:26.484713Z INFO node{idx=0}: sea_orm_migration::migrator: Applying all pending migrations -2026-03-02T12:33:26.485730Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20220101_000001_create_batch_commit_table' -2026-03-02T12:33:26.487284Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20220101_000001_create_batch_commit_table' has been applied -2026-03-02T12:33:26.487479Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250304_125946_add_l1_msg_table' -2026-03-02T12:33:26.488095Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250304_125946_add_l1_msg_table' has been applied -2026-03-02T12:33:26.488290Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250408_132123_add_header_metadata' -2026-03-02T12:33:26.488425Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250408_132123_add_header_metadata' has been applied -2026-03-02T12:33:26.488615Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250408_150338_load_header_metadata' -2026-03-02T12:33:26.488625Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250408_150338_load_header_metadata' has been applied -2026-03-02T12:33:26.488708Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250411_072004_add_l2_block' -2026-03-02T12:33:26.489777Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250411_072004_add_l2_block' has been applied -2026-03-02T12:33:26.489971Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250616_223947_add_metadata' -2026-03-02T12:33:26.490274Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250616_223947_add_metadata' has been applied -2026-03-02T12:33:26.490350Z INFO 
node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250904_175949_block_signature' -2026-03-02T12:33:26.490512Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250904_175949_block_signature' has been applied -2026-03-02T12:33:26.490709Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20251028_110719_add_l1_block_table' -2026-03-02T12:33:26.490863Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20251028_110719_add_l1_block_table' has been applied -2026-03-02T12:33:26.511883Z INFO node{idx=0}: scroll::node::args: Starting engine driver fcs=ForkchoiceState { head: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, safe: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, finalized: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 } } payload_building_duration=40 -2026-03-02T12:33:26.512175Z INFO node{idx=0}: scroll::providers: Running with mock blob provider - all other blob provider configurations are ignored -2026-03-02T12:33:26.513485Z INFO scroll::derivation_pipeline: Starting derivation pipeline worker -2026-03-02T12:33:26.513714Z INFO node{idx=0}: reth::cli: Starting consensus engine -2026-03-02T12:33:26.514886Z INFO node{idx=0}: reth_node_events::node: Forkchoice updated head_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 safe_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 finalized_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 -2026-03-02T12:33:26.519080Z WARN node{idx=1}: reth_node_builder::launch::common: Failed to build global thread pool err=The global thread pool has already been initialized. 
-2026-03-02T12:33:26.519437Z INFO node{idx=1}: reth::cli: Saving prune config to toml file -2026-03-02T12:33:26.519810Z INFO node{idx=1}: reth::cli: Configuration loaded path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-ExVZKSsZ/reth.toml" -2026-03-02T12:33:26.520154Z INFO node{idx=1}: reth::cli: Database opened -2026-03-02T12:33:26.607383Z INFO node{idx=1}: reth::cli: -Pre-merge hard forks (block based): -- Homestead @0 -- Tangerine @0 -- SpuriousDragon @0 -- Byzantium @0 -- Constantinople @0 -- Petersburg @0 -- Istanbul @0 -- Berlin @0 -- London @0 -- Archimedes @0 -- Bernoulli @0 -- Curie @0 - -Post-merge hard forks (timestamp based): -- Shanghai @0 -- Darwin @0 -- DarwinV2 @0 -- Euclid @0 -- EuclidV2 @0 -- Feynman @0 -- Galileo @0 -- GalileoV2 @0 -2026-03-02T12:33:26.608014Z INFO node{idx=1}: reth::cli: Transaction pool initialized -2026-03-02T12:33:26.609673Z INFO node{idx=1}: reth::cli: P2P networking initialized enode=enode://1ff1c09c1caa8fa9327009d46565fa6871ffcce57b560c26b8ef453fa233a3b9a5a7aeeee9ece98d9c83c721950e960dce5cd36637fe73830adc9418af6bb88a@127.0.0.1:51358 -2026-03-02T12:33:26.609904Z INFO node{idx=1}: reth::cli: StaticFileProducer initialized -2026-03-02T12:33:26.610368Z INFO node{idx=1}: reth::cli: Verifying storage consistency. 
-2026-03-02T12:33:26.610702Z INFO node{idx=1}: reth::cli: Pruner initialized prune_config=PruneConfig { block_interval: 5, segments: PruneModes { sender_recovery: None, transaction_lookup: None, receipts: None, account_history: None, storage_history: None, bodies_history: None, receipts_log_filter: ReceiptsLogPruneConfig({}) } } -2026-03-02T12:33:26.610872Z INFO node{idx=1}: reth::cli: Creating JWT auth secret file path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-ExVZKSsZ/jwt.hex" -2026-03-02T12:33:26.611682Z INFO node{idx=1}: reth::cli: Consensus engine initialized -2026-03-02T12:33:26.611895Z INFO node{idx=1}: reth::cli: Engine API handler initialized -2026-03-02T12:33:26.614553Z INFO node{idx=1}: reth::cli: RPC auth server started url=127.0.0.1:51359 -2026-03-02T12:33:26.616312Z INFO node{idx=1}: reth::cli: RPC IPC server started path=/tmp/reth.ipc-iAtGUbRW -2026-03-02T12:33:26.616323Z INFO node{idx=1}: reth::cli: RPC HTTP server started url=127.0.0.1:51360 -2026-03-02T12:33:26.616345Z INFO node{idx=1}: rollup_node::args: Building rollup node with config: -ScrollRollupNodeConfig { - test: true, - consensus_args: ConsensusArgs { - algorithm: Noop, - authorized_signer: None, - }, - database_args: RollupNodeDatabaseArgs { - rn_db_path: None, - }, - chain_orchestrator_args: ChainOrchestratorArgs { - optimistic_sync_trigger: 100, - chain_buffer_size: 100, - }, - engine_driver_args: EngineDriverArgs { - sync_at_startup: true, - }, - blob_provider_args: BlobProviderArgs { - beacon_node_urls: None, - s3_url: None, - anvil_url: None, - mock: true, - compute_units_per_second: 0, - max_retries: 0, - initial_backoff: 0, - }, - l1_provider_args: L1ProviderArgs { - url: None, - compute_units_per_second: 10000, - max_retries: 10, - initial_backoff: 100, - logs_query_block_range: 500, - cache_max_items: 100, - }, - sequencer_args: SequencerArgs { - sequencer_enabled: false, - auto_start: false, - block_time: 100, - payload_building_duration: 40, - fee_recipient: 
0x0000000000000000000000000000000000000000, - l1_message_inclusion_mode: BlockDepth( - 0, - ), - allow_empty_blocks: true, - max_l1_messages: None, - }, - network_args: RollupNodeNetworkArgs { - enable_eth_scroll_wire_bridge: true, - enable_scroll_wire: true, - sequencer_url: None, - signer_address: None, - }, - rpc_args: RpcArgs { - basic_enabled: true, - admin_enabled: true, - }, - signer_args: SignerArgs { - key_file: None, - aws_kms_key_id: None, - private_key: None, - }, - gas_price_oracle_args: RollupNodeGasPriceOracleArgs { - default_suggested_priority_fee: 0, - }, - pprof_args: PprofArgs { - enabled: false, - addr: 0.0.0.0:6868, - default_duration: 30, - }, - database: Some( - Database { - database: Retry { - inner: DatabaseInner { - connection: SqlxSqlitePoolConnection, - write_lock: Mutex { - data: (), - }, - read_locks: Semaphore { - ll_sem: Semaphore { - permits: 5, - }, - resource_span: Span { - name: "runtime.resource", - level: Level( - Trace, - ), - target: "tokio::sync::semaphore", - disabled: true, - module_path: "tokio::sync::semaphore", - line: 461, - file: "/Users/yiweichi/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tokio-1.48.0/src/sync/semaphore.rs", - }, - }, - metrics: DatabaseMetrics, - tmp_dir: None, - }, - max_retries: None, - initial_delay_ms: 50, - exponential_backoff: false, - metrics: RetryMetrics, - }, - metrics: { - GetFinalizedL1BlockNumber: DatabaseOperationMetrics, - SetFinalizedL1BlockNumber: DatabaseOperationMetrics, - GetLatestSafeL2Info: DatabaseOperationMetrics, - FinalizeConsolidatedBatches: DatabaseOperationMetrics, - DeleteL2BlocksGtBlockNumber: DatabaseOperationMetrics, - DeleteL2BlocksGtBatchIndex: DatabaseOperationMetrics, - GetNL1Messages: DatabaseOperationMetrics, - GetLatestL1BlockNumber: DatabaseOperationMetrics, - ChangeBatchProcessingToCommittedStatus: DatabaseOperationMetrics, - GetBatchByIndex: DatabaseOperationMetrics, - InsertSignature: DatabaseOperationMetrics, - SetL2HeadBlockNumber: 
DatabaseOperationMetrics, - PurgeL1MessageToL2BlockMappings: DatabaseOperationMetrics, - FetchAndUpdateUnprocessedFinalizedBatches: DatabaseOperationMetrics, - DeleteBatchFinalizationGtBlockNumber: DatabaseOperationMetrics, - InsertBlocks: DatabaseOperationMetrics, - DeleteL1MessagesGt: DatabaseOperationMetrics, - RemoveL1BlockInfoLeq: DatabaseOperationMetrics, - InsertBatch: DatabaseOperationMetrics, - UpdateL1MessagesFromL2Blocks: DatabaseOperationMetrics, - GetBatchStatusByHash: DatabaseOperationMetrics, - UpdateSkippedL1Messages: DatabaseOperationMetrics, - GetL1BlockInfo: DatabaseOperationMetrics, - InsertGenesisBlock: DatabaseOperationMetrics, - GetBatchByHash: DatabaseOperationMetrics, - GetMaxBlockDataHintBlockNumber: DatabaseOperationMetrics, - DeleteBatchesGtBlockNumber: DatabaseOperationMetrics, - FinalizeBatchesUpToIndex: DatabaseOperationMetrics, - SetLatestL1BlockNumber: DatabaseOperationMetrics, - DeleteBatchesGtBatchIndex: DatabaseOperationMetrics, - InsertBatchConsolidationOutcome: DatabaseOperationMetrics, - SetBatchRevertBlockNumberForBatchRange: DatabaseOperationMetrics, - Unwind: DatabaseOperationMetrics, - GetProcessedL1BlockNumber: DatabaseOperationMetrics, - GetL2HeadBlockNumber: DatabaseOperationMetrics, - GetL2BlockInfoByNumber: DatabaseOperationMetrics, - SetProcessedL1BlockNumber: DatabaseOperationMetrics, - UpdateBatchStatus: DatabaseOperationMetrics, - InsertL1Message: DatabaseOperationMetrics, - GetHighestBlockForBatchIndex: DatabaseOperationMetrics, - GetSignature: DatabaseOperationMetrics, - GetLatestIndexedEventL1BlockNumber: DatabaseOperationMetrics, - PrepareOnStartup: DatabaseOperationMetrics, - GetNL2BlockDataHint: DatabaseOperationMetrics, - InsertL1BlockInfo: DatabaseOperationMetrics, - DeleteBatchRevertGtBlockNumber: DatabaseOperationMetrics, - UpdateL1MessagesWithL2Block: DatabaseOperationMetrics, - GetL2BlockAndBatchInfoByHash: DatabaseOperationMetrics, - InsertSignatures: DatabaseOperationMetrics, - RemoveL1BlockInfoGt: 
DatabaseOperationMetrics, - GetHighestBlockForBatchHash: DatabaseOperationMetrics, - FetchAndUpdateUnprocessedCommittedBatches: DatabaseOperationMetrics, - InsertBlock: DatabaseOperationMetrics, - }, - }, - ), -} -2026-03-02T12:33:26.617187Z INFO node{idx=1}: sea_orm_migration::migrator: Applying all pending migrations -2026-03-02T12:33:26.617603Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20220101_000001_create_batch_commit_table' -2026-03-02T12:33:26.619030Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20220101_000001_create_batch_commit_table' has been applied -2026-03-02T12:33:26.619205Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250304_125946_add_l1_msg_table' -2026-03-02T12:33:26.619782Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250304_125946_add_l1_msg_table' has been applied -2026-03-02T12:33:26.619967Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250408_132123_add_header_metadata' -2026-03-02T12:33:26.620108Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250408_132123_add_header_metadata' has been applied -2026-03-02T12:33:26.620287Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250408_150338_load_header_metadata' -2026-03-02T12:33:26.620297Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250408_150338_load_header_metadata' has been applied -2026-03-02T12:33:26.620380Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250411_072004_add_l2_block' -2026-03-02T12:33:26.621534Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250411_072004_add_l2_block' has been applied -2026-03-02T12:33:26.621763Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250616_223947_add_metadata' -2026-03-02T12:33:26.622098Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250616_223947_add_metadata' has been applied -2026-03-02T12:33:26.622187Z INFO 
node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250904_175949_block_signature' -2026-03-02T12:33:26.622357Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250904_175949_block_signature' has been applied -2026-03-02T12:33:26.622569Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20251028_110719_add_l1_block_table' -2026-03-02T12:33:26.622727Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20251028_110719_add_l1_block_table' has been applied -2026-03-02T12:33:26.629183Z INFO node{idx=1}: scroll::node::args: Starting engine driver fcs=ForkchoiceState { head: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, safe: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, finalized: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 } } payload_building_duration=40 -2026-03-02T12:33:26.629217Z INFO node{idx=1}: scroll::providers: Running with mock blob provider - all other blob provider configurations are ignored -2026-03-02T12:33:26.630065Z INFO node{idx=1}: reth::cli: Starting consensus engine -2026-03-02T12:33:26.630158Z INFO scroll::derivation_pipeline: Starting derivation pipeline worker -2026-03-02T12:33:26.630383Z INFO node{idx=1}: reth_node_events::node: Forkchoice updated head_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 safe_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 finalized_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 -2026-03-02T12:33:29.466851Z INFO node{idx=0}: reth::cli: Status connected_peers=0 latest_block=0 -2026-03-02T12:33:29.613432Z INFO node{idx=1}: reth::cli: Status connected_peers=0 latest_block=0 -2026-03-02T12:33:31.472193Z INFO node{idx=1}: reth_e2e_test_utils::network: Session established with peer: 
0x68d342edd2921250b59d89277d689bdd07e0068ae80a20b75cf9d6c3f56991b0a46b0b717c925efe7d9f87771d6fd89fc099fded52bd6eebd754be8b882fdc1f -2026-03-02T12:33:31.472748Z INFO node{idx=1}: reth_e2e_test_utils::network: Session established with peer: 0x1ff1c09c1caa8fa9327009d46565fa6871ffcce57b560c26b8ef453fa233a3b9a5a7aeeee9ece98d9c83c721950e960dce5cd36637fe73830adc9418af6bb88a -2026-03-02T12:33:31.478486Z WARN node{idx=2}: reth_node_builder::launch::common: Failed to build global thread pool err=The global thread pool has already been initialized. -2026-03-02T12:33:31.478981Z INFO node{idx=2}: reth::cli: Saving prune config to toml file -2026-03-02T12:33:31.479376Z INFO node{idx=2}: reth::cli: Configuration loaded path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-E87A3wU3/reth.toml" -2026-03-02T12:33:31.479791Z INFO node{idx=2}: reth::cli: Database opened -2026-03-02T12:33:31.568702Z INFO node{idx=2}: reth::cli: -Pre-merge hard forks (block based): -- Homestead @0 -- Tangerine @0 -- SpuriousDragon @0 -- Byzantium @0 -- Constantinople @0 -- Petersburg @0 -- Istanbul @0 -- Berlin @0 -- London @0 -- Archimedes @0 -- Bernoulli @0 -- Curie @0 - -Post-merge hard forks (timestamp based): -- Shanghai @0 -- Darwin @0 -- DarwinV2 @0 -- Euclid @0 -- EuclidV2 @0 -- Feynman @0 -- Galileo @0 -- GalileoV2 @0 -2026-03-02T12:33:31.569648Z INFO node{idx=2}: reth::cli: Transaction pool initialized -2026-03-02T12:33:31.571984Z INFO node{idx=2}: reth::cli: P2P networking initialized enode=enode://871a1b8b805b16f5d2bf85bbb4f144bb39b7175d725d42e8aa0e7903d936b1d9a0fdf6127430eb80df48fcdd749dd721fd87f1d4ade03e17ce932a79196aade3@127.0.0.1:51367 -2026-03-02T12:33:31.572301Z INFO node{idx=2}: reth::cli: StaticFileProducer initialized -2026-03-02T12:33:31.572919Z INFO node{idx=2}: reth::cli: Verifying storage consistency. 
-2026-03-02T12:33:31.573337Z INFO node{idx=2}: reth::cli: Pruner initialized prune_config=PruneConfig { block_interval: 5, segments: PruneModes { sender_recovery: None, transaction_lookup: None, receipts: None, account_history: None, storage_history: None, bodies_history: None, receipts_log_filter: ReceiptsLogPruneConfig({}) } } -2026-03-02T12:33:31.573561Z INFO node{idx=2}: reth::cli: Creating JWT auth secret file path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-E87A3wU3/jwt.hex" -2026-03-02T12:33:31.574721Z INFO node{idx=2}: reth::cli: Consensus engine initialized -2026-03-02T12:33:31.574976Z INFO node{idx=2}: reth::cli: Engine API handler initialized -2026-03-02T12:33:31.578376Z INFO node{idx=2}: reth::cli: RPC auth server started url=127.0.0.1:51368 -2026-03-02T12:33:31.580586Z INFO node{idx=2}: reth::cli: RPC IPC server started path=/tmp/reth.ipc-3xCrUmZj -2026-03-02T12:33:31.580599Z INFO node{idx=2}: reth::cli: RPC HTTP server started url=127.0.0.1:51369 -2026-03-02T12:33:31.580628Z INFO node{idx=2}: rollup_node::args: Building rollup node with config: -ScrollRollupNodeConfig { - test: true, - consensus_args: ConsensusArgs { - algorithm: Noop, - authorized_signer: None, - }, - database_args: RollupNodeDatabaseArgs { - rn_db_path: None, - }, - chain_orchestrator_args: ChainOrchestratorArgs { - optimistic_sync_trigger: 100, - chain_buffer_size: 100, - }, - engine_driver_args: EngineDriverArgs { - sync_at_startup: true, - }, - blob_provider_args: BlobProviderArgs { - beacon_node_urls: None, - s3_url: None, - anvil_url: None, - mock: true, - compute_units_per_second: 0, - max_retries: 0, - initial_backoff: 0, - }, - l1_provider_args: L1ProviderArgs { - url: None, - compute_units_per_second: 10000, - max_retries: 10, - initial_backoff: 100, - logs_query_block_range: 500, - cache_max_items: 100, - }, - sequencer_args: SequencerArgs { - sequencer_enabled: false, - auto_start: false, - block_time: 100, - payload_building_duration: 40, - fee_recipient: 
0x0000000000000000000000000000000000000000, - l1_message_inclusion_mode: BlockDepth( - 0, - ), - allow_empty_blocks: true, - max_l1_messages: None, - }, - network_args: RollupNodeNetworkArgs { - enable_eth_scroll_wire_bridge: true, - enable_scroll_wire: true, - sequencer_url: None, - signer_address: None, - }, - rpc_args: RpcArgs { - basic_enabled: true, - admin_enabled: true, - }, - signer_args: SignerArgs { - key_file: None, - aws_kms_key_id: None, - private_key: None, - }, - gas_price_oracle_args: RollupNodeGasPriceOracleArgs { - default_suggested_priority_fee: 0, - }, - pprof_args: PprofArgs { - enabled: false, - addr: 0.0.0.0:6868, - default_duration: 30, - }, - database: Some( - Database { - database: Retry { - inner: DatabaseInner { - connection: SqlxSqlitePoolConnection, - write_lock: Mutex { - data: (), - }, - read_locks: Semaphore { - ll_sem: Semaphore { - permits: 5, - }, - resource_span: Span { - name: "runtime.resource", - level: Level( - Trace, - ), - target: "tokio::sync::semaphore", - disabled: true, - module_path: "tokio::sync::semaphore", - line: 461, - file: "/Users/yiweichi/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tokio-1.48.0/src/sync/semaphore.rs", - }, - }, - metrics: DatabaseMetrics, - tmp_dir: None, - }, - max_retries: None, - initial_delay_ms: 50, - exponential_backoff: false, - metrics: RetryMetrics, - }, - metrics: { - GetL1BlockInfo: DatabaseOperationMetrics, - FetchAndUpdateUnprocessedFinalizedBatches: DatabaseOperationMetrics, - SetL2HeadBlockNumber: DatabaseOperationMetrics, - InsertGenesisBlock: DatabaseOperationMetrics, - ChangeBatchProcessingToCommittedStatus: DatabaseOperationMetrics, - SetFinalizedL1BlockNumber: DatabaseOperationMetrics, - InsertBlocks: DatabaseOperationMetrics, - Unwind: DatabaseOperationMetrics, - SetLatestL1BlockNumber: DatabaseOperationMetrics, - InsertSignature: DatabaseOperationMetrics, - GetBatchByHash: DatabaseOperationMetrics, - GetLatestL1BlockNumber: DatabaseOperationMetrics, - 
GetL2HeadBlockNumber: DatabaseOperationMetrics, - GetL2BlockInfoByNumber: DatabaseOperationMetrics, - DeleteL1MessagesGt: DatabaseOperationMetrics, - DeleteBatchFinalizationGtBlockNumber: DatabaseOperationMetrics, - PrepareOnStartup: DatabaseOperationMetrics, - DeleteL2BlocksGtBlockNumber: DatabaseOperationMetrics, - DeleteBatchRevertGtBlockNumber: DatabaseOperationMetrics, - DeleteL2BlocksGtBatchIndex: DatabaseOperationMetrics, - InsertBlock: DatabaseOperationMetrics, - DeleteBatchesGtBlockNumber: DatabaseOperationMetrics, - RemoveL1BlockInfoGt: DatabaseOperationMetrics, - GetFinalizedL1BlockNumber: DatabaseOperationMetrics, - UpdateBatchStatus: DatabaseOperationMetrics, - GetLatestSafeL2Info: DatabaseOperationMetrics, - PurgeL1MessageToL2BlockMappings: DatabaseOperationMetrics, - GetNL2BlockDataHint: DatabaseOperationMetrics, - GetHighestBlockForBatchHash: DatabaseOperationMetrics, - InsertL1BlockInfo: DatabaseOperationMetrics, - GetProcessedL1BlockNumber: DatabaseOperationMetrics, - GetHighestBlockForBatchIndex: DatabaseOperationMetrics, - UpdateL1MessagesFromL2Blocks: DatabaseOperationMetrics, - DeleteBatchesGtBatchIndex: DatabaseOperationMetrics, - UpdateL1MessagesWithL2Block: DatabaseOperationMetrics, - SetBatchRevertBlockNumberForBatchRange: DatabaseOperationMetrics, - GetNL1Messages: DatabaseOperationMetrics, - FinalizeConsolidatedBatches: DatabaseOperationMetrics, - GetMaxBlockDataHintBlockNumber: DatabaseOperationMetrics, - FetchAndUpdateUnprocessedCommittedBatches: DatabaseOperationMetrics, - InsertL1Message: DatabaseOperationMetrics, - InsertBatch: DatabaseOperationMetrics, - SetProcessedL1BlockNumber: DatabaseOperationMetrics, - GetBatchByIndex: DatabaseOperationMetrics, - InsertSignatures: DatabaseOperationMetrics, - GetBatchStatusByHash: DatabaseOperationMetrics, - UpdateSkippedL1Messages: DatabaseOperationMetrics, - GetLatestIndexedEventL1BlockNumber: DatabaseOperationMetrics, - GetL2BlockAndBatchInfoByHash: DatabaseOperationMetrics, - GetSignature: 
DatabaseOperationMetrics, - FinalizeBatchesUpToIndex: DatabaseOperationMetrics, - InsertBatchConsolidationOutcome: DatabaseOperationMetrics, - RemoveL1BlockInfoLeq: DatabaseOperationMetrics, - }, - }, - ), -} -2026-03-02T12:33:31.581813Z INFO node{idx=2}: sea_orm_migration::migrator: Applying all pending migrations -2026-03-02T12:33:31.582387Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20220101_000001_create_batch_commit_table' -2026-03-02T12:33:31.584272Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20220101_000001_create_batch_commit_table' has been applied -2026-03-02T12:33:31.584492Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250304_125946_add_l1_msg_table' -2026-03-02T12:33:31.585384Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250304_125946_add_l1_msg_table' has been applied -2026-03-02T12:33:31.585653Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250408_132123_add_header_metadata' -2026-03-02T12:33:31.585856Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250408_132123_add_header_metadata' has been applied -2026-03-02T12:33:31.586097Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250408_150338_load_header_metadata' -2026-03-02T12:33:31.586110Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250408_150338_load_header_metadata' has been applied -2026-03-02T12:33:31.586219Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250411_072004_add_l2_block' -2026-03-02T12:33:31.587769Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250411_072004_add_l2_block' has been applied -2026-03-02T12:33:31.588086Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250616_223947_add_metadata' -2026-03-02T12:33:31.588519Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250616_223947_add_metadata' has been applied -2026-03-02T12:33:31.588624Z INFO node{idx=2}: 
sea_orm_migration::migrator: Applying migration 'm20250904_175949_block_signature' -2026-03-02T12:33:31.588837Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250904_175949_block_signature' has been applied -2026-03-02T12:33:31.589095Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20251028_110719_add_l1_block_table' -2026-03-02T12:33:31.589287Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20251028_110719_add_l1_block_table' has been applied -2026-03-02T12:33:31.597237Z INFO node{idx=2}: scroll::node::args: Starting engine driver fcs=ForkchoiceState { head: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, safe: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, finalized: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 } } payload_building_duration=40 -2026-03-02T12:33:31.597270Z INFO node{idx=2}: scroll::providers: Running with mock blob provider - all other blob provider configurations are ignored -2026-03-02T12:33:31.598247Z INFO node{idx=2}: reth::cli: Starting consensus engine -2026-03-02T12:33:31.598321Z INFO scroll::derivation_pipeline: Starting derivation pipeline worker -2026-03-02T12:33:31.598603Z INFO node{idx=2}: reth_node_events::node: Forkchoice updated head_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 safe_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 finalized_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 -2026-03-02T12:33:31.617418Z INFO node{idx=2}: reth_e2e_test_utils::network: Session established with peer: 0x1ff1c09c1caa8fa9327009d46565fa6871ffcce57b560c26b8ef453fa233a3b9a5a7aeeee9ece98d9c83c721950e960dce5cd36637fe73830adc9418af6bb88a -2026-03-02T12:33:31.617845Z INFO node{idx=2}: reth_e2e_test_utils::network: Session established with peer: 
0x871a1b8b805b16f5d2bf85bbb4f144bb39b7175d725d42e8aa0e7903d936b1d9a0fdf6127430eb80df48fcdd749dd721fd87f1d4ade03e17ce932a79196aade3 -2026-03-02T12:33:34.576380Z INFO node{idx=2}: reth::cli: Status connected_peers=1 latest_block=0 -2026-03-02T12:33:36.582596Z INFO node{idx=2}: reth_e2e_test_utils::network: Session established with peer: 0x871a1b8b805b16f5d2bf85bbb4f144bb39b7175d725d42e8aa0e7903d936b1d9a0fdf6127430eb80df48fcdd749dd721fd87f1d4ade03e17ce932a79196aade3 -2026-03-02T12:33:36.583248Z INFO node{idx=2}: reth_e2e_test_utils::network: Session established with peer: 0x68d342edd2921250b59d89277d689bdd07e0068ae80a20b75cf9d6c3f56991b0a46b0b717c925efe7d9f87771d6fd89fc099fded52bd6eebd754be8b882fdc1f -2026-03-02T12:34:13.640892Z INFO node{idx=1}: reth::cli: Wrote network peers to file peers_file="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-ExVZKSsZ/known-peers.json" -2026-03-02T12:34:13.640899Z INFO node{idx=2}: reth::cli: Wrote network peers to file peers_file="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-E87A3wU3/known-peers.json" -2026-03-02T12:34:13.640974Z INFO node{idx=0}: reth::cli: Wrote network peers to file peers_file="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-BTYrGfAQ/known-peers.json" diff --git a/scroll-debug-47318.log b/scroll-debug-47318.log deleted file mode 100644 index 41c57b8a..00000000 --- a/scroll-debug-47318.log +++ /dev/null @@ -1,676 +0,0 @@ -2026-03-02T12:43:23.941198Z INFO node{idx=0}: reth::cli: Saving prune config to toml file -2026-03-02T12:43:23.941691Z INFO node{idx=0}: reth::cli: Configuration loaded path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-KUYbLIZB/reth.toml" -2026-03-02T12:43:23.942422Z INFO node{idx=0}: reth::cli: Database opened -2026-03-02T12:43:24.245935Z INFO node{idx=0}: reth::cli: -Pre-merge hard forks (block based): -- Homestead @0 -- Tangerine @0 -- SpuriousDragon @0 -- Byzantium @0 -- Constantinople @0 -- Petersburg @0 -- Istanbul @0 -- Berlin @0 -- London 
@0 -- Archimedes @0 -- Bernoulli @0 -- Curie @0 - -Post-merge hard forks (timestamp based): -- Shanghai @0 -- Darwin @0 -- DarwinV2 @0 -- Euclid @0 -- EuclidV2 @0 -- Feynman @0 -- Galileo @0 -- GalileoV2 @0 -2026-03-02T12:43:24.248691Z INFO node{idx=0}: reth::cli: Transaction pool initialized -2026-03-02T12:43:24.560959Z INFO node{idx=0}: reth::cli: P2P networking initialized enode=enode://2a854fec1913ab788092e38bc059988bd54406e53074eaf80662b3f243810b82477f0830b7739dd12a550405b721d790b8fdda008a89da71779a0ed90192e8b3@127.0.0.1:52288 -2026-03-02T12:43:24.561933Z INFO node{idx=0}: reth::cli: StaticFileProducer initialized -2026-03-02T12:43:24.562765Z INFO node{idx=0}: reth::cli: Verifying storage consistency. -2026-03-02T12:43:24.563206Z INFO node{idx=0}: reth::cli: Pruner initialized prune_config=PruneConfig { block_interval: 5, segments: PruneModes { sender_recovery: None, transaction_lookup: None, receipts: None, account_history: None, storage_history: None, bodies_history: None, receipts_log_filter: ReceiptsLogPruneConfig({}) } } -2026-03-02T12:43:24.563372Z INFO node{idx=0}: reth::cli: Creating JWT auth secret file path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-KUYbLIZB/jwt.hex" -2026-03-02T12:43:24.564552Z INFO node{idx=0}: reth::cli: Consensus engine initialized -2026-03-02T12:43:24.565074Z INFO node{idx=0}: reth::cli: Engine API handler initialized -2026-03-02T12:43:24.569199Z INFO node{idx=0}: reth::cli: RPC auth server started url=127.0.0.1:52289 -2026-03-02T12:43:24.570829Z INFO node{idx=0}: reth::cli: RPC IPC server started path=/tmp/reth.ipc-ZcdwhuVq -2026-03-02T12:43:24.570874Z INFO node{idx=0}: reth::cli: RPC HTTP server started url=127.0.0.1:52290 -2026-03-02T12:43:24.570927Z INFO node{idx=0}: rollup_node::args: Building rollup node with config: -ScrollRollupNodeConfig { - test: true, - consensus_args: ConsensusArgs { - algorithm: Noop, - authorized_signer: None, - }, - database_args: RollupNodeDatabaseArgs { - rn_db_path: None, - }, 
- chain_orchestrator_args: ChainOrchestratorArgs { - optimistic_sync_trigger: 100, - chain_buffer_size: 100, - }, - engine_driver_args: EngineDriverArgs { - sync_at_startup: true, - }, - blob_provider_args: BlobProviderArgs { - beacon_node_urls: None, - s3_url: None, - anvil_url: None, - mock: true, - compute_units_per_second: 0, - max_retries: 0, - initial_backoff: 0, - }, - l1_provider_args: L1ProviderArgs { - url: None, - compute_units_per_second: 10000, - max_retries: 10, - initial_backoff: 100, - logs_query_block_range: 500, - cache_max_items: 100, - }, - sequencer_args: SequencerArgs { - sequencer_enabled: true, - auto_start: false, - block_time: 100, - payload_building_duration: 40, - fee_recipient: 0x0000000000000000000000000000000000000000, - l1_message_inclusion_mode: BlockDepth( - 0, - ), - allow_empty_blocks: true, - max_l1_messages: None, - }, - network_args: RollupNodeNetworkArgs { - enable_eth_scroll_wire_bridge: true, - enable_scroll_wire: true, - sequencer_url: None, - signer_address: None, - }, - rpc_args: RpcArgs { - basic_enabled: true, - admin_enabled: true, - }, - signer_args: SignerArgs { - key_file: None, - aws_kms_key_id: None, - private_key: None, - }, - gas_price_oracle_args: RollupNodeGasPriceOracleArgs { - default_suggested_priority_fee: 0, - }, - pprof_args: PprofArgs { - enabled: false, - addr: 0.0.0.0:6868, - default_duration: 30, - }, - database: Some( - Database { - database: Retry { - inner: DatabaseInner { - connection: SqlxSqlitePoolConnection, - write_lock: Mutex { - data: (), - }, - read_locks: Semaphore { - ll_sem: Semaphore { - permits: 5, - }, - resource_span: Span { - name: "runtime.resource", - level: Level( - Trace, - ), - target: "tokio::sync::semaphore", - disabled: true, - module_path: "tokio::sync::semaphore", - line: 461, - file: "/Users/yiweichi/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tokio-1.48.0/src/sync/semaphore.rs", - }, - }, - metrics: DatabaseMetrics, - tmp_dir: None, - }, - max_retries: None, - 
initial_delay_ms: 50, - exponential_backoff: false, - metrics: RetryMetrics, - }, - metrics: { - RemoveL1BlockInfoLeq: DatabaseOperationMetrics, - SetBatchRevertBlockNumberForBatchRange: DatabaseOperationMetrics, - DeleteL2BlocksGtBlockNumber: DatabaseOperationMetrics, - InsertBlocks: DatabaseOperationMetrics, - DeleteL1MessagesGt: DatabaseOperationMetrics, - FetchAndUpdateUnprocessedFinalizedBatches: DatabaseOperationMetrics, - InsertGenesisBlock: DatabaseOperationMetrics, - UpdateL1MessagesFromL2Blocks: DatabaseOperationMetrics, - GetHighestBlockForBatchIndex: DatabaseOperationMetrics, - InsertSignatures: DatabaseOperationMetrics, - SetProcessedL1BlockNumber: DatabaseOperationMetrics, - DeleteBatchesGtBatchIndex: DatabaseOperationMetrics, - SetLatestL1BlockNumber: DatabaseOperationMetrics, - PurgeL1MessageToL2BlockMappings: DatabaseOperationMetrics, - InsertSignature: DatabaseOperationMetrics, - FetchAndUpdateUnprocessedCommittedBatches: DatabaseOperationMetrics, - GetL2BlockAndBatchInfoByHash: DatabaseOperationMetrics, - UpdateL1MessagesWithL2Block: DatabaseOperationMetrics, - GetFinalizedL1BlockNumber: DatabaseOperationMetrics, - GetLatestL1BlockNumber: DatabaseOperationMetrics, - DeleteBatchFinalizationGtBlockNumber: DatabaseOperationMetrics, - GetNL2BlockDataHint: DatabaseOperationMetrics, - GetLatestSafeL2Info: DatabaseOperationMetrics, - DeleteBatchesGtBlockNumber: DatabaseOperationMetrics, - RemoveL1BlockInfoGt: DatabaseOperationMetrics, - UpdateSkippedL1Messages: DatabaseOperationMetrics, - InsertBatch: DatabaseOperationMetrics, - SetL2HeadBlockNumber: DatabaseOperationMetrics, - InsertBatchConsolidationOutcome: DatabaseOperationMetrics, - GetBatchStatusByHash: DatabaseOperationMetrics, - GetL1BlockInfo: DatabaseOperationMetrics, - GetProcessedL1BlockNumber: DatabaseOperationMetrics, - GetL2HeadBlockNumber: DatabaseOperationMetrics, - SetFinalizedL1BlockNumber: DatabaseOperationMetrics, - GetL2BlockInfoByNumber: DatabaseOperationMetrics, - 
UpdateBatchStatus: DatabaseOperationMetrics, - InsertL1BlockInfo: DatabaseOperationMetrics, - Unwind: DatabaseOperationMetrics, - GetSignature: DatabaseOperationMetrics, - FinalizeConsolidatedBatches: DatabaseOperationMetrics, - ChangeBatchProcessingToCommittedStatus: DatabaseOperationMetrics, - GetHighestBlockForBatchHash: DatabaseOperationMetrics, - InsertL1Message: DatabaseOperationMetrics, - GetLatestIndexedEventL1BlockNumber: DatabaseOperationMetrics, - InsertBlock: DatabaseOperationMetrics, - GetBatchByIndex: DatabaseOperationMetrics, - DeleteBatchRevertGtBlockNumber: DatabaseOperationMetrics, - GetMaxBlockDataHintBlockNumber: DatabaseOperationMetrics, - PrepareOnStartup: DatabaseOperationMetrics, - DeleteL2BlocksGtBatchIndex: DatabaseOperationMetrics, - GetNL1Messages: DatabaseOperationMetrics, - FinalizeBatchesUpToIndex: DatabaseOperationMetrics, - GetBatchByHash: DatabaseOperationMetrics, - }, - }, - ), -} -2026-03-02T12:43:24.577126Z INFO node{idx=0}: sea_orm_migration::migrator: Applying all pending migrations -2026-03-02T12:43:24.577883Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20220101_000001_create_batch_commit_table' -2026-03-02T12:43:24.579501Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20220101_000001_create_batch_commit_table' has been applied -2026-03-02T12:43:24.579715Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250304_125946_add_l1_msg_table' -2026-03-02T12:43:24.580314Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250304_125946_add_l1_msg_table' has been applied -2026-03-02T12:43:24.580506Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250408_132123_add_header_metadata' -2026-03-02T12:43:24.580660Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250408_132123_add_header_metadata' has been applied -2026-03-02T12:43:24.580855Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 
'm20250408_150338_load_header_metadata' -2026-03-02T12:43:24.580868Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250408_150338_load_header_metadata' has been applied -2026-03-02T12:43:24.580975Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250411_072004_add_l2_block' -2026-03-02T12:43:24.582094Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250411_072004_add_l2_block' has been applied -2026-03-02T12:43:24.582322Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250616_223947_add_metadata' -2026-03-02T12:43:24.582676Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250616_223947_add_metadata' has been applied -2026-03-02T12:43:24.582789Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250904_175949_block_signature' -2026-03-02T12:43:24.582982Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250904_175949_block_signature' has been applied -2026-03-02T12:43:24.583192Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20251028_110719_add_l1_block_table' -2026-03-02T12:43:24.583360Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20251028_110719_add_l1_block_table' has been applied -2026-03-02T12:43:24.594810Z INFO node{idx=0}: scroll::node::args: Starting engine driver fcs=ForkchoiceState { head: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, safe: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, finalized: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 } } payload_building_duration=40 -2026-03-02T12:43:24.594898Z INFO node{idx=0}: scroll::providers: Running with mock blob provider - all other blob provider configurations are ignored -2026-03-02T12:43:24.596013Z INFO node{idx=0}: reth::cli: Starting consensus engine -2026-03-02T12:43:24.596116Z INFO 
scroll::derivation_pipeline: Starting derivation pipeline worker -2026-03-02T12:43:24.596611Z INFO node{idx=0}: reth_node_events::node: Forkchoice updated head_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 safe_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 finalized_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 -2026-03-02T12:43:24.601141Z WARN node{idx=1}: reth_node_builder::launch::common: Failed to build global thread pool err=The global thread pool has already been initialized. -2026-03-02T12:43:24.601466Z INFO node{idx=1}: reth::cli: Saving prune config to toml file -2026-03-02T12:43:24.601738Z INFO node{idx=1}: reth::cli: Configuration loaded path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-UXOFiwdf/reth.toml" -2026-03-02T12:43:24.602128Z INFO node{idx=1}: reth::cli: Database opened -2026-03-02T12:43:24.687078Z INFO node{idx=1}: reth::cli: -Pre-merge hard forks (block based): -- Homestead @0 -- Tangerine @0 -- SpuriousDragon @0 -- Byzantium @0 -- Constantinople @0 -- Petersburg @0 -- Istanbul @0 -- Berlin @0 -- London @0 -- Archimedes @0 -- Bernoulli @0 -- Curie @0 - -Post-merge hard forks (timestamp based): -- Shanghai @0 -- Darwin @0 -- DarwinV2 @0 -- Euclid @0 -- EuclidV2 @0 -- Feynman @0 -- Galileo @0 -- GalileoV2 @0 -2026-03-02T12:43:24.687735Z INFO node{idx=1}: reth::cli: Transaction pool initialized -2026-03-02T12:43:24.689263Z INFO node{idx=1}: reth::cli: P2P networking initialized enode=enode://be607dbc7586bc1ed99f31b81bbe1a132c1d46579aefcbfd1c3a5ef1c8517ed666ff81a5b8d7e6582c106517523e5f72b7e76d466415aa45f95327baadd0c214@127.0.0.1:52295 -2026-03-02T12:43:24.689478Z INFO node{idx=1}: reth::cli: StaticFileProducer initialized -2026-03-02T12:43:24.689877Z INFO node{idx=1}: reth::cli: Verifying storage consistency. 
-2026-03-02T12:43:24.690177Z INFO node{idx=1}: reth::cli: Pruner initialized prune_config=PruneConfig { block_interval: 5, segments: PruneModes { sender_recovery: None, transaction_lookup: None, receipts: None, account_history: None, storage_history: None, bodies_history: None, receipts_log_filter: ReceiptsLogPruneConfig({}) } } -2026-03-02T12:43:24.690328Z INFO node{idx=1}: reth::cli: Creating JWT auth secret file path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-UXOFiwdf/jwt.hex" -2026-03-02T12:43:24.691108Z INFO node{idx=1}: reth::cli: Consensus engine initialized -2026-03-02T12:43:24.691303Z INFO node{idx=1}: reth::cli: Engine API handler initialized -2026-03-02T12:43:24.693805Z INFO node{idx=1}: reth::cli: RPC auth server started url=127.0.0.1:52296 -2026-03-02T12:43:24.695396Z INFO node{idx=1}: reth::cli: RPC IPC server started path=/tmp/reth.ipc-anQ1yfgM -2026-03-02T12:43:24.695406Z INFO node{idx=1}: reth::cli: RPC HTTP server started url=127.0.0.1:52297 -2026-03-02T12:43:24.695432Z INFO node{idx=1}: rollup_node::args: Building rollup node with config: -ScrollRollupNodeConfig { - test: true, - consensus_args: ConsensusArgs { - algorithm: Noop, - authorized_signer: None, - }, - database_args: RollupNodeDatabaseArgs { - rn_db_path: None, - }, - chain_orchestrator_args: ChainOrchestratorArgs { - optimistic_sync_trigger: 100, - chain_buffer_size: 100, - }, - engine_driver_args: EngineDriverArgs { - sync_at_startup: true, - }, - blob_provider_args: BlobProviderArgs { - beacon_node_urls: None, - s3_url: None, - anvil_url: None, - mock: true, - compute_units_per_second: 0, - max_retries: 0, - initial_backoff: 0, - }, - l1_provider_args: L1ProviderArgs { - url: None, - compute_units_per_second: 10000, - max_retries: 10, - initial_backoff: 100, - logs_query_block_range: 500, - cache_max_items: 100, - }, - sequencer_args: SequencerArgs { - sequencer_enabled: false, - auto_start: false, - block_time: 100, - payload_building_duration: 40, - fee_recipient: 
0x0000000000000000000000000000000000000000, - l1_message_inclusion_mode: BlockDepth( - 0, - ), - allow_empty_blocks: true, - max_l1_messages: None, - }, - network_args: RollupNodeNetworkArgs { - enable_eth_scroll_wire_bridge: true, - enable_scroll_wire: true, - sequencer_url: None, - signer_address: None, - }, - rpc_args: RpcArgs { - basic_enabled: true, - admin_enabled: true, - }, - signer_args: SignerArgs { - key_file: None, - aws_kms_key_id: None, - private_key: None, - }, - gas_price_oracle_args: RollupNodeGasPriceOracleArgs { - default_suggested_priority_fee: 0, - }, - pprof_args: PprofArgs { - enabled: false, - addr: 0.0.0.0:6868, - default_duration: 30, - }, - database: Some( - Database { - database: Retry { - inner: DatabaseInner { - connection: SqlxSqlitePoolConnection, - write_lock: Mutex { - data: (), - }, - read_locks: Semaphore { - ll_sem: Semaphore { - permits: 5, - }, - resource_span: Span { - name: "runtime.resource", - level: Level( - Trace, - ), - target: "tokio::sync::semaphore", - disabled: true, - module_path: "tokio::sync::semaphore", - line: 461, - file: "/Users/yiweichi/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tokio-1.48.0/src/sync/semaphore.rs", - }, - }, - metrics: DatabaseMetrics, - tmp_dir: None, - }, - max_retries: None, - initial_delay_ms: 50, - exponential_backoff: false, - metrics: RetryMetrics, - }, - metrics: { - DeleteBatchFinalizationGtBlockNumber: DatabaseOperationMetrics, - RemoveL1BlockInfoGt: DatabaseOperationMetrics, - FetchAndUpdateUnprocessedCommittedBatches: DatabaseOperationMetrics, - DeleteBatchesGtBlockNumber: DatabaseOperationMetrics, - Unwind: DatabaseOperationMetrics, - GetBatchByHash: DatabaseOperationMetrics, - GetBatchStatusByHash: DatabaseOperationMetrics, - InsertBlocks: DatabaseOperationMetrics, - GetL2BlockInfoByNumber: DatabaseOperationMetrics, - GetHighestBlockForBatchIndex: DatabaseOperationMetrics, - FinalizeBatchesUpToIndex: DatabaseOperationMetrics, - InsertBatchConsolidationOutcome: 
DatabaseOperationMetrics, - SetLatestL1BlockNumber: DatabaseOperationMetrics, - UpdateL1MessagesFromL2Blocks: DatabaseOperationMetrics, - SetL2HeadBlockNumber: DatabaseOperationMetrics, - ChangeBatchProcessingToCommittedStatus: DatabaseOperationMetrics, - InsertBatch: DatabaseOperationMetrics, - FetchAndUpdateUnprocessedFinalizedBatches: DatabaseOperationMetrics, - InsertBlock: DatabaseOperationMetrics, - DeleteBatchesGtBatchIndex: DatabaseOperationMetrics, - InsertL1BlockInfo: DatabaseOperationMetrics, - SetBatchRevertBlockNumberForBatchRange: DatabaseOperationMetrics, - FinalizeConsolidatedBatches: DatabaseOperationMetrics, - UpdateL1MessagesWithL2Block: DatabaseOperationMetrics, - InsertSignatures: DatabaseOperationMetrics, - GetLatestL1BlockNumber: DatabaseOperationMetrics, - GetNL2BlockDataHint: DatabaseOperationMetrics, - SetFinalizedL1BlockNumber: DatabaseOperationMetrics, - PurgeL1MessageToL2BlockMappings: DatabaseOperationMetrics, - GetMaxBlockDataHintBlockNumber: DatabaseOperationMetrics, - GetHighestBlockForBatchHash: DatabaseOperationMetrics, - GetL2BlockAndBatchInfoByHash: DatabaseOperationMetrics, - GetBatchByIndex: DatabaseOperationMetrics, - GetL1BlockInfo: DatabaseOperationMetrics, - DeleteBatchRevertGtBlockNumber: DatabaseOperationMetrics, - PrepareOnStartup: DatabaseOperationMetrics, - InsertSignature: DatabaseOperationMetrics, - UpdateSkippedL1Messages: DatabaseOperationMetrics, - GetFinalizedL1BlockNumber: DatabaseOperationMetrics, - GetLatestIndexedEventL1BlockNumber: DatabaseOperationMetrics, - GetSignature: DatabaseOperationMetrics, - SetProcessedL1BlockNumber: DatabaseOperationMetrics, - GetNL1Messages: DatabaseOperationMetrics, - DeleteL2BlocksGtBatchIndex: DatabaseOperationMetrics, - GetLatestSafeL2Info: DatabaseOperationMetrics, - InsertGenesisBlock: DatabaseOperationMetrics, - GetProcessedL1BlockNumber: DatabaseOperationMetrics, - GetL2HeadBlockNumber: DatabaseOperationMetrics, - DeleteL2BlocksGtBlockNumber: DatabaseOperationMetrics, - 
InsertL1Message: DatabaseOperationMetrics, - RemoveL1BlockInfoLeq: DatabaseOperationMetrics, - UpdateBatchStatus: DatabaseOperationMetrics, - DeleteL1MessagesGt: DatabaseOperationMetrics, - }, - }, - ), -} -2026-03-02T12:43:24.696340Z INFO node{idx=1}: sea_orm_migration::migrator: Applying all pending migrations -2026-03-02T12:43:24.696770Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20220101_000001_create_batch_commit_table' -2026-03-02T12:43:24.698071Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20220101_000001_create_batch_commit_table' has been applied -2026-03-02T12:43:24.698239Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250304_125946_add_l1_msg_table' -2026-03-02T12:43:24.698820Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250304_125946_add_l1_msg_table' has been applied -2026-03-02T12:43:24.699019Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250408_132123_add_header_metadata' -2026-03-02T12:43:24.699158Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250408_132123_add_header_metadata' has been applied -2026-03-02T12:43:24.699328Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250408_150338_load_header_metadata' -2026-03-02T12:43:24.699337Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250408_150338_load_header_metadata' has been applied -2026-03-02T12:43:24.699413Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250411_072004_add_l2_block' -2026-03-02T12:43:24.700540Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250411_072004_add_l2_block' has been applied -2026-03-02T12:43:24.700745Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250616_223947_add_metadata' -2026-03-02T12:43:24.701060Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250616_223947_add_metadata' has been applied -2026-03-02T12:43:24.701148Z INFO node{idx=1}: 
sea_orm_migration::migrator: Applying migration 'm20250904_175949_block_signature' -2026-03-02T12:43:24.701315Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250904_175949_block_signature' has been applied -2026-03-02T12:43:24.701515Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20251028_110719_add_l1_block_table' -2026-03-02T12:43:24.701662Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20251028_110719_add_l1_block_table' has been applied -2026-03-02T12:43:24.708674Z INFO node{idx=1}: scroll::node::args: Starting engine driver fcs=ForkchoiceState { head: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, safe: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, finalized: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 } } payload_building_duration=40 -2026-03-02T12:43:24.708704Z INFO node{idx=1}: scroll::providers: Running with mock blob provider - all other blob provider configurations are ignored -2026-03-02T12:43:24.709503Z INFO node{idx=1}: reth::cli: Starting consensus engine -2026-03-02T12:43:24.709547Z INFO scroll::derivation_pipeline: Starting derivation pipeline worker -2026-03-02T12:43:24.709794Z INFO node{idx=1}: reth_node_events::node: Forkchoice updated head_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 safe_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 finalized_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 -2026-03-02T12:43:27.566777Z INFO node{idx=0}: reth::cli: Status connected_peers=0 latest_block=0 -2026-03-02T12:43:27.693300Z INFO node{idx=1}: reth::cli: Status connected_peers=0 latest_block=0 -2026-03-02T12:43:29.572654Z INFO node{idx=1}: reth_e2e_test_utils::network: Session established with peer: 
0x2a854fec1913ab788092e38bc059988bd54406e53074eaf80662b3f243810b82477f0830b7739dd12a550405b721d790b8fdda008a89da71779a0ed90192e8b3 -2026-03-02T12:43:29.573248Z INFO node{idx=1}: reth_e2e_test_utils::network: Session established with peer: 0xbe607dbc7586bc1ed99f31b81bbe1a132c1d46579aefcbfd1c3a5ef1c8517ed666ff81a5b8d7e6582c106517523e5f72b7e76d466415aa45f95327baadd0c214 -2026-03-02T12:43:29.580242Z WARN node{idx=2}: reth_node_builder::launch::common: Failed to build global thread pool err=The global thread pool has already been initialized. -2026-03-02T12:43:29.580775Z INFO node{idx=2}: reth::cli: Saving prune config to toml file -2026-03-02T12:43:29.581176Z INFO node{idx=2}: reth::cli: Configuration loaded path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-43IjIPFl/reth.toml" -2026-03-02T12:43:29.581597Z INFO node{idx=2}: reth::cli: Database opened -2026-03-02T12:43:29.674645Z INFO node{idx=2}: reth::cli: -Pre-merge hard forks (block based): -- Homestead @0 -- Tangerine @0 -- SpuriousDragon @0 -- Byzantium @0 -- Constantinople @0 -- Petersburg @0 -- Istanbul @0 -- Berlin @0 -- London @0 -- Archimedes @0 -- Bernoulli @0 -- Curie @0 - -Post-merge hard forks (timestamp based): -- Shanghai @0 -- Darwin @0 -- DarwinV2 @0 -- Euclid @0 -- EuclidV2 @0 -- Feynman @0 -- Galileo @0 -- GalileoV2 @0 -2026-03-02T12:43:29.675497Z INFO node{idx=2}: reth::cli: Transaction pool initialized -2026-03-02T12:43:29.677419Z INFO node{idx=2}: reth::cli: P2P networking initialized enode=enode://985963d1985d169e73e741aaa7af71eacad84c47e1247cabd01fe37de11086d5bbfb2301a1c49885981111067d9a950d180a99c2d38869cbd496306d7da9a2da@127.0.0.1:52305 -2026-03-02T12:43:29.677705Z INFO node{idx=2}: reth::cli: StaticFileProducer initialized -2026-03-02T12:43:29.678221Z INFO node{idx=2}: reth::cli: Verifying storage consistency. 
-2026-03-02T12:43:29.678596Z INFO node{idx=2}: reth::cli: Pruner initialized prune_config=PruneConfig { block_interval: 5, segments: PruneModes { sender_recovery: None, transaction_lookup: None, receipts: None, account_history: None, storage_history: None, bodies_history: None, receipts_log_filter: ReceiptsLogPruneConfig({}) } } -2026-03-02T12:43:29.678791Z INFO node{idx=2}: reth::cli: Creating JWT auth secret file path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-43IjIPFl/jwt.hex" -2026-03-02T12:43:29.679823Z INFO node{idx=2}: reth::cli: Consensus engine initialized -2026-03-02T12:43:29.680040Z INFO node{idx=2}: reth::cli: Engine API handler initialized -2026-03-02T12:43:29.682876Z INFO node{idx=2}: reth::cli: RPC auth server started url=127.0.0.1:52306 -2026-03-02T12:43:29.684728Z INFO node{idx=2}: reth::cli: RPC IPC server started path=/tmp/reth.ipc-joNGsKf3 -2026-03-02T12:43:29.684740Z INFO node{idx=2}: reth::cli: RPC HTTP server started url=127.0.0.1:52307 -2026-03-02T12:43:29.684769Z INFO node{idx=2}: rollup_node::args: Building rollup node with config: -ScrollRollupNodeConfig { - test: true, - consensus_args: ConsensusArgs { - algorithm: Noop, - authorized_signer: None, - }, - database_args: RollupNodeDatabaseArgs { - rn_db_path: None, - }, - chain_orchestrator_args: ChainOrchestratorArgs { - optimistic_sync_trigger: 100, - chain_buffer_size: 100, - }, - engine_driver_args: EngineDriverArgs { - sync_at_startup: true, - }, - blob_provider_args: BlobProviderArgs { - beacon_node_urls: None, - s3_url: None, - anvil_url: None, - mock: true, - compute_units_per_second: 0, - max_retries: 0, - initial_backoff: 0, - }, - l1_provider_args: L1ProviderArgs { - url: None, - compute_units_per_second: 10000, - max_retries: 10, - initial_backoff: 100, - logs_query_block_range: 500, - cache_max_items: 100, - }, - sequencer_args: SequencerArgs { - sequencer_enabled: false, - auto_start: false, - block_time: 100, - payload_building_duration: 40, - fee_recipient: 
0x0000000000000000000000000000000000000000, - l1_message_inclusion_mode: BlockDepth( - 0, - ), - allow_empty_blocks: true, - max_l1_messages: None, - }, - network_args: RollupNodeNetworkArgs { - enable_eth_scroll_wire_bridge: true, - enable_scroll_wire: true, - sequencer_url: None, - signer_address: None, - }, - rpc_args: RpcArgs { - basic_enabled: true, - admin_enabled: true, - }, - signer_args: SignerArgs { - key_file: None, - aws_kms_key_id: None, - private_key: None, - }, - gas_price_oracle_args: RollupNodeGasPriceOracleArgs { - default_suggested_priority_fee: 0, - }, - pprof_args: PprofArgs { - enabled: false, - addr: 0.0.0.0:6868, - default_duration: 30, - }, - database: Some( - Database { - database: Retry { - inner: DatabaseInner { - connection: SqlxSqlitePoolConnection, - write_lock: Mutex { - data: (), - }, - read_locks: Semaphore { - ll_sem: Semaphore { - permits: 5, - }, - resource_span: Span { - name: "runtime.resource", - level: Level( - Trace, - ), - target: "tokio::sync::semaphore", - disabled: true, - module_path: "tokio::sync::semaphore", - line: 461, - file: "/Users/yiweichi/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tokio-1.48.0/src/sync/semaphore.rs", - }, - }, - metrics: DatabaseMetrics, - tmp_dir: None, - }, - max_retries: None, - initial_delay_ms: 50, - exponential_backoff: false, - metrics: RetryMetrics, - }, - metrics: { - SetProcessedL1BlockNumber: DatabaseOperationMetrics, - UpdateL1MessagesFromL2Blocks: DatabaseOperationMetrics, - FetchAndUpdateUnprocessedFinalizedBatches: DatabaseOperationMetrics, - Unwind: DatabaseOperationMetrics, - GetNL2BlockDataHint: DatabaseOperationMetrics, - GetHighestBlockForBatchIndex: DatabaseOperationMetrics, - GetNL1Messages: DatabaseOperationMetrics, - DeleteL2BlocksGtBatchIndex: DatabaseOperationMetrics, - GetL1BlockInfo: DatabaseOperationMetrics, - SetBatchRevertBlockNumberForBatchRange: DatabaseOperationMetrics, - InsertBatchConsolidationOutcome: DatabaseOperationMetrics, - 
UpdateL1MessagesWithL2Block: DatabaseOperationMetrics, - GetBatchByHash: DatabaseOperationMetrics, - FetchAndUpdateUnprocessedCommittedBatches: DatabaseOperationMetrics, - GetMaxBlockDataHintBlockNumber: DatabaseOperationMetrics, - DeleteL1MessagesGt: DatabaseOperationMetrics, - UpdateBatchStatus: DatabaseOperationMetrics, - RemoveL1BlockInfoGt: DatabaseOperationMetrics, - GetLatestL1BlockNumber: DatabaseOperationMetrics, - InsertBlock: DatabaseOperationMetrics, - GetL2HeadBlockNumber: DatabaseOperationMetrics, - GetLatestSafeL2Info: DatabaseOperationMetrics, - GetHighestBlockForBatchHash: DatabaseOperationMetrics, - GetSignature: DatabaseOperationMetrics, - InsertGenesisBlock: DatabaseOperationMetrics, - InsertBlocks: DatabaseOperationMetrics, - DeleteBatchesGtBatchIndex: DatabaseOperationMetrics, - SetLatestL1BlockNumber: DatabaseOperationMetrics, - InsertL1Message: DatabaseOperationMetrics, - UpdateSkippedL1Messages: DatabaseOperationMetrics, - InsertSignature: DatabaseOperationMetrics, - DeleteBatchFinalizationGtBlockNumber: DatabaseOperationMetrics, - GetBatchStatusByHash: DatabaseOperationMetrics, - GetFinalizedL1BlockNumber: DatabaseOperationMetrics, - GetBatchByIndex: DatabaseOperationMetrics, - FinalizeBatchesUpToIndex: DatabaseOperationMetrics, - RemoveL1BlockInfoLeq: DatabaseOperationMetrics, - ChangeBatchProcessingToCommittedStatus: DatabaseOperationMetrics, - SetL2HeadBlockNumber: DatabaseOperationMetrics, - InsertL1BlockInfo: DatabaseOperationMetrics, - PrepareOnStartup: DatabaseOperationMetrics, - DeleteL2BlocksGtBlockNumber: DatabaseOperationMetrics, - InsertSignatures: DatabaseOperationMetrics, - InsertBatch: DatabaseOperationMetrics, - DeleteBatchesGtBlockNumber: DatabaseOperationMetrics, - GetLatestIndexedEventL1BlockNumber: DatabaseOperationMetrics, - GetProcessedL1BlockNumber: DatabaseOperationMetrics, - GetL2BlockAndBatchInfoByHash: DatabaseOperationMetrics, - PurgeL1MessageToL2BlockMappings: DatabaseOperationMetrics, - GetL2BlockInfoByNumber: 
DatabaseOperationMetrics, - FinalizeConsolidatedBatches: DatabaseOperationMetrics, - DeleteBatchRevertGtBlockNumber: DatabaseOperationMetrics, - SetFinalizedL1BlockNumber: DatabaseOperationMetrics, - }, - }, - ), -} -2026-03-02T12:43:29.685918Z INFO node{idx=2}: sea_orm_migration::migrator: Applying all pending migrations -2026-03-02T12:43:29.686437Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20220101_000001_create_batch_commit_table' -2026-03-02T12:43:29.687907Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20220101_000001_create_batch_commit_table' has been applied -2026-03-02T12:43:29.688169Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250304_125946_add_l1_msg_table' -2026-03-02T12:43:29.688907Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250304_125946_add_l1_msg_table' has been applied -2026-03-02T12:43:29.689124Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250408_132123_add_header_metadata' -2026-03-02T12:43:29.689278Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250408_132123_add_header_metadata' has been applied -2026-03-02T12:43:29.689477Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250408_150338_load_header_metadata' -2026-03-02T12:43:29.689487Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250408_150338_load_header_metadata' has been applied -2026-03-02T12:43:29.689607Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250411_072004_add_l2_block' -2026-03-02T12:43:29.691046Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250411_072004_add_l2_block' has been applied -2026-03-02T12:43:29.691309Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250616_223947_add_metadata' -2026-03-02T12:43:29.691681Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250616_223947_add_metadata' has been applied -2026-03-02T12:43:29.691777Z INFO 
node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250904_175949_block_signature' -2026-03-02T12:43:29.692007Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250904_175949_block_signature' has been applied -2026-03-02T12:43:29.692244Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20251028_110719_add_l1_block_table' -2026-03-02T12:43:29.692428Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20251028_110719_add_l1_block_table' has been applied -2026-03-02T12:43:29.700481Z INFO node{idx=2}: scroll::node::args: Starting engine driver fcs=ForkchoiceState { head: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, safe: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, finalized: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 } } payload_building_duration=40 -2026-03-02T12:43:29.700532Z INFO node{idx=2}: scroll::providers: Running with mock blob provider - all other blob provider configurations are ignored -2026-03-02T12:43:29.701494Z INFO node{idx=2}: reth::cli: Starting consensus engine -2026-03-02T12:43:29.701557Z INFO scroll::derivation_pipeline: Starting derivation pipeline worker -2026-03-02T12:43:29.701941Z INFO node{idx=2}: reth_node_events::node: Forkchoice updated head_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 safe_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 finalized_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 -2026-03-02T12:43:32.682331Z INFO node{idx=2}: reth::cli: Status connected_peers=0 latest_block=0 -2026-03-02T12:43:34.696510Z INFO node{idx=2}: reth_e2e_test_utils::network: Session established with peer: 0xbe607dbc7586bc1ed99f31b81bbe1a132c1d46579aefcbfd1c3a5ef1c8517ed666ff81a5b8d7e6582c106517523e5f72b7e76d466415aa45f95327baadd0c214 
-2026-03-02T12:43:34.696936Z INFO node{idx=2}: reth_e2e_test_utils::network: Session established with peer: 0x985963d1985d169e73e741aaa7af71eacad84c47e1247cabd01fe37de11086d5bbfb2301a1c49885981111067d9a950d180a99c2d38869cbd496306d7da9a2da -2026-03-02T12:43:39.687164Z INFO node{idx=2}: reth_e2e_test_utils::network: Session established with peer: 0x985963d1985d169e73e741aaa7af71eacad84c47e1247cabd01fe37de11086d5bbfb2301a1c49885981111067d9a950d180a99c2d38869cbd496306d7da9a2da -2026-03-02T12:43:39.687776Z INFO node{idx=2}: reth_e2e_test_utils::network: Session established with peer: 0x2a854fec1913ab788092e38bc059988bd54406e53074eaf80662b3f243810b82477f0830b7739dd12a550405b721d790b8fdda008a89da71779a0ed90192e8b3 -2026-03-02T12:44:42.565673Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-03-02T12:44:42.692899Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-03-02T12:44:47.681405Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-03-02T12:45:57.565270Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-03-02T12:45:57.691842Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-03-02T12:46:02.679649Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-03-02T12:46:59.731446Z INFO node{idx=0}: reth::cli: Wrote network peers to file peers_file="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-KUYbLIZB/known-peers.json" -2026-03-02T12:46:59.733156Z INFO node{idx=1}: reth::cli: Wrote network peers to file peers_file="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-UXOFiwdf/known-peers.json" diff --git a/scroll-debug-51668.log b/scroll-debug-51668.log deleted file mode 100644 index e69de29b..00000000 diff --git a/scroll-debug-93283.log b/scroll-debug-93283.log deleted file mode 100644 index 48bbb5da..00000000 --- a/scroll-debug-93283.log +++ /dev/null @@ -1,862 +0,0 @@ -2026-02-28T21:43:50.406970Z INFO 
node{idx=0}: reth::cli: Saving prune config to toml file -2026-02-28T21:43:50.407491Z INFO node{idx=0}: reth::cli: Configuration loaded path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-iZaMJRZv/reth.toml" -2026-02-28T21:43:50.408368Z INFO node{idx=0}: reth::cli: Database opened -2026-02-28T21:43:50.709233Z INFO node{idx=0}: reth::cli: -Pre-merge hard forks (block based): -- Homestead @0 -- Tangerine @0 -- SpuriousDragon @0 -- Byzantium @0 -- Constantinople @0 -- Petersburg @0 -- Istanbul @0 -- Berlin @0 -- London @0 -- Archimedes @0 -- Bernoulli @0 -- Curie @0 - -Post-merge hard forks (timestamp based): -- Shanghai @0 -- Darwin @0 -- DarwinV2 @0 -- Euclid @0 -- EuclidV2 @0 -- Feynman @0 -- Galileo @0 -- GalileoV2 @0 -2026-02-28T21:43:50.711367Z INFO node{idx=0}: reth::cli: Transaction pool initialized -2026-02-28T21:43:51.042445Z INFO node{idx=0}: reth::cli: P2P networking initialized enode=enode://2c7946ba72f3c7890775cb525fd39dea7e107cdbe4c34d70909b19aeecf2866b48271d2c07b28d8e9420b954e40a137d66ccefbf3f8253a623a64dbf8c236b9d@127.0.0.1:54050 -2026-02-28T21:43:51.043328Z INFO node{idx=0}: reth::cli: StaticFileProducer initialized -2026-02-28T21:43:51.044182Z INFO node{idx=0}: reth::cli: Verifying storage consistency. 
-2026-02-28T21:43:51.044675Z INFO node{idx=0}: reth::cli: Pruner initialized prune_config=PruneConfig { block_interval: 5, segments: PruneModes { sender_recovery: None, transaction_lookup: None, receipts: None, account_history: None, storage_history: None, bodies_history: None, receipts_log_filter: ReceiptsLogPruneConfig({}) } } -2026-02-28T21:43:51.044846Z INFO node{idx=0}: reth::cli: Creating JWT auth secret file path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-iZaMJRZv/jwt.hex" -2026-02-28T21:43:51.046173Z INFO node{idx=0}: reth::cli: Consensus engine initialized -2026-02-28T21:43:51.046732Z INFO node{idx=0}: reth::cli: Engine API handler initialized -2026-02-28T21:43:51.051093Z INFO node{idx=0}: reth::cli: RPC auth server started url=127.0.0.1:54051 -2026-02-28T21:43:51.052733Z INFO node{idx=0}: reth::cli: RPC IPC server started path=/tmp/reth.ipc-XtAHMyDd -2026-02-28T21:43:51.052777Z INFO node{idx=0}: reth::cli: RPC HTTP server started url=127.0.0.1:54052 -2026-02-28T21:43:51.052844Z INFO node{idx=0}: rollup_node::args: Building rollup node with config: -ScrollRollupNodeConfig { - test: true, - consensus_args: ConsensusArgs { - algorithm: Noop, - authorized_signer: None, - }, - database_args: RollupNodeDatabaseArgs { - rn_db_path: None, - }, - chain_orchestrator_args: ChainOrchestratorArgs { - optimistic_sync_trigger: 100, - chain_buffer_size: 100, - }, - engine_driver_args: EngineDriverArgs { - sync_at_startup: true, - }, - blob_provider_args: BlobProviderArgs { - beacon_node_urls: None, - s3_url: None, - anvil_url: None, - mock: true, - compute_units_per_second: 0, - max_retries: 0, - initial_backoff: 0, - }, - l1_provider_args: L1ProviderArgs { - url: None, - compute_units_per_second: 10000, - max_retries: 10, - initial_backoff: 100, - logs_query_block_range: 500, - cache_max_items: 100, - }, - sequencer_args: SequencerArgs { - sequencer_enabled: true, - auto_start: false, - block_time: 100, - payload_building_duration: 40, - fee_recipient: 
0x0000000000000000000000000000000000000000, - l1_message_inclusion_mode: BlockDepth( - 0, - ), - allow_empty_blocks: true, - max_l1_messages: None, - }, - network_args: RollupNodeNetworkArgs { - enable_eth_scroll_wire_bridge: true, - enable_scroll_wire: true, - sequencer_url: None, - signer_address: None, - }, - rpc_args: RpcArgs { - basic_enabled: true, - admin_enabled: true, - }, - signer_args: SignerArgs { - key_file: None, - aws_kms_key_id: None, - private_key: None, - }, - gas_price_oracle_args: RollupNodeGasPriceOracleArgs { - default_suggested_priority_fee: 0, - }, - pprof_args: PprofArgs { - enabled: false, - addr: 0.0.0.0:6868, - default_duration: 30, - }, - database: Some( - Database { - database: Retry { - inner: DatabaseInner { - connection: SqlxSqlitePoolConnection, - write_lock: Mutex { - data: (), - }, - read_locks: Semaphore { - ll_sem: Semaphore { - permits: 5, - }, - resource_span: Span { - name: "runtime.resource", - level: Level( - Trace, - ), - target: "tokio::sync::semaphore", - disabled: true, - module_path: "tokio::sync::semaphore", - line: 461, - file: "/Users/yiweichi/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tokio-1.48.0/src/sync/semaphore.rs", - }, - }, - metrics: DatabaseMetrics, - tmp_dir: None, - }, - max_retries: None, - initial_delay_ms: 50, - exponential_backoff: false, - metrics: RetryMetrics, - }, - metrics: { - SetLatestL1BlockNumber: DatabaseOperationMetrics, - InsertSignature: DatabaseOperationMetrics, - DeleteBatchesGtBlockNumber: DatabaseOperationMetrics, - GetL2BlockAndBatchInfoByHash: DatabaseOperationMetrics, - GetLatestSafeL2Info: DatabaseOperationMetrics, - RemoveL1BlockInfoLeq: DatabaseOperationMetrics, - InsertL1Message: DatabaseOperationMetrics, - DeleteL2BlocksGtBlockNumber: DatabaseOperationMetrics, - GetBatchByIndex: DatabaseOperationMetrics, - GetBatchByHash: DatabaseOperationMetrics, - UpdateL1MessagesFromL2Blocks: DatabaseOperationMetrics, - GetL2BlockInfoByNumber: DatabaseOperationMetrics, - 
UpdateL1MessagesWithL2Block: DatabaseOperationMetrics, - GetLatestL1BlockNumber: DatabaseOperationMetrics, - PrepareOnStartup: DatabaseOperationMetrics, - GetProcessedL1BlockNumber: DatabaseOperationMetrics, - FinalizeConsolidatedBatches: DatabaseOperationMetrics, - InsertBatch: DatabaseOperationMetrics, - DeleteBatchesGtBatchIndex: DatabaseOperationMetrics, - ChangeBatchProcessingToCommittedStatus: DatabaseOperationMetrics, - GetLatestIndexedEventL1BlockNumber: DatabaseOperationMetrics, - SetBatchRevertBlockNumberForBatchRange: DatabaseOperationMetrics, - RemoveL1BlockInfoGt: DatabaseOperationMetrics, - GetNL2BlockDataHint: DatabaseOperationMetrics, - InsertBlocks: DatabaseOperationMetrics, - GetHighestBlockForBatchHash: DatabaseOperationMetrics, - GetHighestBlockForBatchIndex: DatabaseOperationMetrics, - FetchAndUpdateUnprocessedFinalizedBatches: DatabaseOperationMetrics, - DeleteBatchFinalizationGtBlockNumber: DatabaseOperationMetrics, - GetBatchStatusByHash: DatabaseOperationMetrics, - SetL2HeadBlockNumber: DatabaseOperationMetrics, - GetFinalizedL1BlockNumber: DatabaseOperationMetrics, - GetL2HeadBlockNumber: DatabaseOperationMetrics, - GetMaxBlockDataHintBlockNumber: DatabaseOperationMetrics, - DeleteL1MessagesGt: DatabaseOperationMetrics, - SetFinalizedL1BlockNumber: DatabaseOperationMetrics, - FinalizeBatchesUpToIndex: DatabaseOperationMetrics, - SetProcessedL1BlockNumber: DatabaseOperationMetrics, - DeleteBatchRevertGtBlockNumber: DatabaseOperationMetrics, - UpdateSkippedL1Messages: DatabaseOperationMetrics, - InsertBlock: DatabaseOperationMetrics, - InsertGenesisBlock: DatabaseOperationMetrics, - InsertBatchConsolidationOutcome: DatabaseOperationMetrics, - Unwind: DatabaseOperationMetrics, - PurgeL1MessageToL2BlockMappings: DatabaseOperationMetrics, - InsertSignatures: DatabaseOperationMetrics, - DeleteL2BlocksGtBatchIndex: DatabaseOperationMetrics, - GetL1BlockInfo: DatabaseOperationMetrics, - GetNL1Messages: DatabaseOperationMetrics, - 
FetchAndUpdateUnprocessedCommittedBatches: DatabaseOperationMetrics, - InsertL1BlockInfo: DatabaseOperationMetrics, - UpdateBatchStatus: DatabaseOperationMetrics, - GetSignature: DatabaseOperationMetrics, - }, - }, - ), -} -2026-02-28T21:43:51.064554Z INFO node{idx=0}: sea_orm_migration::migrator: Applying all pending migrations -2026-02-28T21:43:51.065361Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20220101_000001_create_batch_commit_table' -2026-02-28T21:43:51.067186Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20220101_000001_create_batch_commit_table' has been applied -2026-02-28T21:43:51.067516Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250304_125946_add_l1_msg_table' -2026-02-28T21:43:51.068230Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250304_125946_add_l1_msg_table' has been applied -2026-02-28T21:43:51.068443Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250408_132123_add_header_metadata' -2026-02-28T21:43:51.068589Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250408_132123_add_header_metadata' has been applied -2026-02-28T21:43:51.068836Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250408_150338_load_header_metadata' -2026-02-28T21:43:51.068857Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250408_150338_load_header_metadata' has been applied -2026-02-28T21:43:51.068947Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250411_072004_add_l2_block' -2026-02-28T21:43:51.070227Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250411_072004_add_l2_block' has been applied -2026-02-28T21:43:51.070456Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250616_223947_add_metadata' -2026-02-28T21:43:51.070832Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250616_223947_add_metadata' has been applied -2026-02-28T21:43:51.070949Z 
INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20250904_175949_block_signature' -2026-02-28T21:43:51.071137Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20250904_175949_block_signature' has been applied -2026-02-28T21:43:51.071353Z INFO node{idx=0}: sea_orm_migration::migrator: Applying migration 'm20251028_110719_add_l1_block_table' -2026-02-28T21:43:51.071539Z INFO node{idx=0}: sea_orm_migration::migrator: Migration 'm20251028_110719_add_l1_block_table' has been applied -2026-02-28T21:43:51.084817Z INFO node{idx=0}: scroll::node::args: Starting engine driver fcs=ForkchoiceState { head: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, safe: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, finalized: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 } } payload_building_duration=40 -2026-02-28T21:43:51.084930Z INFO node{idx=0}: scroll::providers: Running with mock blob provider - all other blob provider configurations are ignored -2026-02-28T21:43:51.086201Z INFO scroll::derivation_pipeline: Starting derivation pipeline worker -2026-02-28T21:43:51.086241Z INFO node{idx=0}: reth::cli: Starting consensus engine -2026-02-28T21:43:51.086783Z INFO node{idx=0}: reth_node_events::node: Forkchoice updated head_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 safe_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 finalized_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 -2026-02-28T21:43:51.091336Z WARN node{idx=1}: reth_node_builder::launch::common: Failed to build global thread pool err=The global thread pool has already been initialized. 
-2026-02-28T21:43:51.091716Z INFO node{idx=1}: reth::cli: Saving prune config to toml file -2026-02-28T21:43:51.092009Z INFO node{idx=1}: reth::cli: Configuration loaded path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-cUHZXmTP/reth.toml" -2026-02-28T21:43:51.092404Z INFO node{idx=1}: reth::cli: Database opened -2026-02-28T21:43:51.191253Z INFO node{idx=1}: reth::cli: -Pre-merge hard forks (block based): -- Homestead @0 -- Tangerine @0 -- SpuriousDragon @0 -- Byzantium @0 -- Constantinople @0 -- Petersburg @0 -- Istanbul @0 -- Berlin @0 -- London @0 -- Archimedes @0 -- Bernoulli @0 -- Curie @0 - -Post-merge hard forks (timestamp based): -- Shanghai @0 -- Darwin @0 -- DarwinV2 @0 -- Euclid @0 -- EuclidV2 @0 -- Feynman @0 -- Galileo @0 -- GalileoV2 @0 -2026-02-28T21:43:51.192016Z INFO node{idx=1}: reth::cli: Transaction pool initialized -2026-02-28T21:43:51.193456Z INFO node{idx=1}: reth::cli: P2P networking initialized enode=enode://cfa09a330a1c50e7327a855a0a5074aa1591f3937c9d326d6d9d9e069a841edcedce392343a810c02306a28f0246202a5606f76ef740ee61259c3985e14bc7ed@127.0.0.1:54057 -2026-02-28T21:43:51.193668Z INFO node{idx=1}: reth::cli: StaticFileProducer initialized -2026-02-28T21:43:51.194044Z INFO node{idx=1}: reth::cli: Verifying storage consistency. 
-2026-02-28T21:43:51.194351Z INFO node{idx=1}: reth::cli: Pruner initialized prune_config=PruneConfig { block_interval: 5, segments: PruneModes { sender_recovery: None, transaction_lookup: None, receipts: None, account_history: None, storage_history: None, bodies_history: None, receipts_log_filter: ReceiptsLogPruneConfig({}) } } -2026-02-28T21:43:51.194465Z INFO node{idx=1}: reth::cli: Creating JWT auth secret file path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-cUHZXmTP/jwt.hex" -2026-02-28T21:43:51.195206Z INFO node{idx=1}: reth::cli: Consensus engine initialized -2026-02-28T21:43:51.195371Z INFO node{idx=1}: reth::cli: Engine API handler initialized -2026-02-28T21:43:51.197639Z INFO node{idx=1}: reth::cli: RPC auth server started url=127.0.0.1:54058 -2026-02-28T21:43:51.199146Z INFO node{idx=1}: reth::cli: RPC IPC server started path=/tmp/reth.ipc-zgzTRK9I -2026-02-28T21:43:51.199156Z INFO node{idx=1}: reth::cli: RPC HTTP server started url=127.0.0.1:54059 -2026-02-28T21:43:51.199182Z INFO node{idx=1}: rollup_node::args: Building rollup node with config: -ScrollRollupNodeConfig { - test: true, - consensus_args: ConsensusArgs { - algorithm: Noop, - authorized_signer: None, - }, - database_args: RollupNodeDatabaseArgs { - rn_db_path: None, - }, - chain_orchestrator_args: ChainOrchestratorArgs { - optimistic_sync_trigger: 100, - chain_buffer_size: 100, - }, - engine_driver_args: EngineDriverArgs { - sync_at_startup: true, - }, - blob_provider_args: BlobProviderArgs { - beacon_node_urls: None, - s3_url: None, - anvil_url: None, - mock: true, - compute_units_per_second: 0, - max_retries: 0, - initial_backoff: 0, - }, - l1_provider_args: L1ProviderArgs { - url: None, - compute_units_per_second: 10000, - max_retries: 10, - initial_backoff: 100, - logs_query_block_range: 500, - cache_max_items: 100, - }, - sequencer_args: SequencerArgs { - sequencer_enabled: false, - auto_start: false, - block_time: 100, - payload_building_duration: 40, - fee_recipient: 
0x0000000000000000000000000000000000000000, - l1_message_inclusion_mode: BlockDepth( - 0, - ), - allow_empty_blocks: true, - max_l1_messages: None, - }, - network_args: RollupNodeNetworkArgs { - enable_eth_scroll_wire_bridge: true, - enable_scroll_wire: true, - sequencer_url: None, - signer_address: None, - }, - rpc_args: RpcArgs { - basic_enabled: true, - admin_enabled: true, - }, - signer_args: SignerArgs { - key_file: None, - aws_kms_key_id: None, - private_key: None, - }, - gas_price_oracle_args: RollupNodeGasPriceOracleArgs { - default_suggested_priority_fee: 0, - }, - pprof_args: PprofArgs { - enabled: false, - addr: 0.0.0.0:6868, - default_duration: 30, - }, - database: Some( - Database { - database: Retry { - inner: DatabaseInner { - connection: SqlxSqlitePoolConnection, - write_lock: Mutex { - data: (), - }, - read_locks: Semaphore { - ll_sem: Semaphore { - permits: 5, - }, - resource_span: Span { - name: "runtime.resource", - level: Level( - Trace, - ), - target: "tokio::sync::semaphore", - disabled: true, - module_path: "tokio::sync::semaphore", - line: 461, - file: "/Users/yiweichi/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tokio-1.48.0/src/sync/semaphore.rs", - }, - }, - metrics: DatabaseMetrics, - tmp_dir: None, - }, - max_retries: None, - initial_delay_ms: 50, - exponential_backoff: false, - metrics: RetryMetrics, - }, - metrics: { - RemoveL1BlockInfoGt: DatabaseOperationMetrics, - InsertBatchConsolidationOutcome: DatabaseOperationMetrics, - DeleteBatchFinalizationGtBlockNumber: DatabaseOperationMetrics, - ChangeBatchProcessingToCommittedStatus: DatabaseOperationMetrics, - GetBatchByHash: DatabaseOperationMetrics, - GetLatestIndexedEventL1BlockNumber: DatabaseOperationMetrics, - GetFinalizedL1BlockNumber: DatabaseOperationMetrics, - FetchAndUpdateUnprocessedCommittedBatches: DatabaseOperationMetrics, - GetL1BlockInfo: DatabaseOperationMetrics, - GetLatestL1BlockNumber: DatabaseOperationMetrics, - GetNL1Messages: DatabaseOperationMetrics, - 
PrepareOnStartup: DatabaseOperationMetrics, - GetLatestSafeL2Info: DatabaseOperationMetrics, - DeleteBatchRevertGtBlockNumber: DatabaseOperationMetrics, - GetL2BlockAndBatchInfoByHash: DatabaseOperationMetrics, - GetMaxBlockDataHintBlockNumber: DatabaseOperationMetrics, - RemoveL1BlockInfoLeq: DatabaseOperationMetrics, - SetL2HeadBlockNumber: DatabaseOperationMetrics, - SetLatestL1BlockNumber: DatabaseOperationMetrics, - DeleteL2BlocksGtBatchIndex: DatabaseOperationMetrics, - InsertL1BlockInfo: DatabaseOperationMetrics, - InsertBlocks: DatabaseOperationMetrics, - GetProcessedL1BlockNumber: DatabaseOperationMetrics, - GetL2HeadBlockNumber: DatabaseOperationMetrics, - PurgeL1MessageToL2BlockMappings: DatabaseOperationMetrics, - GetL2BlockInfoByNumber: DatabaseOperationMetrics, - GetHighestBlockForBatchHash: DatabaseOperationMetrics, - UpdateBatchStatus: DatabaseOperationMetrics, - FinalizeConsolidatedBatches: DatabaseOperationMetrics, - DeleteL2BlocksGtBlockNumber: DatabaseOperationMetrics, - GetBatchStatusByHash: DatabaseOperationMetrics, - GetNL2BlockDataHint: DatabaseOperationMetrics, - Unwind: DatabaseOperationMetrics, - InsertBatch: DatabaseOperationMetrics, - UpdateL1MessagesWithL2Block: DatabaseOperationMetrics, - InsertBlock: DatabaseOperationMetrics, - InsertSignature: DatabaseOperationMetrics, - InsertL1Message: DatabaseOperationMetrics, - SetBatchRevertBlockNumberForBatchRange: DatabaseOperationMetrics, - FetchAndUpdateUnprocessedFinalizedBatches: DatabaseOperationMetrics, - InsertGenesisBlock: DatabaseOperationMetrics, - DeleteBatchesGtBlockNumber: DatabaseOperationMetrics, - GetSignature: DatabaseOperationMetrics, - GetHighestBlockForBatchIndex: DatabaseOperationMetrics, - UpdateSkippedL1Messages: DatabaseOperationMetrics, - FinalizeBatchesUpToIndex: DatabaseOperationMetrics, - DeleteBatchesGtBatchIndex: DatabaseOperationMetrics, - DeleteL1MessagesGt: DatabaseOperationMetrics, - InsertSignatures: DatabaseOperationMetrics, - UpdateL1MessagesFromL2Blocks: 
DatabaseOperationMetrics, - SetFinalizedL1BlockNumber: DatabaseOperationMetrics, - SetProcessedL1BlockNumber: DatabaseOperationMetrics, - GetBatchByIndex: DatabaseOperationMetrics, - }, - }, - ), -} -2026-02-28T21:43:51.200044Z INFO node{idx=1}: sea_orm_migration::migrator: Applying all pending migrations -2026-02-28T21:43:51.200464Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20220101_000001_create_batch_commit_table' -2026-02-28T21:43:51.201783Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20220101_000001_create_batch_commit_table' has been applied -2026-02-28T21:43:51.201950Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250304_125946_add_l1_msg_table' -2026-02-28T21:43:51.202505Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250304_125946_add_l1_msg_table' has been applied -2026-02-28T21:43:51.202685Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250408_132123_add_header_metadata' -2026-02-28T21:43:51.202825Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250408_132123_add_header_metadata' has been applied -2026-02-28T21:43:51.202995Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250408_150338_load_header_metadata' -2026-02-28T21:43:51.203004Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250408_150338_load_header_metadata' has been applied -2026-02-28T21:43:51.203080Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250411_072004_add_l2_block' -2026-02-28T21:43:51.204135Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250411_072004_add_l2_block' has been applied -2026-02-28T21:43:51.204345Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20250616_223947_add_metadata' -2026-02-28T21:43:51.204660Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250616_223947_add_metadata' has been applied -2026-02-28T21:43:51.204786Z INFO node{idx=1}: 
sea_orm_migration::migrator: Applying migration 'm20250904_175949_block_signature' -2026-02-28T21:43:51.204997Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20250904_175949_block_signature' has been applied -2026-02-28T21:43:51.205234Z INFO node{idx=1}: sea_orm_migration::migrator: Applying migration 'm20251028_110719_add_l1_block_table' -2026-02-28T21:43:51.205397Z INFO node{idx=1}: sea_orm_migration::migrator: Migration 'm20251028_110719_add_l1_block_table' has been applied -2026-02-28T21:43:51.217263Z INFO node{idx=1}: scroll::node::args: Starting engine driver fcs=ForkchoiceState { head: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, safe: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, finalized: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 } } payload_building_duration=40 -2026-02-28T21:43:51.217301Z INFO node{idx=1}: scroll::providers: Running with mock blob provider - all other blob provider configurations are ignored -2026-02-28T21:43:51.218104Z INFO node{idx=1}: reth::cli: Starting consensus engine -2026-02-28T21:43:51.218184Z INFO scroll::derivation_pipeline: Starting derivation pipeline worker -2026-02-28T21:43:51.218451Z INFO node{idx=1}: reth_node_events::node: Forkchoice updated head_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 safe_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 finalized_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 -2026-02-28T21:43:54.049010Z INFO node{idx=0}: reth::cli: Status connected_peers=0 latest_block=0 -2026-02-28T21:43:54.197887Z INFO node{idx=1}: reth::cli: Status connected_peers=0 latest_block=0 -2026-02-28T21:43:56.048860Z INFO node{idx=1}: reth_e2e_test_utils::network: Session established with peer: 
0x2c7946ba72f3c7890775cb525fd39dea7e107cdbe4c34d70909b19aeecf2866b48271d2c07b28d8e9420b954e40a137d66ccefbf3f8253a623a64dbf8c236b9d -2026-02-28T21:43:56.049279Z INFO node{idx=1}: reth_e2e_test_utils::network: Session established with peer: 0xcfa09a330a1c50e7327a855a0a5074aa1591f3937c9d326d6d9d9e069a841edcedce392343a810c02306a28f0246202a5606f76ef740ee61259c3985e14bc7ed -2026-02-28T21:43:56.054420Z WARN node{idx=2}: reth_node_builder::launch::common: Failed to build global thread pool err=The global thread pool has already been initialized. -2026-02-28T21:43:56.054829Z INFO node{idx=2}: reth::cli: Saving prune config to toml file -2026-02-28T21:43:56.055120Z INFO node{idx=2}: reth::cli: Configuration loaded path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-8ZDUdK4C/reth.toml" -2026-02-28T21:43:56.055462Z INFO node{idx=2}: reth::cli: Database opened -2026-02-28T21:43:56.150611Z INFO node{idx=2}: reth::cli: -Pre-merge hard forks (block based): -- Homestead @0 -- Tangerine @0 -- SpuriousDragon @0 -- Byzantium @0 -- Constantinople @0 -- Petersburg @0 -- Istanbul @0 -- Berlin @0 -- London @0 -- Archimedes @0 -- Bernoulli @0 -- Curie @0 - -Post-merge hard forks (timestamp based): -- Shanghai @0 -- Darwin @0 -- DarwinV2 @0 -- Euclid @0 -- EuclidV2 @0 -- Feynman @0 -- Galileo @0 -- GalileoV2 @0 -2026-02-28T21:43:56.151341Z INFO node{idx=2}: reth::cli: Transaction pool initialized -2026-02-28T21:43:56.152842Z INFO node{idx=2}: reth::cli: P2P networking initialized enode=enode://ea6b5b34ec929761f5c81c9fc4a6e4ffb08788e8f63b8b98a44dbcb59273169ca325ed44bed4abaf754ceac1bb3d1d9fde4bb409a09e260be3dbc75f74c9cc1b@127.0.0.1:54091 -2026-02-28T21:43:56.153069Z INFO node{idx=2}: reth::cli: StaticFileProducer initialized -2026-02-28T21:43:56.153465Z INFO node{idx=2}: reth::cli: Verifying storage consistency. 
-2026-02-28T21:43:56.153750Z INFO node{idx=2}: reth::cli: Pruner initialized prune_config=PruneConfig { block_interval: 5, segments: PruneModes { sender_recovery: None, transaction_lookup: None, receipts: None, account_history: None, storage_history: None, bodies_history: None, receipts_log_filter: ReceiptsLogPruneConfig({}) } } -2026-02-28T21:43:56.153868Z INFO node{idx=2}: reth::cli: Creating JWT auth secret file path="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-8ZDUdK4C/jwt.hex" -2026-02-28T21:43:56.154645Z INFO node{idx=2}: reth::cli: Consensus engine initialized -2026-02-28T21:43:56.154823Z INFO node{idx=2}: reth::cli: Engine API handler initialized -2026-02-28T21:43:56.157167Z INFO node{idx=2}: reth::cli: RPC auth server started url=127.0.0.1:54092 -2026-02-28T21:43:56.158703Z INFO node{idx=2}: reth::cli: RPC IPC server started path=/tmp/reth.ipc-qrCb89S4 -2026-02-28T21:43:56.158716Z INFO node{idx=2}: reth::cli: RPC HTTP server started url=127.0.0.1:54093 -2026-02-28T21:43:56.158750Z INFO node{idx=2}: rollup_node::args: Building rollup node with config: -ScrollRollupNodeConfig { - test: true, - consensus_args: ConsensusArgs { - algorithm: Noop, - authorized_signer: None, - }, - database_args: RollupNodeDatabaseArgs { - rn_db_path: None, - }, - chain_orchestrator_args: ChainOrchestratorArgs { - optimistic_sync_trigger: 100, - chain_buffer_size: 100, - }, - engine_driver_args: EngineDriverArgs { - sync_at_startup: true, - }, - blob_provider_args: BlobProviderArgs { - beacon_node_urls: None, - s3_url: None, - anvil_url: None, - mock: true, - compute_units_per_second: 0, - max_retries: 0, - initial_backoff: 0, - }, - l1_provider_args: L1ProviderArgs { - url: None, - compute_units_per_second: 10000, - max_retries: 10, - initial_backoff: 100, - logs_query_block_range: 500, - cache_max_items: 100, - }, - sequencer_args: SequencerArgs { - sequencer_enabled: false, - auto_start: false, - block_time: 100, - payload_building_duration: 40, - fee_recipient: 
0x0000000000000000000000000000000000000000, - l1_message_inclusion_mode: BlockDepth( - 0, - ), - allow_empty_blocks: true, - max_l1_messages: None, - }, - network_args: RollupNodeNetworkArgs { - enable_eth_scroll_wire_bridge: true, - enable_scroll_wire: true, - sequencer_url: None, - signer_address: None, - }, - rpc_args: RpcArgs { - basic_enabled: true, - admin_enabled: true, - }, - signer_args: SignerArgs { - key_file: None, - aws_kms_key_id: None, - private_key: None, - }, - gas_price_oracle_args: RollupNodeGasPriceOracleArgs { - default_suggested_priority_fee: 0, - }, - pprof_args: PprofArgs { - enabled: false, - addr: 0.0.0.0:6868, - default_duration: 30, - }, - database: Some( - Database { - database: Retry { - inner: DatabaseInner { - connection: SqlxSqlitePoolConnection, - write_lock: Mutex { - data: (), - }, - read_locks: Semaphore { - ll_sem: Semaphore { - permits: 5, - }, - resource_span: Span { - name: "runtime.resource", - level: Level( - Trace, - ), - target: "tokio::sync::semaphore", - disabled: true, - module_path: "tokio::sync::semaphore", - line: 461, - file: "/Users/yiweichi/.cargo/registry/src/index.crates.io-1949cf8c6b5b557f/tokio-1.48.0/src/sync/semaphore.rs", - }, - }, - metrics: DatabaseMetrics, - tmp_dir: None, - }, - max_retries: None, - initial_delay_ms: 50, - exponential_backoff: false, - metrics: RetryMetrics, - }, - metrics: { - DeleteL1MessagesGt: DatabaseOperationMetrics, - DeleteBatchesGtBlockNumber: DatabaseOperationMetrics, - DeleteBatchesGtBatchIndex: DatabaseOperationMetrics, - DeleteL2BlocksGtBlockNumber: DatabaseOperationMetrics, - RemoveL1BlockInfoGt: DatabaseOperationMetrics, - FinalizeBatchesUpToIndex: DatabaseOperationMetrics, - UpdateBatchStatus: DatabaseOperationMetrics, - SetLatestL1BlockNumber: DatabaseOperationMetrics, - PrepareOnStartup: DatabaseOperationMetrics, - UpdateL1MessagesWithL2Block: DatabaseOperationMetrics, - InsertBatch: DatabaseOperationMetrics, - InsertBatchConsolidationOutcome: 
DatabaseOperationMetrics, - UpdateSkippedL1Messages: DatabaseOperationMetrics, - UpdateL1MessagesFromL2Blocks: DatabaseOperationMetrics, - GetBatchByIndex: DatabaseOperationMetrics, - GetLatestIndexedEventL1BlockNumber: DatabaseOperationMetrics, - DeleteL2BlocksGtBatchIndex: DatabaseOperationMetrics, - GetL1BlockInfo: DatabaseOperationMetrics, - GetLatestL1BlockNumber: DatabaseOperationMetrics, - InsertBlocks: DatabaseOperationMetrics, - GetL2HeadBlockNumber: DatabaseOperationMetrics, - SetFinalizedL1BlockNumber: DatabaseOperationMetrics, - GetLatestSafeL2Info: DatabaseOperationMetrics, - ChangeBatchProcessingToCommittedStatus: DatabaseOperationMetrics, - InsertL1Message: DatabaseOperationMetrics, - SetL2HeadBlockNumber: DatabaseOperationMetrics, - FetchAndUpdateUnprocessedCommittedBatches: DatabaseOperationMetrics, - SetBatchRevertBlockNumberForBatchRange: DatabaseOperationMetrics, - PurgeL1MessageToL2BlockMappings: DatabaseOperationMetrics, - SetProcessedL1BlockNumber: DatabaseOperationMetrics, - Unwind: DatabaseOperationMetrics, - InsertSignatures: DatabaseOperationMetrics, - InsertBlock: DatabaseOperationMetrics, - GetBatchStatusByHash: DatabaseOperationMetrics, - FetchAndUpdateUnprocessedFinalizedBatches: DatabaseOperationMetrics, - DeleteBatchFinalizationGtBlockNumber: DatabaseOperationMetrics, - GetProcessedL1BlockNumber: DatabaseOperationMetrics, - InsertSignature: DatabaseOperationMetrics, - GetNL1Messages: DatabaseOperationMetrics, - GetHighestBlockForBatchHash: DatabaseOperationMetrics, - GetSignature: DatabaseOperationMetrics, - GetBatchByHash: DatabaseOperationMetrics, - GetL2BlockAndBatchInfoByHash: DatabaseOperationMetrics, - DeleteBatchRevertGtBlockNumber: DatabaseOperationMetrics, - GetHighestBlockForBatchIndex: DatabaseOperationMetrics, - GetL2BlockInfoByNumber: DatabaseOperationMetrics, - RemoveL1BlockInfoLeq: DatabaseOperationMetrics, - InsertGenesisBlock: DatabaseOperationMetrics, - FinalizeConsolidatedBatches: DatabaseOperationMetrics, - 
GetFinalizedL1BlockNumber: DatabaseOperationMetrics, - GetNL2BlockDataHint: DatabaseOperationMetrics, - GetMaxBlockDataHintBlockNumber: DatabaseOperationMetrics, - InsertL1BlockInfo: DatabaseOperationMetrics, - }, - }, - ), -} -2026-02-28T21:43:56.159884Z INFO node{idx=2}: sea_orm_migration::migrator: Applying all pending migrations -2026-02-28T21:43:56.160342Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20220101_000001_create_batch_commit_table' -2026-02-28T21:43:56.161800Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20220101_000001_create_batch_commit_table' has been applied -2026-02-28T21:43:56.161976Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250304_125946_add_l1_msg_table' -2026-02-28T21:43:56.162509Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250304_125946_add_l1_msg_table' has been applied -2026-02-28T21:43:56.162675Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250408_132123_add_header_metadata' -2026-02-28T21:43:56.162811Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250408_132123_add_header_metadata' has been applied -2026-02-28T21:43:56.162992Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250408_150338_load_header_metadata' -2026-02-28T21:43:56.163002Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250408_150338_load_header_metadata' has been applied -2026-02-28T21:43:56.163086Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250411_072004_add_l2_block' -2026-02-28T21:43:56.164158Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250411_072004_add_l2_block' has been applied -2026-02-28T21:43:56.164355Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250616_223947_add_metadata' -2026-02-28T21:43:56.164677Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250616_223947_add_metadata' has been applied -2026-02-28T21:43:56.164779Z 
INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20250904_175949_block_signature' -2026-02-28T21:43:56.164951Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20250904_175949_block_signature' has been applied -2026-02-28T21:43:56.165157Z INFO node{idx=2}: sea_orm_migration::migrator: Applying migration 'm20251028_110719_add_l1_block_table' -2026-02-28T21:43:56.165315Z INFO node{idx=2}: sea_orm_migration::migrator: Migration 'm20251028_110719_add_l1_block_table' has been applied -2026-02-28T21:43:56.172044Z INFO node{idx=2}: scroll::node::args: Starting engine driver fcs=ForkchoiceState { head: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, safe: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 }, finalized: BlockInfo { number: 0, hash: 0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 } } payload_building_duration=40 -2026-02-28T21:43:56.172099Z INFO node{idx=2}: scroll::providers: Running with mock blob provider - all other blob provider configurations are ignored -2026-02-28T21:43:56.172971Z INFO node{idx=2}: reth::cli: Starting consensus engine -2026-02-28T21:43:56.173156Z INFO scroll::derivation_pipeline: Starting derivation pipeline worker -2026-02-28T21:43:56.173369Z INFO node{idx=2}: reth_node_events::node: Forkchoice updated head_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 safe_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 finalized_block_hash=0x31ad487404525f8260b2d507892547a39850aa0280a2e4950438f444237fe4b7 -2026-02-28T21:43:56.200224Z INFO node{idx=2}: reth_e2e_test_utils::network: Session established with peer: 0xcfa09a330a1c50e7327a855a0a5074aa1591f3937c9d326d6d9d9e069a841edcedce392343a810c02306a28f0246202a5606f76ef740ee61259c3985e14bc7ed -2026-02-28T21:43:56.200583Z INFO node{idx=2}: reth_e2e_test_utils::network: Session established with peer: 
0xea6b5b34ec929761f5c81c9fc4a6e4ffb08788e8f63b8b98a44dbcb59273169ca325ed44bed4abaf754ceac1bb3d1d9fde4bb409a09e260be3dbc75f74c9cc1b -2026-02-28T21:43:59.155517Z INFO node{idx=2}: reth::cli: Status connected_peers=1 latest_block=0 -2026-02-28T21:44:01.171492Z INFO node{idx=2}: reth_e2e_test_utils::network: Session established with peer: 0xea6b5b34ec929761f5c81c9fc4a6e4ffb08788e8f63b8b98a44dbcb59273169ca325ed44bed4abaf754ceac1bb3d1d9fde4bb409a09e260be3dbc75f74c9cc1b -2026-02-28T21:44:01.172460Z INFO node{idx=2}: reth_e2e_test_utils::network: Session established with peer: 0x2c7946ba72f3c7890775cb525fd39dea7e107cdbe4c34d70909b19aeecf2866b48271d2c07b28d8e9420b954e40a137d66ccefbf3f8253a623a64dbf8c236b9d -2026-02-28T21:45:09.048451Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:45:09.196777Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:45:14.155828Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:46:24.047569Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:46:24.197183Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:46:29.157143Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:47:39.049111Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:47:39.197265Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:47:44.156893Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:48:51.047833Z WARN node{idx=0}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! 
period=299.960894208s -2026-02-28T21:48:51.197405Z WARN node{idx=1}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=299.978783875s -2026-02-28T21:48:54.048042Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:48:54.197587Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:48:56.155756Z WARN node{idx=2}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=299.982226708s -2026-02-28T21:48:59.156789Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:50:09.048829Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:50:09.197745Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:50:14.156482Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:51:24.048875Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:51:24.198359Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:51:29.156899Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:52:39.059531Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:52:39.207785Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:52:44.166739Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:53:51.059649Z WARN node{idx=0}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! 
Please investigate reth and beacon client logs! period=599.961285583s -2026-02-28T21:53:51.077389Z INFO node{idx=0}: scroll::db::maintenance: running periodic PRAGMA optimize... -2026-02-28T21:53:51.082726Z INFO node{idx=0}: scroll::db::maintenance: periodic PRAGMA optimize complete. -2026-02-28T21:53:51.208471Z WARN node{idx=1}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=599.978363416s -2026-02-28T21:53:51.213024Z INFO node{idx=1}: scroll::db::maintenance: running periodic PRAGMA optimize... -2026-02-28T21:53:51.218303Z INFO node{idx=1}: scroll::db::maintenance: periodic PRAGMA optimize complete. -2026-02-28T21:53:54.059664Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:53:54.209226Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:53:56.167599Z WARN node{idx=2}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=599.982586458s -2026-02-28T21:53:56.172224Z INFO node{idx=2}: scroll::db::maintenance: running periodic PRAGMA optimize... -2026-02-28T21:53:56.173228Z INFO node{idx=2}: scroll::db::maintenance: periodic PRAGMA optimize complete. 
-2026-02-28T21:53:59.167584Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:55:09.060113Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:55:09.209947Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:55:14.168964Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:56:24.060128Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:56:24.209790Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:56:29.169254Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:57:39.061349Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:57:39.210343Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:57:44.168842Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:58:51.060516Z WARN node{idx=0}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=899.961098583s -2026-02-28T21:58:51.209873Z WARN node{idx=1}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=899.978732791s -2026-02-28T21:58:54.061222Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:58:54.209690Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T21:58:56.168647Z WARN node{idx=2}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! 
period=899.982594875s -2026-02-28T21:58:59.169032Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:00:09.061569Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:00:09.210622Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:00:14.170030Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:01:24.062891Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:01:24.212286Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:01:29.168763Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:02:39.061769Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:02:39.210756Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:02:44.169653Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:03:51.061413Z WARN node{idx=0}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=1199.961018041s -2026-02-28T22:03:51.086987Z INFO node{idx=0}: scroll::db::maintenance: running periodic PRAGMA optimize... -2026-02-28T22:03:51.088233Z INFO node{idx=0}: scroll::db::maintenance: periodic PRAGMA optimize complete. -2026-02-28T22:03:51.210410Z WARN node{idx=1}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=1199.978263916s -2026-02-28T22:03:51.222668Z INFO node{idx=1}: scroll::db::maintenance: running periodic PRAGMA optimize... -2026-02-28T22:03:51.225178Z INFO node{idx=1}: scroll::db::maintenance: periodic PRAGMA optimize complete. 
-2026-02-28T22:03:54.062590Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:03:54.211394Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:03:56.169613Z WARN node{idx=2}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=1199.982559458s -2026-02-28T22:03:56.176544Z INFO node{idx=2}: scroll::db::maintenance: running periodic PRAGMA optimize... -2026-02-28T22:03:56.177380Z INFO node{idx=2}: scroll::db::maintenance: periodic PRAGMA optimize complete. -2026-02-28T22:03:59.170127Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:05:09.062486Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:05:09.211903Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:05:14.170299Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:06:24.061903Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:06:24.211886Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:06:29.170980Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:07:39.061707Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:07:39.212472Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:07:44.170640Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:08:51.062600Z WARN node{idx=0}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! 
period=1499.961128958s -2026-02-28T22:08:51.211571Z WARN node{idx=1}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=1499.97840675s -2026-02-28T22:08:54.063021Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:08:54.211364Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:08:56.170417Z WARN node{idx=2}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=1499.982343833s -2026-02-28T22:08:59.170985Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:10:09.063705Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:10:09.212789Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:10:14.172080Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:11:24.062679Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:11:24.211510Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:11:29.172009Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:12:39.064411Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:12:39.212779Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:12:44.171608Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:13:51.064166Z WARN node{idx=0}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! 
Please investigate reth and beacon client logs! period=1799.961742041s -2026-02-28T22:13:51.092260Z INFO node{idx=0}: scroll::db::maintenance: running periodic PRAGMA optimize... -2026-02-28T22:13:51.094329Z INFO node{idx=0}: scroll::db::maintenance: periodic PRAGMA optimize complete. -2026-02-28T22:13:51.212373Z WARN node{idx=1}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=1799.978200416s -2026-02-28T22:13:51.228928Z INFO node{idx=1}: scroll::db::maintenance: running periodic PRAGMA optimize... -2026-02-28T22:13:51.232176Z INFO node{idx=1}: scroll::db::maintenance: periodic PRAGMA optimize complete. -2026-02-28T22:13:54.064007Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:13:54.212978Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:13:56.171567Z WARN node{idx=2}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=1799.982483958s -2026-02-28T22:13:56.179766Z INFO node{idx=2}: scroll::db::maintenance: running periodic PRAGMA optimize... -2026-02-28T22:13:56.180689Z INFO node{idx=2}: scroll::db::maintenance: periodic PRAGMA optimize complete. 
-2026-02-28T22:13:59.171819Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:15:09.064284Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:15:09.213263Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:15:14.172119Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:16:24.064452Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:16:24.212999Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:16:29.173080Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:17:39.064191Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:17:39.213953Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:17:44.173206Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:18:51.065927Z WARN node{idx=0}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=2099.962479625s -2026-02-28T22:18:51.213691Z WARN node{idx=1}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=2099.978500708s -2026-02-28T22:18:54.064186Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:18:54.214267Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:18:56.172613Z WARN node{idx=2}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! 
period=2099.98251125s -2026-02-28T22:18:59.174318Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:20:09.065339Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:20:09.214196Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:20:14.173501Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:21:24.066164Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:21:24.214686Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:21:29.173429Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:22:39.066245Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:22:39.214645Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:22:44.174519Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:23:51.065306Z WARN node{idx=0}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=2399.960828083s -2026-02-28T22:23:51.097898Z INFO node{idx=0}: scroll::db::maintenance: running periodic PRAGMA optimize... -2026-02-28T22:23:51.099240Z INFO node{idx=0}: scroll::db::maintenance: periodic PRAGMA optimize complete. -2026-02-28T22:23:51.214703Z WARN node{idx=1}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=2399.978484291s -2026-02-28T22:23:51.235936Z INFO node{idx=1}: scroll::db::maintenance: running periodic PRAGMA optimize... -2026-02-28T22:23:51.237115Z INFO node{idx=1}: scroll::db::maintenance: periodic PRAGMA optimize complete. 
-2026-02-28T22:23:54.066642Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:23:54.215715Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:23:56.174020Z WARN node{idx=2}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=2399.982891208s -2026-02-28T22:23:56.185168Z INFO node{idx=2}: scroll::db::maintenance: running periodic PRAGMA optimize... -2026-02-28T22:23:56.186160Z INFO node{idx=2}: scroll::db::maintenance: periodic PRAGMA optimize complete. -2026-02-28T22:23:59.173427Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:25:09.066823Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:25:09.215059Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:25:14.174187Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:26:24.065683Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:26:24.215148Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:26:29.175384Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:27:39.067354Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:27:39.215021Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:27:44.175364Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:28:51.067531Z WARN node{idx=0}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! 
period=2699.962020166s -2026-02-28T22:28:51.215793Z WARN node{idx=1}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=2699.978548708s -2026-02-28T22:28:54.067838Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:28:54.216653Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:28:56.173812Z WARN node{idx=2}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=2699.981664375s -2026-02-28T22:28:59.175213Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:30:09.071711Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:30:09.220714Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:30:14.180045Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:31:24.073024Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:31:24.221960Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:31:29.180724Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:32:39.073101Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:32:39.221421Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:32:44.180458Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:33:51.072896Z WARN node{idx=0}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! 
Please investigate reth and beacon client logs! period=2999.961677333s -2026-02-28T22:33:51.107994Z INFO node{idx=0}: scroll::db::maintenance: running periodic PRAGMA optimize... -2026-02-28T22:33:51.110253Z INFO node{idx=0}: scroll::db::maintenance: periodic PRAGMA optimize complete. -2026-02-28T22:33:51.221746Z WARN node{idx=1}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=2999.978780333s -2026-02-28T22:33:51.245429Z INFO node{idx=1}: scroll::db::maintenance: running periodic PRAGMA optimize... -2026-02-28T22:33:51.248516Z INFO node{idx=1}: scroll::db::maintenance: periodic PRAGMA optimize complete. -2026-02-28T22:33:54.072921Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:33:54.221375Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:33:56.180559Z WARN node{idx=2}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=2999.982685125s -2026-02-28T22:33:56.194635Z INFO node{idx=2}: scroll::db::maintenance: running periodic PRAGMA optimize... -2026-02-28T22:33:56.195454Z INFO node{idx=2}: scroll::db::maintenance: periodic PRAGMA optimize complete. 
-2026-02-28T22:33:59.180980Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:35:09.073625Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:35:09.221676Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:35:14.181435Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:36:24.072692Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:36:24.222895Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:36:29.182104Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:37:39.073136Z INFO node{idx=0}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:37:39.223322Z INFO node{idx=1}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:37:44.182252Z INFO node{idx=2}: reth::cli: Status connected_peers=2 latest_block=0 -2026-02-28T22:38:51.074750Z WARN node{idx=0}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! period=3299.962292166s -2026-02-28T22:38:51.222887Z WARN node{idx=1}: reth_node_events::node: Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs! 
period=3299.978703166s -2026-02-28T22:38:52.425759Z INFO node{idx=0}: reth::cli: Wrote network peers to file peers_file="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-iZaMJRZv/known-peers.json" -2026-02-28T22:38:52.425764Z INFO node{idx=2}: reth::cli: Wrote network peers to file peers_file="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-8ZDUdK4C/known-peers.json" -2026-02-28T22:38:52.425629Z INFO node{idx=1}: reth::cli: Wrote network peers to file peers_file="/var/folders/gn/8krhcpg563g0p6zyjrm6pr_c0000gq/T/reth-test-cUHZXmTP/known-peers.json" From f34628615407bee62e47c8337d904d69127e6dde Mon Sep 17 00:00:00 2001 From: Morty Date: Wed, 4 Mar 2026 04:29:45 +0800 Subject: [PATCH 8/9] fix: debug toolkit attach mode --- crates/node/src/debug_toolkit/repl/attach.rs | 565 ------------------- 1 file changed, 565 deletions(-) delete mode 100644 crates/node/src/debug_toolkit/repl/attach.rs diff --git a/crates/node/src/debug_toolkit/repl/attach.rs b/crates/node/src/debug_toolkit/repl/attach.rs deleted file mode 100644 index f909d49e..00000000 --- a/crates/node/src/debug_toolkit/repl/attach.rs +++ /dev/null @@ -1,565 +0,0 @@ -/// REPL for attaching to an already-running scroll node via JSON-RPC. -use crate::debug_toolkit::commands::{ - print_help, AdminCommand, BlockArg, Command, EventsCommand, L1Command, PeersCommand, TxCommand, -}; -use alloy_consensus::{SignableTransaction, TxEip1559}; -use alloy_eips::{eip2718::Encodable2718, BlockId, BlockNumberOrTag}; -use alloy_network::TxSignerSync; -use alloy_primitives::TxKind; -use alloy_provider::{Provider, ProviderBuilder}; -use alloy_signer_local::PrivateKeySigner; -use colored::Colorize; -use crossterm::terminal::{disable_raw_mode, enable_raw_mode}; -use reqwest::Url; -use rollup_node_chain_orchestrator::ChainOrchestratorStatus; -use scroll_alloy_network::Scroll; -use std::{io::Write, path::PathBuf, time::Duration}; - -/// Interactive REPL that attaches to a running node via JSON-RPC. 
-#[derive(Debug)] -pub struct AttachRepl { - /// The RPC URL of the target node. - url: Url, - /// Alloy provider — all RPC calls including custom namespaces go through `raw_request`. - provider: alloy_provider::RootProvider, - /// Optional private key for signing transactions locally. - signer: Option, - /// Whether the REPL is running. - running: bool, - /// Whether background head-block polling is enabled. - events_enabled: bool, - /// Most recently seen block number (for head-block polling). - last_seen_block: u64, - /// Path to the log file (for `logs` command). - log_path: Option, -} - -impl AttachRepl { - /// Connect to a node at the given URL and build the REPL. - pub async fn new(url: Url, private_key: Option) -> eyre::Result { - // Use `default()` (no fillers) to get a plain `RootProvider`. - // We don't need gas/nonce fillers since we build transactions manually. - let provider = ProviderBuilder::default() - .connect(url.as_str()) - .await - .map_err(|e| eyre::eyre!("Failed to connect to {}: {}", url, e))?; - - let signer = if let Some(pk) = private_key { - let pk = pk.trim_start_matches("0x"); - let signer: PrivateKeySigner = - pk.parse().map_err(|e| eyre::eyre!("Invalid private key: {}", e))?; - Some(signer) - } else { - None - }; - - let last_seen_block = provider.get_block_number().await.unwrap_or(0); - - Ok(Self { - url, - provider, - signer, - running: false, - events_enabled: false, - last_seen_block, - log_path: None, - }) - } - - /// Set the log file path (shown by `logs` command). - pub fn set_log_path(&mut self, path: PathBuf) { - self.log_path = Some(path); - } - - /// Get the REPL prompt string. - fn get_prompt(&self) -> String { - let host = self.url.host_str().unwrap_or("?"); - let port = self.url.port().map(|p| format!(":{}", p)).unwrap_or_default(); - format!("{} [{}{}]> ", "scroll-debug".cyan(), host, port) - } - - /// Run the REPL loop. 
- pub async fn run(&mut self) -> eyre::Result<()> { - self.running = true; - - let _guard = super::terminal::RawModeGuard::new()?; - - let _ = disable_raw_mode(); - println!(); - println!("{}", "Scroll Debug Toolkit (attach mode)".bold().cyan()); - println!("Connected to: {}", self.url.as_str().green()); - if let Some(signer) = &self.signer { - println!("Signer: {:?}", signer.address()); - } else { - println!("{}", "No signer, tx send/inject require --private-key".yellow()); - } - println!("Type 'help' for available commands, 'exit' to quit."); - println!(); - if let Err(e) = self.cmd_status().await { - println!("{}: {}", "Warning: could not fetch initial status".yellow(), e); - } - let _ = enable_raw_mode(); - - let mut input_buffer = String::new(); - let mut stdout = std::io::stdout(); - - print!("{}", self.get_prompt()); - let _ = stdout.flush(); - - while self.running { - tokio::select! { - biased; - - // Head-block polling (only when events are enabled). - _ = async { tokio::time::sleep(Duration::from_secs(2)).await }, if self.events_enabled => { - if let Ok(number) = self.provider.get_block_number().await { - if number > self.last_seen_block { - for n in (self.last_seen_block + 1)..=number { - let id = BlockId::Number(BlockNumberOrTag::Number(n)); - if let Ok(Some(block)) = self.provider.get_block(id).await { - let msg = format!( - "[new block] #{} hash={:.12}... txs={}", - block.header.number, - format!("{:?}", block.header.hash), - block.transactions.len(), - ); - print!("\r\x1b[K{}\r\n{}{}", msg.cyan(), self.get_prompt(), input_buffer); - let _ = stdout.flush(); - } - } - self.last_seen_block = number; - } - } - } - - // Check for keyboard input (non-blocking) - _ = tokio::time::sleep(Duration::from_millis(50)) => { - match super::terminal::poll_keyboard(&mut input_buffer, &self.get_prompt())? 
{ - super::terminal::InputAction::Command(line) => { - let _ = disable_raw_mode(); - if let Err(e) = self.execute_command(&line).await { - println!("{}: {}", "Error".red(), e); - } - let _ = enable_raw_mode(); - if self.running { - print!("{}", self.get_prompt()); - let _ = stdout.flush(); - } - } - super::terminal::InputAction::Quit => self.running = false, - super::terminal::InputAction::None => {} - } - } - } - } - - print!("Goodbye!\r\n"); - Ok(()) - } - - /// Dispatch a parsed command. - async fn execute_command(&mut self, input: &str) -> eyre::Result<()> { - let cmd = Command::parse(input); - match cmd { - Command::Status => self.cmd_status().await, - Command::SyncStatus => self.cmd_sync_status().await, - Command::Block(arg) => self.cmd_block(arg).await, - Command::Blocks { from, to } => self.cmd_blocks(from, to).await, - Command::Fcs => self.cmd_fcs().await, - Command::L1(l1_cmd) => self.cmd_l1(l1_cmd).await, - Command::Tx(tx_cmd) => self.cmd_tx(tx_cmd).await, - Command::Peers(peers_cmd) => self.cmd_peers(peers_cmd).await, - Command::Events(events_cmd) => self.cmd_events(events_cmd), - Command::Admin(admin_cmd) => self.cmd_admin(admin_cmd).await, - Command::Rpc { method, params } => self.cmd_rpc(&method, params.as_deref()).await, - Command::Logs => self.cmd_logs(), - Command::Help => { - print_help(); - Ok(()) - } - Command::Exit => { - self.running = false; - Ok(()) - } - // Spawn-mode-only commands — give informative errors - Command::Build => { - println!( - "{}", - "build is only available in spawn mode (--chain / --sequencer).".yellow() - ); - Ok(()) - } - Command::Wallet(_) => { - println!( - "{}", - "wallet gen is only available in spawn mode. Use --private-key to set a signer." 
- .yellow() - ); - Ok(()) - } - Command::Run(_) => { - println!("{}", "run actions are only available in spawn mode.".yellow()); - Ok(()) - } - Command::Node(_) | Command::Nodes => { - println!( - "{}", - "node switching is only available in spawn mode (multiple nodes).".yellow() - ); - Ok(()) - } - Command::Db => { - println!("{}", "db path is only available in spawn mode.".yellow()); - Ok(()) - } - Command::Unknown(s) => { - if !s.is_empty() { - println!("Unknown command: {}. Type 'help' for available commands.", s); - } - Ok(()) - } - } - } - - // ------------------------------------------------------------------------- - // Helper - // ------------------------------------------------------------------------- - - /// Call a custom-namespace JSON-RPC method and deserialize the response. - /// - /// Uses `raw_request_dyn` (no trait bounds on P/R) combined with `serde_json` for - /// maximum compatibility regardless of the provider's network/transport generics. - async fn raw( - &self, - method: &'static str, - params: impl serde::Serialize, - ) -> eyre::Result { - crate::debug_toolkit::shared::rpc::raw_typed(&self.provider, method, params).await - } - - // ------------------------------------------------------------------------- - // Command implementations - // ------------------------------------------------------------------------- - - /// `status` — show node status via `rollupNode_status`. - async fn cmd_status(&self) -> eyre::Result<()> { - let status: ChainOrchestratorStatus = self.raw("rollupNode_status", ()).await?; - - println!("{}", "=== Node Status ===".bold()); - println!("{}", "Node:".underline()); - println!(" RPC: {}", self.url.as_str()); - if let Some(signer) = &self.signer { - println!(" From: {:?}", signer.address()); - } - crate::debug_toolkit::shared::status::print_status_overview(&status); - - Ok(()) - } - - /// `sync-status` — detailed sync status. 
- async fn cmd_sync_status(&self) -> eyre::Result<()> { - let status: ChainOrchestratorStatus = self.raw("rollupNode_status", ()).await?; - crate::debug_toolkit::shared::status::print_sync_status(&status); - Ok(()) - } - - /// `fcs` — show forkchoice state. - async fn cmd_fcs(&self) -> eyre::Result<()> { - let status: ChainOrchestratorStatus = self.raw("rollupNode_status", ()).await?; - crate::debug_toolkit::shared::status::print_forkchoice(&status); - Ok(()) - } - - /// `block [n|latest]` — show block details. - async fn cmd_block(&self, arg: BlockArg) -> eyre::Result<()> { - let tag = match arg { - BlockArg::Latest => BlockNumberOrTag::Latest, - BlockArg::Number(n) => BlockNumberOrTag::Number(n), - }; - - let block: Option = - self.raw("eth_getBlockByNumber", (tag, false)).await?; - let block = block.ok_or_else(|| eyre::eyre!("Block not found"))?; - - let number = block["number"].as_str().unwrap_or("?"); - let hash = block["hash"].as_str().unwrap_or("?"); - let parent = block["parentHash"].as_str().unwrap_or("?"); - let timestamp = block["timestamp"].as_str().unwrap_or("?"); - let gas_used = block["gasUsed"].as_str().unwrap_or("?"); - let gas_limit = block["gasLimit"].as_str().unwrap_or("?"); - let txs = block["transactions"].as_array(); - - println!("{}", format!("Block {}", number).bold()); - println!(" Hash: {}", hash); - println!(" Parent: {}", parent); - println!(" Timestamp: {}", timestamp); - println!(" Gas Used: {}", gas_used); - println!(" Gas Limit: {}", gas_limit); - - if let Some(txs) = txs { - println!(" Txs: {}", txs.len()); - for (i, tx) in txs.iter().enumerate() { - let tx_hash = tx.as_str().or_else(|| tx["hash"].as_str()).unwrap_or("?"); - println!(" [{}] hash={}", i, tx_hash); - } - } - - Ok(()) - } - - /// `blocks ` — list blocks in a range. 
- async fn cmd_blocks(&self, from: u64, to: u64) -> eyre::Result<()> { - println!("{}", format!("Blocks {} to {}:", from, to).bold()); - for n in from..=to { - let tag = BlockNumberOrTag::Number(n); - let block: Option = - self.raw("eth_getBlockByNumber", (tag, false)).await?; - if let Some(block) = block { - let hash = block["hash"].as_str().unwrap_or("?"); - let gas = block["gasUsed"].as_str().unwrap_or("?"); - let tx_count = block["transactions"].as_array().map(|a| a.len()).unwrap_or(0); - println!(" #{}: {} txs, gas: {}, hash: {:.12}...", n, tx_count, gas, hash); - } else { - println!(" #{}: {}", n, "not found".dimmed()); - } - } - Ok(()) - } - - /// `l1 status` / `l1 messages` — L1-related queries. - async fn cmd_l1(&self, cmd: L1Command) -> eyre::Result<()> { - match cmd { - L1Command::Status => { - let status: ChainOrchestratorStatus = self.raw("rollupNode_status", ()).await?; - println!("{}", "L1 Status:".bold()); - println!( - " Synced: {}", - if status.l1.status.is_synced() { "true".green() } else { "false".red() } - ); - println!(" L1 Head: #{}", status.l1.latest); - println!(" L1 Final: #{}", status.l1.finalized); - println!(" Processed: #{}", status.l1.processed); - } - L1Command::Messages => { - let msg: Option = - self.raw("rollupNode_getL1MessageByIndex", [0u64]).await?; - println!("{}", "L1 Message Queue (index 0):".bold()); - match msg { - Some(m) => println!("{}", serde_json::to_string_pretty(&m)?), - None => println!(" {}", "No message at index 0".dimmed()), - } - println!( - "{}", - "Hint: use 'rpc rollupNode_getL1MessageByIndex []' for specific indices" - .dimmed() - ); - } - L1Command::Sync | L1Command::Block(_) | L1Command::Reorg(_) => { - println!( - "{}", - "l1 sync/block/reorg are only available in spawn mode (mock L1).".yellow() - ); - } - L1Command::Send { .. } => { - println!( - "{}", - "l1 send is only available in spawn mode. Use cast or a wallet to bridge." 
- .yellow() - ); - } - } - Ok(()) - } - - /// `tx pending` / `tx send` / `tx inject`. - async fn cmd_tx(&self, cmd: TxCommand) -> eyre::Result<()> { - match cmd { - TxCommand::Pending => { - let result: serde_json::Value = self.raw("txpool_content", ()).await?; - println!("{}", "Pending Transactions:".bold()); - println!("{}", serde_json::to_string_pretty(&result)?); - } - TxCommand::Send { to, value, from: _ } => { - let signer = self.signer.as_ref().ok_or_else(|| { - eyre::eyre!("No signer configured. Start with --private-key .") - })?; - let from_address = signer.address(); - - let chain_id: serde_json::Value = self.raw("eth_chainId", ()).await?; - let chain_id: u64 = u64::from_str_radix( - chain_id.as_str().unwrap_or("0x1").trim_start_matches("0x"), - 16, - ) - .unwrap_or(1); - - let nonce_val: serde_json::Value = - self.raw("eth_getTransactionCount", (from_address, "latest")).await?; - let nonce: u64 = u64::from_str_radix( - nonce_val.as_str().unwrap_or("0x0").trim_start_matches("0x"), - 16, - ) - .unwrap_or(0); - - let latest: serde_json::Value = - self.raw("eth_getBlockByNumber", ("latest", false)).await?; - let base_fee = latest["baseFeePerGas"] - .as_str() - .and_then(|s| u64::from_str_radix(s.trim_start_matches("0x"), 16).ok()) - .unwrap_or(1_000_000_000); - let base_fee_u128 = base_fee as u128; - // Keep priority tip conservative on low-fee chains and always satisfy: - // max_fee_per_gas >= max_priority_fee_per_gas. - let max_priority_fee_per_gas = (base_fee_u128 / 2).max(1); - let max_fee_per_gas = (base_fee_u128 * 2).max(max_priority_fee_per_gas); - let gas_limit = match self - .raw::( - "eth_estimateGas", - [serde_json::json!({ - "from": format!("{:#x}", from_address), - "to": format!("{:#x}", to), - "value": format!("0x{value:x}"), - })], - ) - .await - { - Ok(v) => v - .as_str() - .and_then(|s| u64::from_str_radix(s.trim_start_matches("0x"), 16).ok()) - // Add a small safety buffer on top of estimate. 
- .map(|g| g.saturating_mul(12) / 10) - .filter(|g| *g > 0) - .unwrap_or(21_000), - Err(e) => { - println!( - "{}", - format!( - "Warning: eth_estimateGas failed ({}), falling back to 21000", - e - ) - .yellow() - ); - 21_000 - } - }; - - let mut tx = TxEip1559 { - chain_id, - nonce, - gas_limit, - max_fee_per_gas, - max_priority_fee_per_gas, - to: TxKind::Call(to), - value, - access_list: Default::default(), - input: Default::default(), - }; - - let signature = signer.sign_transaction_sync(&mut tx)?; - let signed = tx.into_signed(signature); - let raw_tx = alloy_primitives::hex::encode_prefixed(signed.encoded_2718()); - - let tx_hash: serde_json::Value = - self.raw("eth_sendRawTransaction", [raw_tx]).await?; - let tx_hash_str = - tx_hash.as_str().map(ToOwned::to_owned).unwrap_or_else(|| tx_hash.to_string()); - crate::debug_toolkit::shared::output::print_tx_sent( - &tx_hash_str, - &format!("{:?}", from_address), - &format!("{:?}", to), - value, - false, - ); - } - TxCommand::Inject(bytes) => { - let hex = alloy_primitives::hex::encode_prefixed(&bytes); - let tx_hash: serde_json::Value = self.raw("eth_sendRawTransaction", [hex]).await?; - let tx_hash_str = - tx_hash.as_str().map(ToOwned::to_owned).unwrap_or_else(|| tx_hash.to_string()); - crate::debug_toolkit::shared::output::print_tx_injected(&tx_hash_str); - } - } - Ok(()) - } - - /// `peers` / `peers connect`. 
- async fn cmd_peers(&self, cmd: PeersCommand) -> eyre::Result<()> { - match cmd { - PeersCommand::List => { - let peers: serde_json::Value = self.raw("admin_peers", ()).await?; - println!("{}", "Connected Peers:".bold()); - println!("{}", serde_json::to_string_pretty(&peers)?); - - println!(); - let node_info: serde_json::Value = self.raw("admin_nodeInfo", ()).await?; - println!("{}", "Local Node Info:".bold()); - println!("{}", serde_json::to_string_pretty(&node_info)?); - } - PeersCommand::Connect(enode_url) => { - let result: bool = self.raw("admin_addPeer", [enode_url.as_str()]).await?; - if result { - println!("{}", format!("Peer add request sent: {}", enode_url).green()); - } else { - println!("{}", "admin_addPeer returned false".yellow()); - } - } - } - Ok(()) - } - - /// `events on` / `events off` — toggle head-block polling. - fn cmd_events(&mut self, cmd: EventsCommand) -> eyre::Result<()> { - match cmd { - EventsCommand::On => { - self.events_enabled = true; - println!("{}", "Head-block polling enabled (2s interval)".green()); - } - EventsCommand::Off => { - self.events_enabled = false; - println!("{}", "Head-block polling disabled".yellow()); - } - EventsCommand::Filter(_) | EventsCommand::History(_) => { - println!("{}", "events filter/history are only available in spawn mode.".yellow()); - } - } - Ok(()) - } - - /// `admin enable-seq` / `admin disable-seq` / `admin revert `. 
- async fn cmd_admin(&self, cmd: AdminCommand) -> eyre::Result<()> { - match cmd { - AdminCommand::EnableSequencing => { - let result: bool = - self.raw("rollupNodeAdmin_enableAutomaticSequencing", ()).await?; - crate::debug_toolkit::shared::output::print_admin_enable_result(result); - } - AdminCommand::DisableSequencing => { - let result: bool = - self.raw("rollupNodeAdmin_disableAutomaticSequencing", ()).await?; - crate::debug_toolkit::shared::output::print_admin_disable_result(result); - } - AdminCommand::RevertToL1Block(block_number) => { - crate::debug_toolkit::shared::output::print_admin_revert_start(block_number); - let result: bool = - self.raw("rollupNodeAdmin_revertToL1Block", [block_number]).await?; - crate::debug_toolkit::shared::output::print_admin_revert_result( - block_number, - result, - ); - } - } - Ok(()) - } - - /// `rpc [params]` — call any JSON-RPC method and pretty-print the result. - async fn cmd_rpc(&self, method: &str, params: Option<&str>) -> eyre::Result<()> { - let pretty = - crate::debug_toolkit::shared::rpc::raw_value(&self.provider, method, params).await?; - crate::debug_toolkit::shared::output::print_pretty_json(&pretty) - } - - /// `logs` — show log file path. - fn cmd_logs(&self) -> eyre::Result<()> { - crate::debug_toolkit::shared::output::print_log_file(&self.log_path); - Ok(()) - } -} From f794f0a405f7ce58fe4c5cb9a26681bb3f082a31 Mon Sep 17 00:00:00 2001 From: Morty Date: Wed, 4 Mar 2026 04:35:26 +0800 Subject: [PATCH 9/9] fix: debug toolkit attach mode --- crates/node/src/debug_toolkit/repl/attach.rs | 569 +++++++++++++++++++ 1 file changed, 569 insertions(+) create mode 100644 crates/node/src/debug_toolkit/repl/attach.rs diff --git a/crates/node/src/debug_toolkit/repl/attach.rs b/crates/node/src/debug_toolkit/repl/attach.rs new file mode 100644 index 00000000..0f28382a --- /dev/null +++ b/crates/node/src/debug_toolkit/repl/attach.rs @@ -0,0 +1,569 @@ +/// REPL for attaching to an already-running scroll node via JSON-RPC. 
+use crate::debug_toolkit::commands::{ + print_help, AdminCommand, BlockArg, Command, EventsCommand, L1Command, PeersCommand, TxCommand, +}; +use alloy_consensus::{SignableTransaction, TxEip1559}; +use alloy_eips::{eip2718::Encodable2718, BlockId, BlockNumberOrTag}; +use alloy_network::TxSignerSync; +use alloy_primitives::TxKind; +use alloy_provider::{Provider, ProviderBuilder}; +use alloy_signer_local::PrivateKeySigner; +use colored::Colorize; +use crossterm::terminal::{disable_raw_mode, enable_raw_mode}; +use reqwest::Url; +use rollup_node_chain_orchestrator::ChainOrchestratorStatus; +use scroll_alloy_network::Scroll; +use std::{io::Write, path::PathBuf, time::Duration}; + +/// Interactive REPL that attaches to a running node via JSON-RPC. +#[derive(Debug)] +pub struct AttachRepl { + /// The RPC URL of the target node. + url: Url, + /// Alloy provider — all RPC calls including custom namespaces go through `raw_request`. + provider: alloy_provider::RootProvider, + /// Optional private key for signing transactions locally. + signer: Option, + /// Whether the REPL is running. + running: bool, + /// Whether background head-block polling is enabled. + events_enabled: bool, + /// Most recently seen block number (for head-block polling). + last_seen_block: u64, + /// Path to the log file (for `logs` command). + log_path: Option, +} + +impl AttachRepl { + /// Connect to a node at the given URL and build the REPL. + pub async fn new(url: Url, private_key: Option) -> eyre::Result { + // Use `default()` (no fillers) to get a plain `RootProvider`. + // We don't need gas/nonce fillers since we build transactions manually. 
+ let provider = ProviderBuilder::default() + .connect(url.as_str()) + .await + .map_err(|e| eyre::eyre!("Failed to connect to {}: {}", url, e))?; + + let signer = if let Some(pk) = private_key { + let pk = pk.trim_start_matches("0x"); + let signer: PrivateKeySigner = + pk.parse().map_err(|e| eyre::eyre!("Invalid private key: {}", e))?; + Some(signer) + } else { + None + }; + + let last_seen_block = provider.get_block_number().await.unwrap_or(0); + + Ok(Self { + url, + provider, + signer, + running: false, + events_enabled: false, + last_seen_block, + log_path: None, + }) + } + + /// Set the log file path (shown by `logs` command). + pub fn set_log_path(&mut self, path: PathBuf) { + self.log_path = Some(path); + } + + /// Get the REPL prompt string. + fn get_prompt(&self) -> String { + let host = self.url.host_str().unwrap_or("?"); + let port = self.url.port().map(|p| format!(":{}", p)).unwrap_or_default(); + format!("{} [{}{}]> ", "scroll-debug".cyan(), host, port) + } + + /// Run the REPL loop. 
+ pub async fn run(&mut self) -> eyre::Result<()> { + self.running = true; + + let _guard = super::terminal::RawModeGuard::new()?; + + let _ = disable_raw_mode(); + println!(); + println!("{}", "Scroll Debug Toolkit (attach mode)".bold().cyan()); + println!("Connected to: {}", self.url.as_str().green()); + if let Some(signer) = &self.signer { + println!("Signer: {:?}", signer.address()); + } else { + println!("{}", "No signer, tx send/inject require --private-key".yellow()); + } + println!("Type 'help' for available commands, 'exit' to quit."); + println!(); + if let Err(e) = self.cmd_status().await { + println!("{}: {}", "Warning: could not fetch initial status".yellow(), e); + } + let _ = enable_raw_mode(); + + let mut input_buffer = String::new(); + let mut stdout = std::io::stdout(); + let mut head_poll_tick = tokio::time::interval(Duration::from_secs(2)); + head_poll_tick.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + let mut input_tick = tokio::time::interval(Duration::from_millis(50)); + input_tick.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + print!("{}", self.get_prompt()); + let _ = stdout.flush(); + + while self.running { + tokio::select! { + biased; + + // Head-block polling (only when events are enabled). + _ = head_poll_tick.tick(), if self.events_enabled => { + if let Ok(number) = self.provider.get_block_number().await { + if number > self.last_seen_block { + for n in (self.last_seen_block + 1)..=number { + let id = BlockId::Number(BlockNumberOrTag::Number(n)); + if let Ok(Some(block)) = self.provider.get_block(id).await { + let msg = format!( + "[new block] #{} hash={:.12}... 
txs={}", + block.header.number, + format!("{:?}", block.header.hash), + block.transactions.len(), + ); + print!("\r\x1b[K{}\r\n{}{}", msg.cyan(), self.get_prompt(), input_buffer); + let _ = stdout.flush(); + } + } + self.last_seen_block = number; + } + } + } + + // Check for keyboard input (non-blocking) + _ = input_tick.tick() => { + match super::terminal::poll_keyboard(&mut input_buffer, &self.get_prompt())? { + super::terminal::InputAction::Command(line) => { + let _ = disable_raw_mode(); + if let Err(e) = self.execute_command(&line).await { + println!("{}: {}", "Error".red(), e); + } + let _ = enable_raw_mode(); + if self.running { + print!("{}", self.get_prompt()); + let _ = stdout.flush(); + } + } + super::terminal::InputAction::Quit => self.running = false, + super::terminal::InputAction::None => {} + } + } + } + } + + print!("Goodbye!\r\n"); + Ok(()) + } + + /// Dispatch a parsed command. + async fn execute_command(&mut self, input: &str) -> eyre::Result<()> { + let cmd = Command::parse(input); + match cmd { + Command::Status => self.cmd_status().await, + Command::SyncStatus => self.cmd_sync_status().await, + Command::Block(arg) => self.cmd_block(arg).await, + Command::Blocks { from, to } => self.cmd_blocks(from, to).await, + Command::Fcs => self.cmd_fcs().await, + Command::L1(l1_cmd) => self.cmd_l1(l1_cmd).await, + Command::Tx(tx_cmd) => self.cmd_tx(tx_cmd).await, + Command::Peers(peers_cmd) => self.cmd_peers(peers_cmd).await, + Command::Events(events_cmd) => self.cmd_events(events_cmd), + Command::Admin(admin_cmd) => self.cmd_admin(admin_cmd).await, + Command::Rpc { method, params } => self.cmd_rpc(&method, params.as_deref()).await, + Command::Logs => self.cmd_logs(), + Command::Help => { + print_help(); + Ok(()) + } + Command::Exit => { + self.running = false; + Ok(()) + } + // Spawn-mode-only commands — give informative errors + Command::Build => { + println!( + "{}", + "build is only available in spawn mode (--chain / --sequencer).".yellow() + ); + 
Ok(()) + } + Command::Wallet(_) => { + println!( + "{}", + "wallet gen is only available in spawn mode. Use --private-key to set a signer." + .yellow() + ); + Ok(()) + } + Command::Run(_) => { + println!("{}", "run actions are only available in spawn mode.".yellow()); + Ok(()) + } + Command::Node(_) | Command::Nodes => { + println!( + "{}", + "node switching is only available in spawn mode (multiple nodes).".yellow() + ); + Ok(()) + } + Command::Db => { + println!("{}", "db path is only available in spawn mode.".yellow()); + Ok(()) + } + Command::Unknown(s) => { + if !s.is_empty() { + println!("Unknown command: {}. Type 'help' for available commands.", s); + } + Ok(()) + } + } + } + + // ------------------------------------------------------------------------- + // Helper + // ------------------------------------------------------------------------- + + /// Call a custom-namespace JSON-RPC method and deserialize the response. + /// + /// Uses `raw_request_dyn` (no trait bounds on P/R) combined with `serde_json` for + /// maximum compatibility regardless of the provider's network/transport generics. + async fn raw( + &self, + method: &'static str, + params: impl serde::Serialize, + ) -> eyre::Result { + crate::debug_toolkit::shared::rpc::raw_typed(&self.provider, method, params).await + } + + // ------------------------------------------------------------------------- + // Command implementations + // ------------------------------------------------------------------------- + + /// `status` — show node status via `rollupNode_status`. 
+ async fn cmd_status(&self) -> eyre::Result<()> { + let status: ChainOrchestratorStatus = self.raw("rollupNode_status", ()).await?; + + println!("{}", "=== Node Status ===".bold()); + println!("{}", "Node:".underline()); + println!(" RPC: {}", self.url.as_str()); + if let Some(signer) = &self.signer { + println!(" From: {:?}", signer.address()); + } + crate::debug_toolkit::shared::status::print_status_overview(&status); + + Ok(()) + } + + /// `sync-status` — detailed sync status. + async fn cmd_sync_status(&self) -> eyre::Result<()> { + let status: ChainOrchestratorStatus = self.raw("rollupNode_status", ()).await?; + crate::debug_toolkit::shared::status::print_sync_status(&status); + Ok(()) + } + + /// `fcs` — show forkchoice state. + async fn cmd_fcs(&self) -> eyre::Result<()> { + let status: ChainOrchestratorStatus = self.raw("rollupNode_status", ()).await?; + crate::debug_toolkit::shared::status::print_forkchoice(&status); + Ok(()) + } + + /// `block [n|latest]` — show block details. 
+ async fn cmd_block(&self, arg: BlockArg) -> eyre::Result<()> { + let tag = match arg { + BlockArg::Latest => BlockNumberOrTag::Latest, + BlockArg::Number(n) => BlockNumberOrTag::Number(n), + }; + + let block: Option = + self.raw("eth_getBlockByNumber", (tag, false)).await?; + let block = block.ok_or_else(|| eyre::eyre!("Block not found"))?; + + let number = block["number"].as_str().unwrap_or("?"); + let hash = block["hash"].as_str().unwrap_or("?"); + let parent = block["parentHash"].as_str().unwrap_or("?"); + let timestamp = block["timestamp"].as_str().unwrap_or("?"); + let gas_used = block["gasUsed"].as_str().unwrap_or("?"); + let gas_limit = block["gasLimit"].as_str().unwrap_or("?"); + let txs = block["transactions"].as_array(); + + println!("{}", format!("Block {}", number).bold()); + println!(" Hash: {}", hash); + println!(" Parent: {}", parent); + println!(" Timestamp: {}", timestamp); + println!(" Gas Used: {}", gas_used); + println!(" Gas Limit: {}", gas_limit); + + if let Some(txs) = txs { + println!(" Txs: {}", txs.len()); + for (i, tx) in txs.iter().enumerate() { + let tx_hash = tx.as_str().or_else(|| tx["hash"].as_str()).unwrap_or("?"); + println!(" [{}] hash={}", i, tx_hash); + } + } + + Ok(()) + } + + /// `blocks ` — list blocks in a range. + async fn cmd_blocks(&self, from: u64, to: u64) -> eyre::Result<()> { + println!("{}", format!("Blocks {} to {}:", from, to).bold()); + for n in from..=to { + let tag = BlockNumberOrTag::Number(n); + let block: Option = + self.raw("eth_getBlockByNumber", (tag, false)).await?; + if let Some(block) = block { + let hash = block["hash"].as_str().unwrap_or("?"); + let gas = block["gasUsed"].as_str().unwrap_or("?"); + let tx_count = block["transactions"].as_array().map(|a| a.len()).unwrap_or(0); + println!(" #{}: {} txs, gas: {}, hash: {:.12}...", n, tx_count, gas, hash); + } else { + println!(" #{}: {}", n, "not found".dimmed()); + } + } + Ok(()) + } + + /// `l1 status` / `l1 messages` — L1-related queries. 
    async fn cmd_l1(&self, cmd: L1Command) -> eyre::Result<()> {
        match cmd {
            L1Command::Status => {
                // L1 view is derived from the node's own status endpoint.
                let status: ChainOrchestratorStatus = self.raw("rollupNode_status", ()).await?;
                println!("{}", "L1 Status:".bold());
                println!(
                    " Synced: {}",
                    if status.l1.status.is_synced() { "true".green() } else { "false".red() }
                );
                println!(" L1 Head: #{}", status.l1.latest);
                println!(" L1 Final: #{}", status.l1.finalized);
                println!(" Processed: #{}", status.l1.processed);
            }
            L1Command::Messages => {
                // Only probe index 0 as a sample; deeper inspection goes through `rpc`.
                let msg: Option<serde_json::Value> =
                    self.raw("rollupNode_getL1MessageByIndex", [0u64]).await?;
                println!("{}", "L1 Message Queue (index 0):".bold());
                match msg {
                    Some(m) => println!("{}", serde_json::to_string_pretty(&m)?),
                    None => println!(" {}", "No message at index 0".dimmed()),
                }
                println!(
                    "{}",
                    "Hint: use 'rpc rollupNode_getL1MessageByIndex []' for specific indices"
                        .dimmed()
                );
            }
            // These require control over a mock L1, which only spawn mode provides.
            L1Command::Sync | L1Command::Block(_) | L1Command::Reorg(_) => {
                println!(
                    "{}",
                    "l1 sync/block/reorg are only available in spawn mode (mock L1).".yellow()
                );
            }
            L1Command::Send { .. } => {
                println!(
                    "{}",
                    "l1 send is only available in spawn mode. Use cast or a wallet to bridge."
                        .yellow()
                );
            }
        }
        Ok(())
    }

    /// `tx pending` / `tx send` / `tx inject`.
    async fn cmd_tx(&self, cmd: TxCommand) -> eyre::Result<()> {
        match cmd {
            TxCommand::Pending => {
                // Dump the full txpool content verbatim.
                let result: serde_json::Value = self.raw("txpool_content", ()).await?;
                println!("{}", "Pending Transactions:".bold());
                println!("{}", serde_json::to_string_pretty(&result)?);
            }
            TxCommand::Send { to, value, from: _ } => {
                // A local signer is mandatory: we sign an EIP-1559 tx ourselves and
                // submit it via eth_sendRawTransaction.
                let signer = self.signer.as_ref().ok_or_else(|| {
                    eyre::eyre!("No signer configured. Start with --private-key .")
                })?;
                let from_address = signer.address();

                // Chain id / nonce / fee data are fetched as raw JSON and parsed from
                // their 0x-prefixed hex encodings; the unwrap_or defaults are
                // best-effort fallbacks for malformed responses.
                let chain_id: serde_json::Value = self.raw("eth_chainId", ()).await?;
                let chain_id: u64 = u64::from_str_radix(
                    chain_id.as_str().unwrap_or("0x1").trim_start_matches("0x"),
                    16,
                )
                .unwrap_or(1);

                let nonce_val: serde_json::Value =
                    self.raw("eth_getTransactionCount", (from_address, "latest")).await?;
                let nonce: u64 = u64::from_str_radix(
                    nonce_val.as_str().unwrap_or("0x0").trim_start_matches("0x"),
                    16,
                )
                .unwrap_or(0);

                // Base fee from the latest block; default 1 gwei if absent.
                let latest: serde_json::Value =
                    self.raw("eth_getBlockByNumber", ("latest", false)).await?;
                let base_fee = latest["baseFeePerGas"]
                    .as_str()
                    .and_then(|s| u64::from_str_radix(s.trim_start_matches("0x"), 16).ok())
                    .unwrap_or(1_000_000_000);
                let base_fee_u128 = base_fee as u128;
                // Keep priority tip conservative on low-fee chains and always satisfy:
                // max_fee_per_gas >= max_priority_fee_per_gas.
                let max_priority_fee_per_gas = (base_fee_u128 / 2).max(1);
                let max_fee_per_gas = (base_fee_u128 * 2).max(max_priority_fee_per_gas);
                // Gas limit: estimate when possible, else fall back to a plain transfer's 21000.
                let gas_limit = match self
                    .raw::<serde_json::Value>(
                        "eth_estimateGas",
                        [serde_json::json!({
                            "from": format!("{:#x}", from_address),
                            "to": format!("{:#x}", to),
                            "value": format!("0x{value:x}"),
                        })],
                    )
                    .await
                {
                    Ok(v) => v
                        .as_str()
                        .and_then(|s| u64::from_str_radix(s.trim_start_matches("0x"), 16).ok())
                        // Add a small safety buffer on top of estimate.
                        .map(|g| g.saturating_mul(12) / 10)
                        .filter(|g| *g > 0)
                        .unwrap_or(21_000),
                    Err(e) => {
                        println!(
                            "{}",
                            format!(
                                "Warning: eth_estimateGas failed ({}), falling back to 21000",
                                e
                            )
                            .yellow()
                        );
                        21_000
                    }
                };

                let mut tx = TxEip1559 {
                    chain_id,
                    nonce,
                    gas_limit,
                    max_fee_per_gas,
                    max_priority_fee_per_gas,
                    to: TxKind::Call(to),
                    value,
                    access_list: Default::default(),
                    input: Default::default(),
                };

                // Sign, wrap in an EIP-2718 envelope, hex-encode and broadcast.
                let signature = signer.sign_transaction_sync(&mut tx)?;
                let signed = tx.into_signed(signature);
                let raw_tx = alloy_primitives::hex::encode_prefixed(signed.encoded_2718());

                let tx_hash: serde_json::Value =
                    self.raw("eth_sendRawTransaction", [raw_tx]).await?;
                // Response should be a hash string; fall back to raw JSON text if not.
                let tx_hash_str =
                    tx_hash.as_str().map(ToOwned::to_owned).unwrap_or_else(|| tx_hash.to_string());
                crate::debug_toolkit::shared::output::print_tx_sent(
                    &tx_hash_str,
                    &format!("{:?}", from_address),
                    &format!("{:?}", to),
                    value,
                    false,
                );
            }
            TxCommand::Inject(bytes) => {
                // Pre-signed raw bytes: just hex-encode and forward untouched.
                let hex = alloy_primitives::hex::encode_prefixed(&bytes);
                let tx_hash: serde_json::Value = self.raw("eth_sendRawTransaction", [hex]).await?;
                let tx_hash_str =
                    tx_hash.as_str().map(ToOwned::to_owned).unwrap_or_else(|| tx_hash.to_string());
                crate::debug_toolkit::shared::output::print_tx_injected(&tx_hash_str);
            }
        }
        Ok(())
    }

    /// `peers` / `peers connect <enode>`.
+ async fn cmd_peers(&self, cmd: PeersCommand) -> eyre::Result<()> { + match cmd { + PeersCommand::List => { + let peers: serde_json::Value = self.raw("admin_peers", ()).await?; + println!("{}", "Connected Peers:".bold()); + println!("{}", serde_json::to_string_pretty(&peers)?); + + println!(); + let node_info: serde_json::Value = self.raw("admin_nodeInfo", ()).await?; + println!("{}", "Local Node Info:".bold()); + println!("{}", serde_json::to_string_pretty(&node_info)?); + } + PeersCommand::Connect(enode_url) => { + let result: bool = self.raw("admin_addPeer", [enode_url.as_str()]).await?; + if result { + println!("{}", format!("Peer add request sent: {}", enode_url).green()); + } else { + println!("{}", "admin_addPeer returned false".yellow()); + } + } + } + Ok(()) + } + + /// `events on` / `events off` — toggle head-block polling. + fn cmd_events(&mut self, cmd: EventsCommand) -> eyre::Result<()> { + match cmd { + EventsCommand::On => { + self.events_enabled = true; + println!("{}", "Head-block polling enabled (2s interval)".green()); + } + EventsCommand::Off => { + self.events_enabled = false; + println!("{}", "Head-block polling disabled".yellow()); + } + EventsCommand::Filter(_) | EventsCommand::History(_) => { + println!("{}", "events filter/history are only available in spawn mode.".yellow()); + } + } + Ok(()) + } + + /// `admin enable-seq` / `admin disable-seq` / `admin revert `. 
+ async fn cmd_admin(&self, cmd: AdminCommand) -> eyre::Result<()> { + match cmd { + AdminCommand::EnableSequencing => { + let result: bool = + self.raw("rollupNodeAdmin_enableAutomaticSequencing", ()).await?; + crate::debug_toolkit::shared::output::print_admin_enable_result(result); + } + AdminCommand::DisableSequencing => { + let result: bool = + self.raw("rollupNodeAdmin_disableAutomaticSequencing", ()).await?; + crate::debug_toolkit::shared::output::print_admin_disable_result(result); + } + AdminCommand::RevertToL1Block(block_number) => { + crate::debug_toolkit::shared::output::print_admin_revert_start(block_number); + let result: bool = + self.raw("rollupNodeAdmin_revertToL1Block", [block_number]).await?; + crate::debug_toolkit::shared::output::print_admin_revert_result( + block_number, + result, + ); + } + } + Ok(()) + } + + /// `rpc [params]` — call any JSON-RPC method and pretty-print the result. + async fn cmd_rpc(&self, method: &str, params: Option<&str>) -> eyre::Result<()> { + let pretty = + crate::debug_toolkit::shared::rpc::raw_value(&self.provider, method, params).await?; + crate::debug_toolkit::shared::output::print_pretty_json(&pretty) + } + + /// `logs` — show log file path. + fn cmd_logs(&self) -> eyre::Result<()> { + crate::debug_toolkit::shared::output::print_log_file(&self.log_path); + Ok(()) + } +}