diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml
index 837d47e9f8..abc93f85c2 100644
--- a/.github/workflows/book.yml
+++ b/.github/workflows/book.yml
@@ -7,115 +7,50 @@ on:
branches: [main]
pull_request:
branches: [main]
+ types: [opened, reopened, synchronize, closed]
merge_group:
+# Add concurrency to prevent conflicts when multiple PR previews are being deployed
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: ${{ github.event_name == 'pull_request' }}
+
jobs:
- test:
- runs-on: ubuntu-latest
- name: test
- timeout-minutes: 60
-
- steps:
- - uses: actions/checkout@v4
-
- - name: Install mdbook
- run: |
- mkdir mdbook
- curl -sSL https://github.com/rust-lang/mdBook/releases/download/v0.4.14/mdbook-v0.4.14-x86_64-unknown-linux-gnu.tar.gz | tar -xz --directory=./mdbook
- echo $(pwd)/mdbook >> $GITHUB_PATH
-
- - name: Install mdbook-template
- run: |
- mkdir mdbook-template
- curl -sSL https://github.com/sgoudham/mdbook-template/releases/latest/download/mdbook-template-x86_64-unknown-linux-gnu.tar.gz | tar -xz --directory=./mdbook-template
- echo $(pwd)/mdbook-template >> $GITHUB_PATH
-
- - name: Run tests
- run: mdbook test
-
- lint:
- runs-on: ubuntu-latest
- name: lint
- timeout-minutes: 60
-
- steps:
- - uses: actions/checkout@v4
-
- - name: Install mdbook-linkcheck
- run: |
- mkdir mdbook-linkcheck
- curl -sSL -o mdbook-linkcheck.zip https://github.com/Michael-F-Bryan/mdbook-linkcheck/releases/latest/download/mdbook-linkcheck.x86_64-unknown-linux-gnu.zip
- unzip mdbook-linkcheck.zip -d ./mdbook-linkcheck
- chmod +x $(pwd)/mdbook-linkcheck/mdbook-linkcheck
- echo $(pwd)/mdbook-linkcheck >> $GITHUB_PATH
-
- - name: Run linkcheck
- run: mdbook-linkcheck --standalone
-
build:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- - uses: actions/checkout@v4
- - uses: rui314/setup-mold@v1
- - uses: dtolnay/rust-toolchain@nightly
- - name: Install mdbook
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Install bun
+ uses: oven-sh/setup-bun@v2
+
+ - name: Install Playwright browsers
+ # Required for rehype-mermaid to render Mermaid diagrams during build
run: |
- mkdir mdbook
- curl -sSL https://github.com/rust-lang/mdBook/releases/download/v0.4.14/mdbook-v0.4.14-x86_64-unknown-linux-gnu.tar.gz | tar -xz --directory=./mdbook
- echo $(pwd)/mdbook >> $GITHUB_PATH
+ cd book/vocs/
+ bun i
+ npx playwright install --with-deps chromium
- - name: Install mdbook-template
+ - name: Build Vocs
run: |
- mkdir mdbook-template
- curl -sSL https://github.com/sgoudham/mdbook-template/releases/latest/download/mdbook-template-x86_64-unknown-linux-gnu.tar.gz | tar -xz --directory=./mdbook-template
- echo $(pwd)/mdbook-template >> $GITHUB_PATH
+ cd book/vocs/ && bun run build
+ echo "Vocs Build Complete"
- - uses: Swatinem/rust-cache@v2
- with:
- cache-on-failure: true
-
- - name: Build book
- run: mdbook build
-
- - name: Build docs
- run: cargo docs --exclude "example-*"
- env:
- # Keep in sync with ./ci.yml:jobs.docs
- RUSTDOCFLAGS: --cfg docsrs --show-type-layout --generate-link-to-definition --enable-index-page -Zunstable-options
-
- - name: Move docs to book folder
- run: |
- mv target/doc target/book/docs
-
- - name: Archive artifact
- shell: sh
- run: |
- chmod -c -R +rX "target/book" |
- while read line; do
- echo "::warning title=Invalid file permissions automatically fixed::$line"
- done
- tar \
- --dereference --hard-dereference \
- --directory "target/book" \
- -cvf "$RUNNER_TEMP/artifact.tar" \
- --exclude=.git \
- --exclude=.github \
- .
+ - name: Setup Pages
+ uses: actions/configure-pages@v5
- name: Upload artifact
- uses: actions/upload-artifact@v4
+ uses: actions/upload-pages-artifact@v3
with:
- name: github-pages
- path: ${{ runner.temp }}/artifact.tar
- retention-days: 1
- if-no-files-found: error
+ path: "./book/vocs/docs/dist"
deploy:
# Only deploy if a push to main
if: github.ref_name == 'main' && github.event_name == 'push'
runs-on: ubuntu-latest
- needs: [test, lint, build]
+ needs: [build]
# Grant GITHUB_TOKEN the permissions required to make a Pages deployment
permissions:
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index ffa9f8edc3..7a167da8b1 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -20,9 +20,6 @@ jobs:
- type: ethereum
args: --workspace --lib --examples --tests --benches --locked
features: "ethereum asm-keccak jemalloc jemalloc-prof min-error-logs min-warn-logs min-info-logs min-debug-logs min-trace-logs"
- - type: book
- args: --manifest-path book/sources/Cargo.toml --workspace --bins
- features: ""
steps:
- uses: actions/checkout@v4
- uses: rui314/setup-mold@v1
@@ -158,8 +155,6 @@ jobs:
components: rustfmt
- name: Run fmt
run: cargo fmt --all --check
- - name: Run fmt on book sources
- run: cargo fmt --manifest-path book/sources/Cargo.toml --all --check
udeps:
name: udeps
diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml
index 767a3e5c0a..a46bf5bc3c 100644
--- a/.github/workflows/unit.yml
+++ b/.github/workflows/unit.yml
@@ -42,10 +42,6 @@ jobs:
args: --features "asm-keccak" --locked --exclude reth --exclude reth-bench --exclude "example-*" --exclude "reth-ethereum-*" --exclude "*-ethereum"
partition: 2
total_partitions: 2
- - type: book
- args: --manifest-path book/sources/Cargo.toml
- partition: 1
- total_partitions: 1
timeout-minutes: 30
steps:
- uses: actions/checkout@v4
diff --git a/.gitignore b/.gitignore
index 1072d75dfa..e4ca0420ba 100644
--- a/.gitignore
+++ b/.gitignore
@@ -54,5 +54,8 @@ rustc-ice-*
# Book sources should be able to build with the latest version
book/sources/Cargo.lock
+# vocs node_modules
+book/vocs/node_modules
+
# Cargo chef recipe file
recipe.json
diff --git a/book/SUMMARY.md b/book/SUMMARY.md
deleted file mode 100644
index 310eebb028..0000000000
--- a/book/SUMMARY.md
+++ /dev/null
@@ -1,84 +0,0 @@
-# Reth Book
-
-- [Introduction](./intro.md)
-- [Installation](./installation/installation.md)
- - [Pre-Built Binaries](./installation/binaries.md)
- - [Docker](./installation/docker.md)
- - [Build from Source](./installation/source.md)
- - [Build for ARM devices](./installation/build-for-arm-devices.md)
- - [Update Priorities](./installation/priorities.md)
-- [Run a Node](./run/run-a-node.md)
- - [Mainnet or official testnets](./run/mainnet.md)
- - [OP Stack](./run/optimism.md)
- - [Run an OP Mainnet Node](./run/sync-op-mainnet.md)
- - [Private testnet](./run/private-testnet.md)
- - [Metrics](./run/observability.md)
- - [Configuring Reth](./run/config.md)
- - [Transaction types](./run/transactions.md)
- - [Pruning & Full Node](./run/pruning.md)
- - [Ports](./run/ports.md)
- - [Troubleshooting](./run/troubleshooting.md)
-- [Interacting with Reth over JSON-RPC](./jsonrpc/intro.md)
- - [eth](./jsonrpc/eth.md)
- - [web3](./jsonrpc/web3.md)
- - [net](./jsonrpc/net.md)
- - [txpool](./jsonrpc/txpool.md)
- - [debug](./jsonrpc/debug.md)
- - [trace](./jsonrpc/trace.md)
- - [admin](./jsonrpc/admin.md)
- - [rpc](./jsonrpc/rpc.md)
-- [CLI Reference](./cli/cli.md)
- - [`reth`](./cli/reth.md)
- - [`reth node`](./cli/reth/node.md)
- - [`reth init`](./cli/reth/init.md)
- - [`reth init-state`](./cli/reth/init-state.md)
- - [`reth import`](./cli/reth/import.md)
- - [`reth import-era`](./cli/reth/import-era.md)
- - [`reth dump-genesis`](./cli/reth/dump-genesis.md)
- - [`reth db`](./cli/reth/db.md)
- - [`reth db stats`](./cli/reth/db/stats.md)
- - [`reth db list`](./cli/reth/db/list.md)
- - [`reth db checksum`](./cli/reth/db/checksum.md)
- - [`reth db diff`](./cli/reth/db/diff.md)
- - [`reth db get`](./cli/reth/db/get.md)
- - [`reth db get mdbx`](./cli/reth/db/get/mdbx.md)
- - [`reth db get static-file`](./cli/reth/db/get/static-file.md)
- - [`reth db drop`](./cli/reth/db/drop.md)
- - [`reth db clear`](./cli/reth/db/clear.md)
- - [`reth db clear mdbx`](./cli/reth/db/clear/mdbx.md)
- - [`reth db clear static-file`](./cli/reth/db/clear/static-file.md)
- - [`reth db version`](./cli/reth/db/version.md)
- - [`reth db path`](./cli/reth/db/path.md)
- - [`reth download`](./cli/reth/download.md)
- - [`reth stage`](./cli/reth/stage.md)
- - [`reth stage run`](./cli/reth/stage/run.md)
- - [`reth stage drop`](./cli/reth/stage/drop.md)
- - [`reth stage dump`](./cli/reth/stage/dump.md)
- - [`reth stage dump execution`](./cli/reth/stage/dump/execution.md)
- - [`reth stage dump storage-hashing`](./cli/reth/stage/dump/storage-hashing.md)
- - [`reth stage dump account-hashing`](./cli/reth/stage/dump/account-hashing.md)
- - [`reth stage dump merkle`](./cli/reth/stage/dump/merkle.md)
- - [`reth stage unwind`](./cli/reth/stage/unwind.md)
- - [`reth stage unwind to-block`](./cli/reth/stage/unwind/to-block.md)
- - [`reth stage unwind num-blocks`](./cli/reth/stage/unwind/num-blocks.md)
- - [`reth p2p`](./cli/reth/p2p.md)
- - [`reth p2p header`](./cli/reth/p2p/header.md)
- - [`reth p2p body`](./cli/reth/p2p/body.md)
- - [`reth p2p rlpx`](./cli/reth/p2p/rlpx.md)
- - [`reth p2p rlpx ping`](./cli/reth/p2p/rlpx/ping.md)
- - [`reth config`](./cli/reth/config.md)
- - [`reth debug`](./cli/reth/debug.md)
- - [`reth debug execution`](./cli/reth/debug/execution.md)
- - [`reth debug merkle`](./cli/reth/debug/merkle.md)
- - [`reth debug in-memory-merkle`](./cli/reth/debug/in-memory-merkle.md)
- - [`reth debug build-block`](./cli/reth/debug/build-block.md)
- - [`reth recover`](./cli/reth/recover.md)
- - [`reth recover storage-tries`](./cli/reth/recover/storage-tries.md)
- - [`reth prune`](./cli/reth/prune.md)
-- [Developers](./developers/developers.md)
- - [Execution Extensions](./developers/exex/exex.md)
- - [How do ExExes work?](./developers/exex/how-it-works.md)
- - [Hello World](./developers/exex/hello-world.md)
- - [Tracking State](./developers/exex/tracking-state.md)
- - [Remote](./developers/exex/remote.md)
- - [Contribute](./developers/contribute.md)
diff --git a/book/cli/SUMMARY.md b/book/cli/SUMMARY.md
deleted file mode 100644
index aa62529859..0000000000
--- a/book/cli/SUMMARY.md
+++ /dev/null
@@ -1,47 +0,0 @@
-- [`reth`](./reth.md)
- - [`reth node`](./reth/node.md)
- - [`reth init`](./reth/init.md)
- - [`reth init-state`](./reth/init-state.md)
- - [`reth import`](./reth/import.md)
- - [`reth import-era`](./reth/import-era.md)
- - [`reth dump-genesis`](./reth/dump-genesis.md)
- - [`reth db`](./reth/db.md)
- - [`reth db stats`](./reth/db/stats.md)
- - [`reth db list`](./reth/db/list.md)
- - [`reth db checksum`](./reth/db/checksum.md)
- - [`reth db diff`](./reth/db/diff.md)
- - [`reth db get`](./reth/db/get.md)
- - [`reth db get mdbx`](./reth/db/get/mdbx.md)
- - [`reth db get static-file`](./reth/db/get/static-file.md)
- - [`reth db drop`](./reth/db/drop.md)
- - [`reth db clear`](./reth/db/clear.md)
- - [`reth db clear mdbx`](./reth/db/clear/mdbx.md)
- - [`reth db clear static-file`](./reth/db/clear/static-file.md)
- - [`reth db version`](./reth/db/version.md)
- - [`reth db path`](./reth/db/path.md)
- - [`reth download`](./reth/download.md)
- - [`reth stage`](./reth/stage.md)
- - [`reth stage run`](./reth/stage/run.md)
- - [`reth stage drop`](./reth/stage/drop.md)
- - [`reth stage dump`](./reth/stage/dump.md)
- - [`reth stage dump execution`](./reth/stage/dump/execution.md)
- - [`reth stage dump storage-hashing`](./reth/stage/dump/storage-hashing.md)
- - [`reth stage dump account-hashing`](./reth/stage/dump/account-hashing.md)
- - [`reth stage dump merkle`](./reth/stage/dump/merkle.md)
- - [`reth stage unwind`](./reth/stage/unwind.md)
- - [`reth stage unwind to-block`](./reth/stage/unwind/to-block.md)
- - [`reth stage unwind num-blocks`](./reth/stage/unwind/num-blocks.md)
- - [`reth p2p`](./reth/p2p.md)
- - [`reth p2p header`](./reth/p2p/header.md)
- - [`reth p2p body`](./reth/p2p/body.md)
- - [`reth p2p rlpx`](./reth/p2p/rlpx.md)
- - [`reth p2p rlpx ping`](./reth/p2p/rlpx/ping.md)
- - [`reth config`](./reth/config.md)
- - [`reth debug`](./reth/debug.md)
- - [`reth debug execution`](./reth/debug/execution.md)
- - [`reth debug merkle`](./reth/debug/merkle.md)
- - [`reth debug in-memory-merkle`](./reth/debug/in-memory-merkle.md)
- - [`reth debug build-block`](./reth/debug/build-block.md)
- - [`reth recover`](./reth/recover.md)
- - [`reth recover storage-tries`](./reth/recover/storage-tries.md)
- - [`reth prune`](./reth/prune.md)
diff --git a/book/cli/help.rs b/book/cli/help.rs
index 963f53deb0..e97d0bbfc4 100755
--- a/book/cli/help.rs
+++ b/book/cli/help.rs
@@ -10,25 +10,28 @@ regex = "1"
---
use clap::Parser;
use regex::Regex;
-use std::borrow::Cow;
-use std::fs::{self, File};
-use std::io::{self, Write};
-use std::iter::once;
-use std::path::{Path, PathBuf};
-use std::process::{Command, Stdio};
-use std::str;
-use std::sync::LazyLock;
-use std::{fmt, process};
+use std::{
+ borrow::Cow,
+ fmt,
+ fs::{self, File},
+ io::{self, Write},
+ iter::once,
+ path::{Path, PathBuf},
+ process,
+ process::{Command, Stdio},
+ str,
+ sync::LazyLock,
+};
-const SECTION_START: &str = "<!-- CLI_REFERENCE START -->";
-const SECTION_END: &str = "<!-- CLI_REFERENCE END -->";
-const README: &str = r#"# CLI Reference
+const SECTION_START: &str = "{/* CLI_REFERENCE START */}";
+const SECTION_END: &str = "{/* CLI_REFERENCE END */}";
+const README: &str = r#"import Summary from './SUMMARY.mdx';
-
+# CLI Reference
Automatically-generated CLI reference from `--help` output.
-{{#include ./SUMMARY.md}}
+<Summary />
"#;
const TRIM_LINE_END_MARKDOWN: bool = true;
@@ -49,7 +52,7 @@ struct Args {
#[arg(long, default_value_t = String::from("."))]
root_dir: String,
- /// Indentation for the root SUMMARY.md file
+ /// Indentation for the root SUMMARY.mdx file
#[arg(long, default_value_t = 2)]
root_indentation: usize,
@@ -61,7 +64,7 @@ struct Args {
#[arg(long)]
readme: bool,
- /// Whether to update the root SUMMARY.md file
+ /// Whether to update the root SUMMARY.mdx file
#[arg(long)]
root_summary: bool,
@@ -76,11 +79,7 @@ struct Args {
fn write_file(file_path: &Path, content: &str) -> io::Result<()> {
let content = if TRIM_LINE_END_MARKDOWN {
- content
- .lines()
- .map(|line| line.trim_end())
- .collect::<Vec<_>>()
- .join("\n")
+ content.lines().map(|line| line.trim_end()).collect::<Vec<_>>().join("\n")
} else {
content.to_string()
};
@@ -106,25 +105,13 @@ fn main() -> io::Result<()> {
while let Some(cmd) = todo_iter.pop() {
let (new_subcmds, stdout) = get_entry(&cmd)?;
if args.verbose && !new_subcmds.is_empty() {
- println!(
- "Found subcommands for \"{}\": {:?}",
- cmd.command_name(),
- new_subcmds
- );
+ println!("Found subcommands for \"{}\": {:?}", cmd.command_name(), new_subcmds);
}
// Add new subcommands to todo_iter (so that they are processed in the correct order).
for subcmd in new_subcmds.into_iter().rev() {
- let new_subcmds: Vec<_> = cmd
- .subcommands
- .iter()
- .cloned()
- .chain(once(subcmd))
- .collect();
+ let new_subcmds: Vec<_> = cmd.subcommands.iter().cloned().chain(once(subcmd)).collect();
- todo_iter.push(Cmd {
- cmd: cmd.cmd,
- subcommands: new_subcmds,
- });
+ todo_iter.push(Cmd { cmd: cmd.cmd, subcommands: new_subcmds });
}
output.push((cmd, stdout));
}
@@ -134,25 +121,25 @@ fn main() -> io::Result<()> {
cmd_markdown(&out_dir, cmd, stdout)?;
}
- // Generate SUMMARY.md.
+ // Generate SUMMARY.mdx.
let summary: String = output
.iter()
.map(|(cmd, _)| cmd_summary(None, cmd, 0))
.chain(once("\n".to_string()))
.collect();
- write_file(&out_dir.clone().join("SUMMARY.md"), &summary)?;
+ write_file(&out_dir.clone().join("SUMMARY.mdx"), &summary)?;
// Generate README.md.
if args.readme {
- let path = &out_dir.join("README.md");
+ let path = &out_dir.join("README.mdx");
if args.verbose {
- println!("Writing README.md to \"{}\"", path.to_string_lossy());
+ println!("Writing README.mdx to \"{}\"", path.to_string_lossy());
}
write_file(path, README)?;
}
- // Generate root SUMMARY.md.
+ // Generate root SUMMARY.mdx.
if args.root_summary {
let root_summary: String = output
.iter()
@@ -166,7 +153,8 @@ fn main() -> io::Result<()> {
if args.verbose {
println!("Updating root summary in \"{}\"", path.to_string_lossy());
}
- update_root_summary(path, &root_summary)?;
+ // TODO: This is where we update the cli reference sidebar.ts
+ // update_root_summary(path, &root_summary)?;
}
Ok(())
@@ -213,8 +201,7 @@ fn parse_sub_commands(s: &str) -> Vec<String> {
.lines()
.take_while(|line| !line.starts_with("Options:") && !line.starts_with("Arguments:"))
.filter_map(|line| {
- re.captures(line)
- .and_then(|cap| cap.get(1).map(|m| m.as_str().to_string()))
+ re.captures(line).and_then(|cap| cap.get(1).map(|m| m.as_str().to_string()))
})
.filter(|cmd| cmd != "help")
.map(String::from)
@@ -229,7 +216,7 @@ fn cmd_markdown(out_dir: &Path, cmd: &Cmd, stdout: &str) -> io::Result<()> {
let out_path = out_dir.join(cmd.to_string().replace(" ", "/"));
fs::create_dir_all(out_path.parent().unwrap())?;
- write_file(&out_path.with_extension("md"), &out)?;
+ write_file(&out_path.with_extension("mdx"), &out)?;
Ok(())
}
@@ -265,12 +252,12 @@ fn cmd_summary(md_root: Option<PathBuf>, cmd: &Cmd, indent: usize) -> String {
Some(md_root) => format!("{}/{}", md_root.to_string_lossy(), cmd_path),
};
let indent_string = " ".repeat(indent + (cmd.subcommands.len() * 2));
- format!("{}- [`{}`](./{}.md)\n", indent_string, cmd_s, full_cmd_path)
+ format!("{}- [`{}`](/cli/{})\n", indent_string, cmd_s, full_cmd_path)
}
-/// Replaces the CLI_REFERENCE section in the root SUMMARY.md file.
+/// Replaces the CLI_REFERENCE section in the root SUMMARY.mdx file.
fn update_root_summary(root_dir: &Path, root_summary: &str) -> io::Result<()> {
- let summary_file = root_dir.join("SUMMARY.md");
+ let summary_file = root_dir.join("SUMMARY.mdx");
let original_summary_content = fs::read_to_string(&summary_file)?;
let section_re = regex!(&format!(r"(?s)\s*{SECTION_START}.*?{SECTION_END}"));
@@ -293,9 +280,8 @@ fn update_root_summary(root_dir: &Path, root_summary: &str) -> io::Result<()> {
let root_summary_s = root_summary.trim_end().replace("\n\n", "\n");
let replace_with = format!(" {}\n{}\n{}", SECTION_START, root_summary_s, last_line);
- let new_root_summary = section_re
- .replace(&original_summary_content, replace_with.as_str())
- .to_string();
+ let new_root_summary =
+ section_re.replace(&original_summary_content, replace_with.as_str()).to_string();
let mut root_summary_file = File::create(&summary_file)?;
root_summary_file.write_all(new_root_summary.as_bytes())
@@ -349,17 +335,11 @@ struct Cmd<'a> {
impl<'a> Cmd<'a> {
fn command_name(&self) -> &str {
- self.cmd
- .file_name()
- .and_then(|os_str| os_str.to_str())
- .expect("Expect valid command")
+ self.cmd.file_name().and_then(|os_str| os_str.to_str()).expect("Expect valid command")
}
fn new(cmd: &'a PathBuf) -> Self {
- Self {
- cmd,
- subcommands: Vec::new(),
- }
+ Self { cmd, subcommands: Vec::new() }
}
}
diff --git a/book/cli/reth/debug/replay-engine.md b/book/cli/reth/debug/replay-engine.md
deleted file mode 100644
index da36f11cc0..0000000000
--- a/book/cli/reth/debug/replay-engine.md
+++ /dev/null
@@ -1,332 +0,0 @@
-# reth debug replay-engine
-
-Debug engine API by replaying stored messages
-
-```bash
-$ reth debug replay-engine --help
-```
-```txt
-Usage: reth debug replay-engine [OPTIONS] --engine-api-store <PATH>
-
-Options:
- --instance
- Add a new instance of a node.
-
- Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine.
-
- Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other.
-
- Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2
-
- [default: 1]
-
- -h, --help
- Print help (see a summary with '-h')
-
-Datadir:
- --datadir
- The path to the data dir for all reth files and subdirectories.
-
- Defaults to the OS-specific data directory:
-
- - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/`
- - Windows: `{FOLDERID_RoamingAppData}/reth/`
- - macOS: `$HOME/Library/Application Support/reth/`
-
- [default: default]
-
- --datadir.static-files
- The absolute path to store static files in.
-
- --config
- The path to the configuration file to use
-
- --chain
- The chain this node is running.
- Possible values are either a built-in chain or the path to a chain specification file.
-
- Built-in chains:
- mainnet, sepolia, holesky, dev
-
- [default: mainnet]
-
-Database:
- --db.log-level
- Database logging level. Levels higher than "notice" require a debug build
-
- Possible values:
- - fatal: Enables logging for critical conditions, i.e. assertion failures
- - error: Enables logging for error conditions
- - warn: Enables logging for warning conditions
- - notice: Enables logging for normal but significant condition
- - verbose: Enables logging for verbose informational
- - debug: Enables logging for debug-level messages
- - trace: Enables logging for trace debug-level messages
- - extra: Enables logging for extra debug-level messages
-
- --db.exclusive
- Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume
-
- [possible values: true, false]
-
- --db.max-size
- Maximum database size (e.g., 4TB, 8MB)
-
- --db.growth-step
- Database growth step (e.g., 4GB, 4KB)
-
- --db.read-transaction-timeout
- Read transaction timeout in seconds, 0 means no timeout
-
-Networking:
- -d, --disable-discovery
- Disable the discovery service
-
- --disable-dns-discovery
- Disable the DNS discovery
-
- --disable-discv4-discovery
- Disable Discv4 discovery
-
- --enable-discv5-discovery
- Enable Discv5 discovery
-
- --disable-nat
- Disable Nat discovery
-
- --discovery.addr
- The UDP address to use for devp2p peer discovery version 4
-
- [default: 0.0.0.0]
-
- --discovery.port
- The UDP port to use for devp2p peer discovery version 4
-
- [default: 30303]
-
- --discovery.v5.addr
- The UDP IPv4 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv4
-
- --discovery.v5.addr.ipv6
- The UDP IPv6 address to use for devp2p peer discovery version 5. Overwritten by `RLPx` address, if it's also IPv6
-
- --discovery.v5.port
- The UDP IPv4 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv4, or `--discovery.v5.addr` is set
-
- [default: 9200]
-
- --discovery.v5.port.ipv6
- The UDP IPv6 port to use for devp2p peer discovery version 5. Not used unless `--addr` is IPv6, or `--discovery.addr.ipv6` is set
-
- [default: 9200]
-
- --discovery.v5.lookup-interval
- The interval in seconds at which to carry out periodic lookup queries, for the whole run of the program
-
- [default: 20]
-
- --discovery.v5.bootstrap.lookup-interval
- The interval in seconds at which to carry out boost lookup queries, for a fixed number of times, at bootstrap
-
- [default: 5]
-
- --discovery.v5.bootstrap.lookup-countdown
- The number of times to carry out boost lookup queries at bootstrap
-
- [default: 200]
-
- --trusted-peers
- Comma separated enode URLs of trusted peers for P2P connections.
-
- --trusted-peers enode://abcd@192.168.0.1:30303
-
- --trusted-only
- Connect to or accept from trusted peers only
-
- --bootnodes
- Comma separated enode URLs for P2P discovery bootstrap.
-
- Will fall back to a network-specific default if not specified.
-
- --dns-retries
- Amount of DNS resolution requests retries to perform when peering
-
- [default: 0]
-
- --peers-file
- The path to the known peers file. Connected peers are dumped to this file on nodes
- shutdown, and read on startup. Cannot be used with `--no-persist-peers`.
-
- --identity
- Custom node identity
-
- [default: reth/-/]
-
- --p2p-secret-key
- Secret key to use for this node.
-
- This will also deterministically set the peer ID. If not specified, it will be set in the data dir for the chain being used.
-
- --no-persist-peers
- Do not persist peers.
-
- --nat
- NAT resolution method (any|none|upnp|publicip|extip:\<IP\>)
-
- [default: any]
-
- --addr
- Network listening address
-
- [default: 0.0.0.0]
-
- --port
- Network listening port
-
- [default: 30303]
-
- --max-outbound-peers
- Maximum number of outbound requests. default: 100
-
- --max-inbound-peers
- Maximum number of inbound requests. default: 30
-
- --max-tx-reqs
- Max concurrent `GetPooledTransactions` requests.
-
- [default: 130]
-
- --max-tx-reqs-peer
- Max concurrent `GetPooledTransactions` requests per peer.
-
- [default: 1]
-
- --max-seen-tx-history
- Max number of seen transactions to remember per peer.
-
- Default is 320 transaction hashes.
-
- [default: 320]
-
- --max-pending-imports
- Max number of transactions to import concurrently.
-
- [default: 4096]
-
- --pooled-tx-response-soft-limit
- Experimental, for usage in research. Sets the max accumulated byte size of transactions
- to pack in one response.
- Spec'd at 2MiB.
-
- [default: 2097152]
-
- --pooled-tx-pack-soft-limit
- Experimental, for usage in research. Sets the max accumulated byte size of transactions to
- request in one request.
-
- Since `RLPx` protocol version 68, the byte size of a transaction is shared as metadata in a
- transaction announcement (see `RLPx` specs). This allows a node to request a specific size
- response.
-
- By default, nodes request only 128 KiB worth of transactions, but should a peer request
- more, up to 2 MiB, a node will answer with more than 128 KiB.
-
- Default is 128 KiB.
-
- [default: 131072]
-
- --max-tx-pending-fetch
- Max capacity of cache of hashes for transactions pending fetch.
-
- [default: 25600]
-
- --net-if.experimental
- Name of network interface used to communicate with peers.
-
- If flag is set, but no value is passed, the default interface for docker `eth0` is tried.
-
- --engine-api-store
- The path to read engine API messages from
-
- --interval
- The number of milliseconds between Engine API messages
-
- [default: 1000]
-
-Logging:
- --log.stdout.format
- The format to use for logs written to stdout
-
- [default: terminal]
-
- Possible values:
- - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
- - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
- - terminal: Represents terminal-friendly formatting for logs
-
- --log.stdout.filter
- The filter to use for logs written to stdout
-
- [default: ]
-
- --log.file.format
- The format to use for logs written to the log file
-
- [default: terminal]
-
- Possible values:
- - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
- - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
- - terminal: Represents terminal-friendly formatting for logs
-
- --log.file.filter
- The filter to use for logs written to the log file
-
- [default: debug]
-
- --log.file.directory
- The path to put log files in
-
- [default: /logs]
-
- --log.file.max-size
- The maximum size (in MB) of one log file
-
- [default: 200]
-
- --log.file.max-files
- The maximum amount of log files that will be stored. If set to 0, background file logging is disabled
-
- [default: 5]
-
- --log.journald
- Write logs to journald
-
- --log.journald.filter
- The filter to use for logs written to journald
-
- [default: error]
-
- --color
- Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting
-
- [default: always]
-
- Possible values:
- - always: Colors on
- - auto: Colors on
- - never: Colors off
-
-Display:
- -v, --verbosity...
- Set the minimum log level.
-
- -v Errors
- -vv Warnings
- -vvv Info
- -vvvv Debug
- -vvvvv Traces (warning: very verbose!)
-
- -q, --quiet
- Silence all log output
-```
\ No newline at end of file
diff --git a/book/cli/reth/import-op.md b/book/cli/reth/import-op.md
deleted file mode 100644
index d2d81980ce..0000000000
--- a/book/cli/reth/import-op.md
+++ /dev/null
@@ -1,134 +0,0 @@
-# op-reth import
-
-This syncs RLP encoded blocks from a file. Supports import of OVM blocks
-from the Bedrock datadir. Requires blocks, up to same height as receipts
-file, to already be imported.
-
-```bash
-$ op-reth import-op --help
-Usage: op-reth import-op [OPTIONS] <IMPORT_PATH>
-
-Options:
- --config
- The path to the configuration file to use.
-
- --datadir
- The path to the data dir for all reth files and subdirectories.
-
- Defaults to the OS-specific data directory:
-
- - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/`
- - Windows: `{FOLDERID_RoamingAppData}/reth/`
- - macOS: `$HOME/Library/Application Support/reth/`
-
- [default: default]
-
- --chunk-len
- Chunk byte length to read from file.
-
- [default: 1GB]
-
- -h, --help
- Print help (see a summary with '-h')
-
-Database:
- --db.log-level
- Database logging level. Levels higher than "notice" require a debug build
-
- Possible values:
- - fatal: Enables logging for critical conditions, i.e. assertion failures
- - error: Enables logging for error conditions
- - warn: Enables logging for warning conditions
- - notice: Enables logging for normal but significant condition
- - verbose: Enables logging for verbose informational
- - debug: Enables logging for debug-level messages
- - trace: Enables logging for trace debug-level messages
- - extra: Enables logging for extra debug-level messages
-
- --db.exclusive
- Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume
-
- [possible values: true, false]
-
- <IMPORT_PATH>
- The path to a `.rlp` block file for import.
-
- The online sync pipeline stages (headers and bodies) are replaced by a file import. Skips block execution since blocks below Bedrock are built on OVM.
-
-Logging:
- --log.stdout.format
- The format to use for logs written to stdout
-
- [default: terminal]
-
- Possible values:
- - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
- - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
- - terminal: Represents terminal-friendly formatting for logs
-
- --log.stdout.filter
- The filter to use for logs written to stdout
-
- [default: ]
-
- --log.file.format
- The format to use for logs written to the log file
-
- [default: terminal]
-
- Possible values:
- - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
- - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
- - terminal: Represents terminal-friendly formatting for logs
-
- --log.file.filter
- The filter to use for logs written to the log file
-
- [default: debug]
-
- --log.file.directory
- The path to put log files in
-
- [default: /logs]
-
- --log.file.max-size
- The maximum size (in MB) of one log file
-
- [default: 200]
-
- --log.file.max-files
- The maximum amount of log files that will be stored. If set to 0, background file logging is disabled
-
- [default: 5]
-
- --log.journald
- Write logs to journald
-
- --log.journald.filter
- The filter to use for logs written to journald
-
- [default: error]
-
- --color
- Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting
-
- [default: always]
-
- Possible values:
- - always: Colors on
- - auto: Colors on
- - never: Colors off
-
-Display:
- -v, --verbosity...
- Set the minimum log level.
-
- -v Errors
- -vv Warnings
- -vvv Info
- -vvvv Debug
- -vvvvv Traces (warning: very verbose!)
-
- -q, --quiet
- Silence all log output
-```
\ No newline at end of file
diff --git a/book/cli/reth/import-receipts-op.md b/book/cli/reth/import-receipts-op.md
deleted file mode 100644
index 0b7135e1d7..0000000000
--- a/book/cli/reth/import-receipts-op.md
+++ /dev/null
@@ -1,133 +0,0 @@
-# op-reth import-receipts-op
-
-This imports non-standard RLP encoded receipts from a file.
-The supported RLP encoding is the non-standard encoding used
-for receipt export in <https://github.com/testinprod-io/op-geth>.
-Supports import of OVM receipts from the Bedrock datadir.
-
-```bash
-$ op-reth import-receipts-op --help
-Usage: op-reth import-receipts-op [OPTIONS] <PATH>
-
-Options:
- --datadir
- The path to the data dir for all reth files and subdirectories.
-
- Defaults to the OS-specific data directory:
-
- - Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/`
- - Windows: `{FOLDERID_RoamingAppData}/reth/`
- - macOS: `$HOME/Library/Application Support/reth/`
-
- [default: default]
-
- --chunk-len
- Chunk byte length to read from file.
-
- [default: 1GB]
-
- -h, --help
- Print help (see a summary with '-h')
-
-Database:
- --db.log-level
- Database logging level. Levels higher than "notice" require a debug build
-
- Possible values:
- - fatal: Enables logging for critical conditions, i.e. assertion failures
- - error: Enables logging for error conditions
- - warn: Enables logging for warning conditions
- - notice: Enables logging for normal but significant condition
- - verbose: Enables logging for verbose informational
- - debug: Enables logging for debug-level messages
- - trace: Enables logging for trace debug-level messages
- - extra: Enables logging for extra debug-level messages
-
- --db.exclusive
- Open environment in exclusive/monopolistic mode. Makes it possible to open a database on an NFS volume
-
- [possible values: true, false]
-
- <PATH>
- The path to a receipts file for import. File must use `OpGethReceiptFileCodec` (used for
- exporting OP chain segment below Bedrock block via testinprod/op-geth).
-
-
-
-Logging:
- --log.stdout.format
- The format to use for logs written to stdout
-
- [default: terminal]
-
- Possible values:
- - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
- - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
- - terminal: Represents terminal-friendly formatting for logs
-
- --log.stdout.filter
- The filter to use for logs written to stdout
-
- [default: ]
-
- --log.file.format
- The format to use for logs written to the log file
-
- [default: terminal]
-
- Possible values:
- - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
- - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
- - terminal: Represents terminal-friendly formatting for logs
-
- --log.file.filter
- The filter to use for logs written to the log file
-
- [default: debug]
-
- --log.file.directory
- The path to put log files in
-
- [default: /logs]
-
- --log.file.max-size
- The maximum size (in MB) of one log file
-
- [default: 200]
-
- --log.file.max-files
- The maximum amount of log files that will be stored. If set to 0, background file logging is disabled
-
- [default: 5]
-
- --log.journald
- Write logs to journald
-
- --log.journald.filter
- The filter to use for logs written to journald
-
- [default: error]
-
- --color
- Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting
-
- [default: always]
-
- Possible values:
- - always: Colors on
- - auto: Colors on
- - never: Colors off
-
-Display:
- -v, --verbosity...
- Set the minimum log level.
-
- -v Errors
- -vv Warnings
- -vvv Info
- -vvvv Debug
- -vvvvv Traces (warning: very verbose!)
-
- -q, --quiet
- Silence all log output
-```
\ No newline at end of file
diff --git a/book/cli/reth/test-vectors.md b/book/cli/reth/test-vectors.md
deleted file mode 100644
index 844c5ed845..0000000000
--- a/book/cli/reth/test-vectors.md
+++ /dev/null
@@ -1,113 +0,0 @@
-# reth test-vectors
-
-Generate Test Vectors
-
-```bash
-$ reth test-vectors --help
-Usage: reth test-vectors [OPTIONS]
-
-Commands:
- tables Generates test vectors for specified tables. If no table is specified, generate for all
- help Print this message or the help of the given subcommand(s)
-
-Options:
- --chain
- The chain this node is running.
- Possible values are either a built-in chain or the path to a chain specification file.
-
- Built-in chains:
- mainnet, sepolia, holesky, dev
-
- [default: mainnet]
-
- --instance
- Add a new instance of a node.
-
- Configures the ports of the node to avoid conflicts with the defaults. This is useful for running multiple nodes on the same machine.
-
- Max number of instances is 200. It is chosen in a way so that it's not possible to have port numbers that conflict with each other.
-
- Changes to the following port numbers: - `DISCOVERY_PORT`: default + `instance` - 1 - `AUTH_PORT`: default + `instance` * 100 - 100 - `HTTP_RPC_PORT`: default - `instance` + 1 - `WS_RPC_PORT`: default + `instance` * 2 - 2
-
- [default: 1]
-
- -h, --help
- Print help (see a summary with '-h')
-
-Logging:
- --log.stdout.format
- The format to use for logs written to stdout
-
- [default: terminal]
-
- Possible values:
- - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
- - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
- - terminal: Represents terminal-friendly formatting for logs
-
- --log.stdout.filter
- The filter to use for logs written to stdout
-
- [default: ]
-
- --log.file.format
- The format to use for logs written to the log file
-
- [default: terminal]
-
- Possible values:
- - json: Represents JSON formatting for logs. This format outputs log records as JSON objects, making it suitable for structured logging
- - log-fmt: Represents logfmt (key=value) formatting for logs. This format is concise and human-readable, typically used in command-line applications
- - terminal: Represents terminal-friendly formatting for logs
-
- --log.file.filter
- The filter to use for logs written to the log file
-
- [default: debug]
-
- --log.file.directory
- The path to put log files in
-
- [default: /logs]
-
- --log.file.max-size
- The maximum size (in MB) of one log file
-
- [default: 200]
-
- --log.file.max-files
- The maximum amount of log files that will be stored. If set to 0, background file logging is disabled
-
- [default: 5]
-
- --log.journald
- Write logs to journald
-
- --log.journald.filter
- The filter to use for logs written to journald
-
- [default: error]
-
- --color
- Sets whether or not the formatter emits ANSI terminal escape codes for colors and other text formatting
-
- [default: always]
-
- Possible values:
- - always: Colors on
- - auto: Colors on
- - never: Colors off
-
-Display:
- -v, --verbosity...
- Set the minimum log level.
-
- -v Errors
- -vv Warnings
- -vvv Info
- -vvvv Debug
- -vvvvv Traces (warning: very verbose!)
-
- -q, --quiet
- Silence all log output
-```
\ No newline at end of file
diff --git a/book/cli/update.sh b/book/cli/update.sh
index 6e792df0f2..01593bfb79 100755
--- a/book/cli/update.sh
+++ b/book/cli/update.sh
@@ -3,13 +3,18 @@ set -eo pipefail
BOOK_ROOT="$(dirname "$(dirname "$0")")"
RETH=${1:-"$(dirname "$BOOK_ROOT")/target/debug/reth"}
+VOCS_PAGES_ROOT="$BOOK_ROOT/vocs/docs/pages"
+echo "Generating CLI documentation for reth at $RETH"
+echo "Using book root: $BOOK_ROOT"
+echo "Using vocs pages root: $VOCS_PAGES_ROOT"
cmd=(
"$(dirname "$0")/help.rs"
--root-dir "$BOOK_ROOT/"
--root-indentation 2
--root-summary
- --out-dir "$BOOK_ROOT/cli/"
+ --verbose
+ --out-dir "$VOCS_PAGES_ROOT/cli/"
"$RETH"
)
echo "Running: $" "${cmd[*]}"
diff --git a/book/developers/contribute.md b/book/developers/contribute.md
deleted file mode 100644
index 74f00e69a1..0000000000
--- a/book/developers/contribute.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# Contribute
-
-
-
-Reth has docs specifically geared for developers and contributors, including documentation on the structure and architecture of reth, the general workflow we employ, and other useful tips.
-
-You can find these docs [here](https://github.com/paradigmxyz/reth/tree/main/docs).
-
-Check out our contributing guidelines [here](https://github.com/paradigmxyz/reth/blob/main/CONTRIBUTING.md).
diff --git a/book/developers/developers.md b/book/developers/developers.md
deleted file mode 100644
index 9d8c5a9c67..0000000000
--- a/book/developers/developers.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Developers
-
-Reth is composed of several crates that can be used in standalone projects. If you are interested in using one or more of the crates, you can get an overview of them in the [developer docs](https://github.com/paradigmxyz/reth/tree/main/docs), or take a look at the [crate docs](https://paradigmxyz.github.io/reth/docs).
diff --git a/book/installation/priorities.md b/book/installation/priorities.md
deleted file mode 100644
index f7444e79d6..0000000000
--- a/book/installation/priorities.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# Update Priorities
-
-When publishing releases, reth will include an "Update Priority" section in the release notes, in the same manner Lighthouse does.
-
-The "Update Priority" section will include a table which may appear like so:
-
-| User Class | Priority |
-|----------------------|-----------------|
-| Payload Builders | Medium Priority |
-| Non-Payload Builders | Low Priority |
-
-To understand this table, the following terms are important:
-
-- *Payload builders* are those who use reth to build and validate payloads.
-- *Non-payload builders* are those who run reth for other purposes (e.g., data analysis, RPC or applications).
-- *High priority* updates should be completed as soon as possible (e.g., hours or days).
-- *Medium priority* updates should be completed at the next convenience (e.g., days or a week).
-- *Low priority* updates should be completed in the next routine update cycle (e.g., two weeks).
diff --git a/book/run/ports.md b/book/run/ports.md
deleted file mode 100644
index 5239a5262c..0000000000
--- a/book/run/ports.md
+++ /dev/null
@@ -1,38 +0,0 @@
-# Ports
-
-This section provides essential information about the ports used by the system, their primary purposes, and recommendations for exposure settings.
-
-## Peering Ports
-
-- **Port:** 30303
-- **Protocol:** TCP and UDP
-- **Purpose:** Peering with other nodes for synchronization of blockchain data. Nodes communicate through this port to maintain network consensus and share updated information.
-- **Exposure Recommendation:** This port should be exposed to enable seamless interaction and synchronization with other nodes in the network.
-
-## Metrics Port
-
-- **Port:** 9001
-- **Protocol:** TCP
-- **Purpose:** This port is designated for serving metrics related to the system's performance and operation. It allows internal monitoring and data collection for analysis.
-- **Exposure Recommendation:** By default, this port should not be exposed to the public. It is intended for internal monitoring and analysis purposes.
-
-## HTTP RPC Port
-
-- **Port:** 8545
-- **Protocol:** TCP
-- **Purpose:** Port 8545 provides an HTTP-based Remote Procedure Call (RPC) interface. It enables external applications to interact with the blockchain by sending requests over HTTP.
-- **Exposure Recommendation:** Similar to the metrics port, exposing this port to the public is not recommended by default due to security considerations.
-
-## WS RPC Port
-
-- **Port:** 8546
-- **Protocol:** TCP
-- **Purpose:** Port 8546 offers a WebSocket-based Remote Procedure Call (RPC) interface. It allows real-time communication between external applications and the blockchain.
-- **Exposure Recommendation:** As with the HTTP RPC port, the WS RPC port should not be exposed by default for security reasons.
-
-## Engine API Port
-
-- **Port:** 8551
-- **Protocol:** TCP
-- **Purpose:** Port 8551 facilitates communication between the execution client (reth) and the consensus layer (CL) client. It enables essential internal processes.
-- **Exposure Recommendation:** This port is not meant to be exposed to the public by default. It should be reserved for internal communication between vital components of the system.
diff --git a/book/run/run-a-node.md b/book/run/run-a-node.md
deleted file mode 100644
index d8981e1552..0000000000
--- a/book/run/run-a-node.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# Run a Node
-
-Congratulations, now that you have installed Reth, it's time to run it!
-
-In this chapter we'll go through a few different topics you'll encounter when running Reth, including:
-1. [Running on mainnet or official testnets](./mainnet.md)
-1. [Running on OP Stack chains](./optimism.md)
-1. [Logs and Observability](./observability.md)
-1. [Configuring reth.toml](./config.md)
-1. [Transaction types](./transactions.md)
-1. [Pruning & Full Node](./pruning.md)
-1. [Ports](./ports.md)
-1. [Troubleshooting](./troubleshooting.md)
-
-In the future, we also intend to support the [OP Stack](https://docs.optimism.io/get-started/superchain), which will allow you to run Reth as a Layer 2 client. More there soon!
diff --git a/book/templates/source_and_github.md b/book/templates/source_and_github.md
deleted file mode 100644
index c4abbaa389..0000000000
--- a/book/templates/source_and_github.md
+++ /dev/null
@@ -1,4 +0,0 @@
-[File: [[ #path ]]](https://github.com/paradigmxyz/reth/blob/main/[[ #path ]])
-```rust,no_run,noplayground
-{{#include [[ #path_to_root ]][[ #path ]]:[[ #anchor ]]}}
-```
\ No newline at end of file
diff --git a/book/theme/head.hbs b/book/theme/head.hbs
deleted file mode 100644
index 37667d80f6..0000000000
--- a/book/theme/head.hbs
+++ /dev/null
@@ -1,5 +0,0 @@
-
-
-
-
-
diff --git a/book/vocs/CLAUDE.md b/book/vocs/CLAUDE.md
new file mode 100644
index 0000000000..98b57a5791
--- /dev/null
+++ b/book/vocs/CLAUDE.md
@@ -0,0 +1,103 @@
+# CLAUDE.md
+
+This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
+
+## Project Overview
+
+This is the **Reth documentation website** built with [Vocs](https://vocs.dev), a modern documentation framework. The site contains comprehensive documentation for Reth, the Ethereum execution client, including installation guides, CLI references, SDK documentation, and tutorials.
+
+## Repository Structure
+
+- **`docs/pages/`**: All documentation content in MDX format
+ - `cli/`: Command-line interface documentation and references
+ - `exex/`: Execution Extensions (ExEx) guides and examples
+ - `installation/`: Installation and setup guides
+ - `introduction/`: Introduction, benchmarks, and why-reth content
+ - `jsonrpc/`: JSON-RPC API documentation
+ - `run/`: Node running guides and configuration
+ - `sdk/`: SDK documentation and examples
+- **`docs/snippets/`**: Code examples and snippets used in documentation
+- **`sidebar.ts`**: Navigation configuration
+- **`vocs.config.ts`**: Vocs configuration file
+
+## Essential Commands
+
+```bash
+# Install dependencies
+bun install
+
+# Start development server
+bun run dev
+
+# Build for production
+bun run build
+
+# Preview production build
+bun run preview
+```
+
+## Development Workflow
+
+### Content Organization
+
+1. **MDX Files**: All content is written in MDX (Markdown + React components)
+2. **Navigation**: Update `sidebar.ts` when adding new pages
+3. **Code Examples**: Place reusable code snippets in `docs/snippets/`
+4. **Assets**: Place images and static assets in `docs/public/`
+
+### Adding New Documentation
+
+1. Create new `.mdx` files in appropriate subdirectories under `docs/pages/`
+2. Update `sidebar.ts` to include new pages in navigation (a sketch follows this list)
+3. Use consistent heading structure and markdown formatting
+4. Reference code examples from `docs/snippets/` when possible
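+
+A minimal sketch of wiring a new page into `sidebar.ts` (the real config's shape and existing entries may differ; the section title, route, and file name below are placeholders):
+
+```ts
+// sidebar.ts — hypothetical entry for a newly added page
+export const sidebar = [
+  {
+    text: 'Run a Node',
+    items: [
+      { text: 'Overview', link: '/run/overview' },
+      // New page created at docs/pages/run/my-new-guide.mdx
+      { text: 'My New Guide', link: '/run/my-new-guide' },
+    ],
+  },
+]
+```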
+
+### Code Examples and Snippets
+
+- **Live Examples**: Use the snippets system to include actual runnable code
+- **Rust Code**: Include cargo project examples in `docs/snippets/sources/`
+- **CLI Examples**: Show actual command usage with expected outputs
+
+### Configuration
+
+- **Base Path**: Site deploys to the `/reth` path (configured in `vocs.config.ts`; see the sketch after this list)
+- **Theme**: Custom accent colors for light/dark themes
+- **Vite**: Uses Vite as the underlying build tool
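+
+As a hedged illustration of the fields above (option names follow the public Vocs API; the title and colors are placeholders, and the actual `vocs.config.ts` sets more than this):
+
+```ts
+// vocs.config.ts — illustrative excerpt, not the actual file
+import { defineConfig } from 'vocs'
+
+export default defineConfig({
+  title: 'Reth',
+  // GitHub Pages serves the site under the /reth base path
+  basePath: '/reth',
+  theme: {
+    // Custom accent colors for light and dark mode
+    accentColor: { light: '#0f766e', dark: '#2dd4bf' },
+  },
+})
+```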
+
+### Content Guidelines
+
+1. **Be Practical**: Focus on actionable guides and real-world examples
+2. **Code First**: Show working code examples before explaining concepts
+3. **Consistent Structure**: Follow existing page structures for consistency
+4. **Cross-References**: Link between related pages and sections
+5. **Keep Current**: Ensure documentation matches latest Reth features
+
+### File Naming Conventions
+
+- Use kebab-case for file and directory names
+- Match URL structure to file structure
+- Use descriptive names that reflect content purpose
+
+### Common Tasks
+
+**Adding a new CLI command documentation:**
+1. Create `.mdx` file in `docs/pages/cli/reth/`
+2. Add to sidebar navigation
+3. Include usage examples and parameter descriptions
+
+**Adding a new guide:**
+1. Create `.mdx` file in appropriate category
+2. Update sidebar with new entry
+3. Include practical examples and next steps
+
+**Updating code examples:**
+1. Modify files in `docs/snippets/sources/`
+2. Ensure examples compile and run correctly
+3. Test that documentation references work properly
+
+## Development Notes
+
+- This is a TypeScript/React project using Vocs framework
+- Content is primarily MDX with some TypeScript configuration
+- Focus on clear, practical documentation that helps users succeed with Reth
+- Maintain consistency with existing documentation style and structure
\ No newline at end of file
diff --git a/book/vocs/README.md b/book/vocs/README.md
new file mode 100644
index 0000000000..3bb11a44a0
--- /dev/null
+++ b/book/vocs/README.md
@@ -0,0 +1 @@
+This is a [Vocs](https://vocs.dev) project bootstrapped with the Vocs CLI.
diff --git a/book/vocs/bun.lockb b/book/vocs/bun.lockb
new file mode 100755
index 0000000000..0f15cd4dc6
Binary files /dev/null and b/book/vocs/bun.lockb differ
diff --git a/book/vocs/check-links.ts b/book/vocs/check-links.ts
new file mode 100644
index 0000000000..e6bf42c8cb
--- /dev/null
+++ b/book/vocs/check-links.ts
@@ -0,0 +1,316 @@
+#!/usr/bin/env bun
+import { Glob } from "bun";
+import { readFileSync } from "node:fs";
+import { join, dirname, resolve, relative } from "node:path";
+
+const CONFIG = {
+ DOCS_DIR: "./docs/pages",
+ PUBLIC_DIR: "./docs/public",
+ REPORT_PATH: "links-report.json",
+ FILE_PATTERNS: "**/*.{md,mdx}",
+ MARKDOWN_EXTENSIONS: /\.(md|mdx)$/,
+} as const;
+
+interface BrokenLink {
+ file: string;
+ link: string;
+ line: number;
+ reason: string;
+}
+
+interface LinkCheckReport {
+ timestamp: string;
+ totalFiles: number;
+ totalLinks: number;
+ brokenLinks: Array<BrokenLink>;
+ summary: {
+ brokenCount: number;
+ validCount: number;
+ };
+}
+
+main();
+
+async function main() {
+ try {
+ const report = await checkLinks();
+ await saveReport(report);
+ displayResults(report);
+
+ process.exit(report.summary.brokenCount > 0 ? 1 : 0);
+ } catch (error) {
+ console.error("\nβ Fatal error during link checking:");
+
+ if (error instanceof Error) {
+ console.error(` ${error.message}`);
+ if (error.stack) {
+ console.error("\nStack trace:");
+ console.error(error.stack);
+ }
+ } else console.error(error);
+
+ process.exit(2);
+ }
+}
+
+async function checkLinks(): Promise<LinkCheckReport> {
+ console.log("π Finding markdown files...");
+ const files = await getAllMarkdownFiles();
+ console.log(`π Found ${files.length} markdown files`);
+
+ console.log("π Finding public assets...");
+ const publicAssets = await getAllPublicAssets();
+ console.log(`πΌοΈ Found ${publicAssets.length} public assets`);
+
+ console.log("πΊοΈ Building file path map...");
+ const pathMap = buildFilePathMap(files, publicAssets);
+ console.log(`π Mapped ${pathMap.size} possible paths`);
+
+ const brokenLinks: BrokenLink[] = [];
+ let totalLinks = 0;
+
+ console.log("π Checking links in files...");
+
+ for (let index = 0; index < files.length; index++) {
+ const file = files[index];
+
+ try {
+ const content = readFileSync(file, "utf-8");
+ const links = extractLinksFromMarkdown(content);
+
+ for (const { link, line } of links) {
+ totalLinks++;
+ const error = validateLink(link, file, pathMap);
+
+ if (error) {
+ brokenLinks.push({
+ file: relative(process.cwd(), file),
+ link,
+ line,
+ reason: error,
+ });
+ }
+ }
+ } catch (error) {
+ console.error(`\nError reading ${file}:`, error);
+ }
+ }
+
+ console.log("\nβ Link checking complete!");
+
+ return {
+ timestamp: new Date().toISOString(),
+ totalFiles: files.length,
+ totalLinks,
+ brokenLinks,
+ summary: {
+ brokenCount: brokenLinks.length,
+ validCount: totalLinks - brokenLinks.length,
+ },
+ };
+}
+
+async function getAllMarkdownFiles(): Promise<string[]> {
+ const glob = new Glob(CONFIG.FILE_PATTERNS);
+ const files = await Array.fromAsync(glob.scan({ cwd: CONFIG.DOCS_DIR }));
+ return files.map((file) => join(CONFIG.DOCS_DIR, file));
+}
+
+async function getAllPublicAssets(): Promise<string[]> {
+ const glob = new Glob("**/*");
+ const files = await Array.fromAsync(glob.scan({ cwd: CONFIG.PUBLIC_DIR }));
+ return files;
+}
+
+function buildFilePathMap(
+ files: Array<string>,
+ publicAssets: Array<string>,
+): Set<string> {
+ const pathMap = new Set<string>();
+
+ const addPath = (path: string) => {
+ if (path && typeof path === "string") pathMap.add(path);
+ };
+
+ for (const file of files) {
+ const relativePath = relative(CONFIG.DOCS_DIR, file);
+
+ addPath(relativePath);
+
+ const withoutExt = relativePath.replace(CONFIG.MARKDOWN_EXTENSIONS, "");
+ addPath(withoutExt);
+
+ if (withoutExt.endsWith("/index"))
+ addPath(withoutExt.replace("/index", ""));
+
+ addPath(`/${withoutExt}`);
+ if (withoutExt.endsWith("/index"))
+ addPath(`/${withoutExt.replace("/index", "")}`);
+ }
+
+ for (const asset of publicAssets) addPath(`/${asset}`);
+
+ return pathMap;
+}
+
+function extractLinksFromMarkdown(
+ content: string,
+): Array<{ link: string; line: number }> {
+ const lines = content.split("\n");
+ const links: Array<{ link: string; line: number }> = [];
+ let inCodeBlock = false;
+
+ for (let lineIndex = 0; lineIndex < lines.length; lineIndex++) {
+ const line = lines[lineIndex];
+ const lineNumber = lineIndex + 1;
+
+ // Toggle code block state
+ if (line.trim().startsWith("```")) {
+ inCodeBlock = !inCodeBlock;
+ continue;
+ }
+
+ if (inCodeBlock) continue;
+
+ const processedLine = line
+ .split("`")
+ .filter((_, index) => index % 2 === 0)
+ .join("");
+
+ links.push(...extractMarkdownLinks(processedLine, lineNumber));
+ links.push(...extractHtmlLinks(processedLine, lineNumber));
+ }
+
+ return links;
+}
+
+function extractMarkdownLinks(
+ line: string,
+ lineNumber: number,
+): Array<{ link: string; line: number }> {
+ const regex = /\[([^\]]*)\]\(([^)]+)\)/g;
+ return [...line.matchAll(regex)]
+ .map(([, , url]) => ({ link: url, line: lineNumber }))
+ .filter(({ link }) => isInternalLink(link));
+}
+
+function extractHtmlLinks(
+ line: string,
+ lineNumber: number,
+): Array<{ link: string; line: number }> {
+ const regex = /<a[^>]+href=["']([^"']+)["'][^>]*>/g;
+ return [...line.matchAll(regex)]
+ .map(([, url]) => ({ link: url, line: lineNumber }))
+ .filter(({ link }) => isInternalLink(link));
+}
+
+function isInternalLink(url: string): boolean {
+ return (
+ !url.startsWith("http") &&
+ !url.startsWith("mailto:") &&
+ !url.startsWith("#")
+ );
+}
+
+function validateLink(
+ link: string,
+ sourceFile: string,
+ pathMap: Set<string>,
+): string | null {
+ const [linkPath] = link.split("#");
+ if (!linkPath) return null; // Pure anchor link
+
+ if (linkPath.startsWith("/")) return validateAbsolutePath(linkPath, pathMap);
+ return validateRelativePath(linkPath, sourceFile, pathMap);
+}
+
+function validateAbsolutePath(
+ linkPath: string,
+ pathMap: Set<string>,
+): string | null {
+ const variations = [
+ linkPath,
+ linkPath.slice(1), // Remove leading slash
+ linkPath.replace(/\/$/, ""), // Remove trailing slash
+ linkPath
+ .slice(1)
+ .replace(/\/$/, ""), // Remove both
+ ];
+
+ return variations.some((path) => pathMap.has(path))
+ ? null
+ : `Absolute path not found: ${linkPath}`;
+}
+
+function validateRelativePath(
+ linkPath: string,
+ sourceFile: string,
+ pathMap: Set<string>,
+): string | null {
+ const sourceDir = dirname(relative(CONFIG.DOCS_DIR, sourceFile));
+ const resolvedPath = resolve(sourceDir, linkPath);
+ const normalizedPath = relative(".", resolvedPath);
+
+ const variations = [
+ linkPath,
+ normalizedPath,
+ `/${normalizedPath}`,
+ normalizedPath.replace(CONFIG.MARKDOWN_EXTENSIONS, ""),
+ `/${normalizedPath.replace(CONFIG.MARKDOWN_EXTENSIONS, "")}`,
+ ];
+
+ return variations.some((path) => pathMap.has(path))
+ ? null
+ : `Relative path not found: ${linkPath} (resolved to: ${normalizedPath})`;
+}
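+
+// Putting it together, a sketch of checking a single file (the object
+// shape matches what LinkCheckReporter expects):
+//
+//   const content = await Bun.file(file).text();
+//   for (const { link, line } of extractLinksFromMarkdown(content)) {
+//     const reason = validateLink(link, file, pathMap);
+//     if (reason) brokenLinks.push({ file, link, line, reason });
+//   }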
+
+async function saveReport(report: LinkCheckReport) {
+ try {
+ await Bun.write(CONFIG.REPORT_PATH, JSON.stringify(report, null, 2));
+ console.log(`\n📄 Report saved to: ${CONFIG.REPORT_PATH}`);
+ } catch (error) {
+ console.error(
+ `\n⚠️ Warning: Failed to save report to ${CONFIG.REPORT_PATH}`,
+ );
+ console.error(error);
+ }
+}
+
+function displayResults(report: LinkCheckReport) {
+ LinkCheckReporter.printSummary(report);
+
+ if (report.brokenLinks.length > 0)
+ LinkCheckReporter.printBrokenLinks(report.brokenLinks);
+ else console.log("\n✅ All links are valid!");
+}
+
+const LinkCheckReporter = {
+ printSummary: (report: LinkCheckReport) => {
+ console.log("\nπ Link Check Summary:");
+ console.log(` π Files checked: ${report.totalFiles}`);
+ console.log(` π Total links: ${report.totalLinks}`);
+ console.log(` β Valid links: ${report.summary.validCount}`);
+ console.log(` β Broken links: ${report.summary.brokenCount}`);
+ },
+ printBrokenLinks: (brokenLinks: LinkCheckReport["brokenLinks"]) => {
+ if (brokenLinks.length === 0) return;
+
+ console.log("\nβ Broken Links Found:\n");
+
+ const byFile = brokenLinks.reduce(
+ (acc, broken) => {
+ if (!acc[broken.file]) acc[broken.file] = [];
+ acc[broken.file].push(broken);
+ return acc;
+ },
+ {} as Record<string, typeof brokenLinks>,
+ );
+
+ for (const [file, links] of Object.entries(byFile)) {
+ console.log(`📄 ${file}:`);
+ for (const broken of links) {
+ console.log(` Line ${broken.line}: ${broken.link}`);
+ console.log(` └─ ${broken.reason}\n`);
+ }
+ }
+ },
+};
\ No newline at end of file
diff --git a/book/vocs/docs/components/SdkShowcase.tsx b/book/vocs/docs/components/SdkShowcase.tsx
new file mode 100644
index 0000000000..14a1f491b8
--- /dev/null
+++ b/book/vocs/docs/components/SdkShowcase.tsx
@@ -0,0 +1,88 @@
+import React from 'react'
+
+interface SdkProject {
+ name: string
+ description: string
+ loc: string
+ githubUrl: string
+ logoUrl?: string
+ company: string
+}
+
+const projects: SdkProject[] = [
+ {
+ name: 'Base Node',
+ description: "Coinbase's L2 scaling solution node implementation",
+ loc: '~3K',
+ githubUrl: 'https://github.com/base/node-reth',
+ company: 'Coinbase'
+ },
+ {
+ name: 'Bera Reth',
+ description: "Berachain's high-performance EVM node with custom features",
+ loc: '~1K',
+ githubUrl: 'https://github.com/berachain/bera-reth',
+ company: 'Berachain'
+ },
+ {
+ name: 'Reth Gnosis',
+ description: "Gnosis Chain's xDai-compatible execution client",
+ loc: '~5K',
+ githubUrl: 'https://github.com/gnosischain/reth_gnosis',
+ company: 'Gnosis'
+ },
+ {
+ name: 'Reth BSC',
+ description: "BNB Smart Chain execution client implementation",
+ loc: '~6K',
+ githubUrl: 'https://github.com/loocapro/reth-bsc',
+ company: 'LooCa Protocol'
+ }
+]
+
+export function SdkShowcase() {
+  // Minimal card grid for the projects above; the exact layout and
+  // styling here are a sketch, not the final design.
+  return (
+    <div className="sdk-showcase">
+      {projects.map((project) => (
+        <a key={project.name} href={project.githubUrl} className="sdk-showcase-card">
+          <h3>{project.name}</h3>
+          <p>{project.description}</p>
+          <span>{project.company}</span>
+          <span>{project.loc} LOC</span>
+        </a>
+      ))}
+    </div>
+  )
+}
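+
+// Rendered from MDX (see docs/pages/index.mdx):
+//   import { SdkShowcase } from "../components/SdkShowcase";
+//   <SdkShowcase />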
\ No newline at end of file
diff --git a/book/vocs/docs/pages/cli/SUMMARY.mdx b/book/vocs/docs/pages/cli/SUMMARY.mdx
new file mode 100644
index 0000000000..330f32b3fd
--- /dev/null
+++ b/book/vocs/docs/pages/cli/SUMMARY.mdx
@@ -0,0 +1,47 @@
+- [`reth`](/cli/reth)
+ - [`reth node`](/cli/reth/node)
+ - [`reth init`](/cli/reth/init)
+ - [`reth init-state`](/cli/reth/init-state)
+ - [`reth import`](/cli/reth/import)
+ - [`reth import-era`](/cli/reth/import-era)
+ - [`reth dump-genesis`](/cli/reth/dump-genesis)
+ - [`reth db`](/cli/reth/db)
+ - [`reth db stats`](/cli/reth/db/stats)
+ - [`reth db list`](/cli/reth/db/list)
+ - [`reth db checksum`](/cli/reth/db/checksum)
+ - [`reth db diff`](/cli/reth/db/diff)
+ - [`reth db get`](/cli/reth/db/get)
+ - [`reth db get mdbx`](/cli/reth/db/get/mdbx)
+ - [`reth db get static-file`](/cli/reth/db/get/static-file)
+ - [`reth db drop`](/cli/reth/db/drop)
+ - [`reth db clear`](/cli/reth/db/clear)
+ - [`reth db clear mdbx`](/cli/reth/db/clear/mdbx)
+ - [`reth db clear static-file`](/cli/reth/db/clear/static-file)
+ - [`reth db version`](/cli/reth/db/version)
+ - [`reth db path`](/cli/reth/db/path)
+ - [`reth download`](/cli/reth/download)
+ - [`reth stage`](/cli/reth/stage)
+ - [`reth stage run`](/cli/reth/stage/run)
+ - [`reth stage drop`](/cli/reth/stage/drop)
+ - [`reth stage dump`](/cli/reth/stage/dump)
+ - [`reth stage dump execution`](/cli/reth/stage/dump/execution)
+ - [`reth stage dump storage-hashing`](/cli/reth/stage/dump/storage-hashing)
+ - [`reth stage dump account-hashing`](/cli/reth/stage/dump/account-hashing)
+ - [`reth stage dump merkle`](/cli/reth/stage/dump/merkle)
+ - [`reth stage unwind`](/cli/reth/stage/unwind)
+ - [`reth stage unwind to-block`](/cli/reth/stage/unwind/to-block)
+ - [`reth stage unwind num-blocks`](/cli/reth/stage/unwind/num-blocks)
+ - [`reth p2p`](/cli/reth/p2p)
+ - [`reth p2p header`](/cli/reth/p2p/header)
+ - [`reth p2p body`](/cli/reth/p2p/body)
+ - [`reth p2p rlpx`](/cli/reth/p2p/rlpx)
+ - [`reth p2p rlpx ping`](/cli/reth/p2p/rlpx/ping)
+ - [`reth config`](/cli/reth/config)
+ - [`reth debug`](/cli/reth/debug)
+ - [`reth debug execution`](/cli/reth/debug/execution)
+ - [`reth debug merkle`](/cli/reth/debug/merkle)
+ - [`reth debug in-memory-merkle`](/cli/reth/debug/in-memory-merkle)
+ - [`reth debug build-block`](/cli/reth/debug/build-block)
+ - [`reth recover`](/cli/reth/recover)
+ - [`reth recover storage-tries`](/cli/reth/recover/storage-tries)
+ - [`reth prune`](/cli/reth/prune)
diff --git a/book/cli/cli.md b/book/vocs/docs/pages/cli/cli.mdx
similarity index 83%
rename from book/cli/cli.md
rename to book/vocs/docs/pages/cli/cli.mdx
index ef1a98af52..20046ce9e7 100644
--- a/book/cli/cli.md
+++ b/book/vocs/docs/pages/cli/cli.mdx
@@ -1,7 +1,9 @@
+import Summary from './SUMMARY.mdx';
+
# CLI Reference
The Reth node is operated via the CLI by running the `reth node` command. To stop it, press `ctrl-c`. You may need to wait a bit as Reth tears down existing p2p connections or other cleanup tasks.
However, Reth has more commands:
-{{#include ./SUMMARY.md}}
+
diff --git a/book/cli/op-reth.md b/book/vocs/docs/pages/cli/op-reth.md
similarity index 100%
rename from book/cli/op-reth.md
rename to book/vocs/docs/pages/cli/op-reth.md
diff --git a/book/cli/reth.md b/book/vocs/docs/pages/cli/reth.mdx
similarity index 100%
rename from book/cli/reth.md
rename to book/vocs/docs/pages/cli/reth.mdx
diff --git a/book/cli/reth/config.md b/book/vocs/docs/pages/cli/reth/config.mdx
similarity index 100%
rename from book/cli/reth/config.md
rename to book/vocs/docs/pages/cli/reth/config.mdx
diff --git a/book/cli/reth/db.md b/book/vocs/docs/pages/cli/reth/db.mdx
similarity index 100%
rename from book/cli/reth/db.md
rename to book/vocs/docs/pages/cli/reth/db.mdx
diff --git a/book/cli/reth/db/checksum.md b/book/vocs/docs/pages/cli/reth/db/checksum.mdx
similarity index 100%
rename from book/cli/reth/db/checksum.md
rename to book/vocs/docs/pages/cli/reth/db/checksum.mdx
diff --git a/book/cli/reth/db/clear.md b/book/vocs/docs/pages/cli/reth/db/clear.mdx
similarity index 100%
rename from book/cli/reth/db/clear.md
rename to book/vocs/docs/pages/cli/reth/db/clear.mdx
diff --git a/book/cli/reth/db/clear/mdbx.md b/book/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx
similarity index 100%
rename from book/cli/reth/db/clear/mdbx.md
rename to book/vocs/docs/pages/cli/reth/db/clear/mdbx.mdx
diff --git a/book/cli/reth/db/clear/static-file.md b/book/vocs/docs/pages/cli/reth/db/clear/static-file.mdx
similarity index 100%
rename from book/cli/reth/db/clear/static-file.md
rename to book/vocs/docs/pages/cli/reth/db/clear/static-file.mdx
diff --git a/book/cli/reth/db/diff.md b/book/vocs/docs/pages/cli/reth/db/diff.mdx
similarity index 100%
rename from book/cli/reth/db/diff.md
rename to book/vocs/docs/pages/cli/reth/db/diff.mdx
diff --git a/book/cli/reth/db/drop.md b/book/vocs/docs/pages/cli/reth/db/drop.mdx
similarity index 100%
rename from book/cli/reth/db/drop.md
rename to book/vocs/docs/pages/cli/reth/db/drop.mdx
diff --git a/book/cli/reth/db/get.md b/book/vocs/docs/pages/cli/reth/db/get.mdx
similarity index 100%
rename from book/cli/reth/db/get.md
rename to book/vocs/docs/pages/cli/reth/db/get.mdx
diff --git a/book/cli/reth/db/get/mdbx.md b/book/vocs/docs/pages/cli/reth/db/get/mdbx.mdx
similarity index 100%
rename from book/cli/reth/db/get/mdbx.md
rename to book/vocs/docs/pages/cli/reth/db/get/mdbx.mdx
diff --git a/book/cli/reth/db/get/static-file.md b/book/vocs/docs/pages/cli/reth/db/get/static-file.mdx
similarity index 100%
rename from book/cli/reth/db/get/static-file.md
rename to book/vocs/docs/pages/cli/reth/db/get/static-file.mdx
diff --git a/book/cli/reth/db/list.md b/book/vocs/docs/pages/cli/reth/db/list.mdx
similarity index 100%
rename from book/cli/reth/db/list.md
rename to book/vocs/docs/pages/cli/reth/db/list.mdx
diff --git a/book/cli/reth/db/path.md b/book/vocs/docs/pages/cli/reth/db/path.mdx
similarity index 100%
rename from book/cli/reth/db/path.md
rename to book/vocs/docs/pages/cli/reth/db/path.mdx
diff --git a/book/cli/reth/db/stats.md b/book/vocs/docs/pages/cli/reth/db/stats.mdx
similarity index 100%
rename from book/cli/reth/db/stats.md
rename to book/vocs/docs/pages/cli/reth/db/stats.mdx
diff --git a/book/cli/reth/db/version.md b/book/vocs/docs/pages/cli/reth/db/version.mdx
similarity index 100%
rename from book/cli/reth/db/version.md
rename to book/vocs/docs/pages/cli/reth/db/version.mdx
diff --git a/book/cli/reth/debug.md b/book/vocs/docs/pages/cli/reth/debug.mdx
similarity index 100%
rename from book/cli/reth/debug.md
rename to book/vocs/docs/pages/cli/reth/debug.mdx
diff --git a/book/cli/reth/debug/build-block.md b/book/vocs/docs/pages/cli/reth/debug/build-block.mdx
similarity index 100%
rename from book/cli/reth/debug/build-block.md
rename to book/vocs/docs/pages/cli/reth/debug/build-block.mdx
diff --git a/book/cli/reth/debug/execution.md b/book/vocs/docs/pages/cli/reth/debug/execution.mdx
similarity index 100%
rename from book/cli/reth/debug/execution.md
rename to book/vocs/docs/pages/cli/reth/debug/execution.mdx
diff --git a/book/cli/reth/debug/in-memory-merkle.md b/book/vocs/docs/pages/cli/reth/debug/in-memory-merkle.mdx
similarity index 100%
rename from book/cli/reth/debug/in-memory-merkle.md
rename to book/vocs/docs/pages/cli/reth/debug/in-memory-merkle.mdx
diff --git a/book/cli/reth/debug/merkle.md b/book/vocs/docs/pages/cli/reth/debug/merkle.mdx
similarity index 100%
rename from book/cli/reth/debug/merkle.md
rename to book/vocs/docs/pages/cli/reth/debug/merkle.mdx
diff --git a/book/cli/reth/download.md b/book/vocs/docs/pages/cli/reth/download.mdx
similarity index 100%
rename from book/cli/reth/download.md
rename to book/vocs/docs/pages/cli/reth/download.mdx
diff --git a/book/cli/reth/dump-genesis.md b/book/vocs/docs/pages/cli/reth/dump-genesis.mdx
similarity index 100%
rename from book/cli/reth/dump-genesis.md
rename to book/vocs/docs/pages/cli/reth/dump-genesis.mdx
diff --git a/book/cli/reth/import-era.md b/book/vocs/docs/pages/cli/reth/import-era.mdx
similarity index 100%
rename from book/cli/reth/import-era.md
rename to book/vocs/docs/pages/cli/reth/import-era.mdx
diff --git a/book/cli/reth/import.md b/book/vocs/docs/pages/cli/reth/import.mdx
similarity index 100%
rename from book/cli/reth/import.md
rename to book/vocs/docs/pages/cli/reth/import.mdx
diff --git a/book/cli/reth/init-state.md b/book/vocs/docs/pages/cli/reth/init-state.mdx
similarity index 100%
rename from book/cli/reth/init-state.md
rename to book/vocs/docs/pages/cli/reth/init-state.mdx
diff --git a/book/cli/reth/init.md b/book/vocs/docs/pages/cli/reth/init.mdx
similarity index 100%
rename from book/cli/reth/init.md
rename to book/vocs/docs/pages/cli/reth/init.mdx
diff --git a/book/cli/reth/node.md b/book/vocs/docs/pages/cli/reth/node.mdx
similarity index 100%
rename from book/cli/reth/node.md
rename to book/vocs/docs/pages/cli/reth/node.mdx
diff --git a/book/cli/reth/p2p.md b/book/vocs/docs/pages/cli/reth/p2p.mdx
similarity index 100%
rename from book/cli/reth/p2p.md
rename to book/vocs/docs/pages/cli/reth/p2p.mdx
diff --git a/book/cli/reth/p2p/body.md b/book/vocs/docs/pages/cli/reth/p2p/body.mdx
similarity index 100%
rename from book/cli/reth/p2p/body.md
rename to book/vocs/docs/pages/cli/reth/p2p/body.mdx
diff --git a/book/cli/reth/p2p/header.md b/book/vocs/docs/pages/cli/reth/p2p/header.mdx
similarity index 100%
rename from book/cli/reth/p2p/header.md
rename to book/vocs/docs/pages/cli/reth/p2p/header.mdx
diff --git a/book/cli/reth/p2p/rlpx.md b/book/vocs/docs/pages/cli/reth/p2p/rlpx.mdx
similarity index 100%
rename from book/cli/reth/p2p/rlpx.md
rename to book/vocs/docs/pages/cli/reth/p2p/rlpx.mdx
diff --git a/book/cli/reth/p2p/rlpx/ping.md b/book/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx
similarity index 100%
rename from book/cli/reth/p2p/rlpx/ping.md
rename to book/vocs/docs/pages/cli/reth/p2p/rlpx/ping.mdx
diff --git a/book/cli/reth/prune.md b/book/vocs/docs/pages/cli/reth/prune.mdx
similarity index 100%
rename from book/cli/reth/prune.md
rename to book/vocs/docs/pages/cli/reth/prune.mdx
diff --git a/book/cli/reth/recover.md b/book/vocs/docs/pages/cli/reth/recover.mdx
similarity index 100%
rename from book/cli/reth/recover.md
rename to book/vocs/docs/pages/cli/reth/recover.mdx
diff --git a/book/cli/reth/recover/storage-tries.md b/book/vocs/docs/pages/cli/reth/recover/storage-tries.mdx
similarity index 100%
rename from book/cli/reth/recover/storage-tries.md
rename to book/vocs/docs/pages/cli/reth/recover/storage-tries.mdx
diff --git a/book/cli/reth/stage.md b/book/vocs/docs/pages/cli/reth/stage.mdx
similarity index 100%
rename from book/cli/reth/stage.md
rename to book/vocs/docs/pages/cli/reth/stage.mdx
diff --git a/book/cli/reth/stage/drop.md b/book/vocs/docs/pages/cli/reth/stage/drop.mdx
similarity index 100%
rename from book/cli/reth/stage/drop.md
rename to book/vocs/docs/pages/cli/reth/stage/drop.mdx
diff --git a/book/cli/reth/stage/dump.md b/book/vocs/docs/pages/cli/reth/stage/dump.mdx
similarity index 100%
rename from book/cli/reth/stage/dump.md
rename to book/vocs/docs/pages/cli/reth/stage/dump.mdx
diff --git a/book/cli/reth/stage/dump/account-hashing.md b/book/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx
similarity index 100%
rename from book/cli/reth/stage/dump/account-hashing.md
rename to book/vocs/docs/pages/cli/reth/stage/dump/account-hashing.mdx
diff --git a/book/cli/reth/stage/dump/execution.md b/book/vocs/docs/pages/cli/reth/stage/dump/execution.mdx
similarity index 100%
rename from book/cli/reth/stage/dump/execution.md
rename to book/vocs/docs/pages/cli/reth/stage/dump/execution.mdx
diff --git a/book/cli/reth/stage/dump/merkle.md b/book/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx
similarity index 100%
rename from book/cli/reth/stage/dump/merkle.md
rename to book/vocs/docs/pages/cli/reth/stage/dump/merkle.mdx
diff --git a/book/cli/reth/stage/dump/storage-hashing.md b/book/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx
similarity index 100%
rename from book/cli/reth/stage/dump/storage-hashing.md
rename to book/vocs/docs/pages/cli/reth/stage/dump/storage-hashing.mdx
diff --git a/book/cli/reth/stage/run.md b/book/vocs/docs/pages/cli/reth/stage/run.mdx
similarity index 100%
rename from book/cli/reth/stage/run.md
rename to book/vocs/docs/pages/cli/reth/stage/run.mdx
diff --git a/book/cli/reth/stage/unwind.md b/book/vocs/docs/pages/cli/reth/stage/unwind.mdx
similarity index 100%
rename from book/cli/reth/stage/unwind.md
rename to book/vocs/docs/pages/cli/reth/stage/unwind.mdx
diff --git a/book/cli/reth/stage/unwind/num-blocks.md b/book/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx
similarity index 100%
rename from book/cli/reth/stage/unwind/num-blocks.md
rename to book/vocs/docs/pages/cli/reth/stage/unwind/num-blocks.mdx
diff --git a/book/cli/reth/stage/unwind/to-block.md b/book/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx
similarity index 100%
rename from book/cli/reth/stage/unwind/to-block.md
rename to book/vocs/docs/pages/cli/reth/stage/unwind/to-block.mdx
diff --git a/book/cli/reth/test-vectors/tables.md b/book/vocs/docs/pages/cli/reth/test-vectors/tables.mdx
similarity index 100%
rename from book/cli/reth/test-vectors/tables.md
rename to book/vocs/docs/pages/cli/reth/test-vectors/tables.mdx
diff --git a/book/developers/exex/hello-world.md b/book/vocs/docs/pages/exex/hello-world.mdx
similarity index 70%
rename from book/developers/exex/hello-world.md
rename to book/vocs/docs/pages/exex/hello-world.mdx
index c1f3e5af94..547f6e4e31 100644
--- a/book/developers/exex/hello-world.md
+++ b/book/vocs/docs/pages/exex/hello-world.mdx
@@ -1,3 +1,7 @@
+---
+description: Example of a minimal Hello World ExEx in Reth.
+---
+
# Hello World
Let's write a simple "Hello World" ExEx that emits a log every time a new chain of blocks is committed, reverted, or reorged.
@@ -14,15 +18,15 @@ cd my-exex
And add Reth as a dependency in `Cargo.toml`
```toml
-{{#include ../../sources/exex/hello-world/Cargo.toml}}
+// [!include ~/snippets/sources/exex/hello-world/Cargo.toml]
```
### Default Reth node
Now, let's jump to our `main.rs` and start by initializing and launching a default Reth node
-```rust,norun,noplayground,ignore
-{{#include ../../sources/exex/hello-world/src/bin/1.rs}}
+```rust
+// [!include ~/snippets/sources/exex/hello-world/src/bin/1.rs]
```
You can already test that it works by running the binary and initializing the Holesky node in a custom datadir
@@ -42,8 +46,8 @@ $ cargo run -- init --chain holesky --datadir data
The simplest ExEx is just an async function that never returns. We need to install it into our node
-```rust,norun,noplayground,ignore
-{{#include ../../sources/exex/hello-world/src/bin/2.rs}}
+```rust
+// [!include ~/snippets/sources/exex/hello-world/src/bin/2.rs]
```
See that unused `_ctx`? That's the context that we'll use to listen to new notifications coming from the main node,
@@ -63,17 +67,17 @@ If you try running a node with an ExEx that exits, the node will exit as well.
Now, let's extend our simplest ExEx and start actually listening to new notifications, log them, and send events back to the main node
-```rust,norun,noplayground,ignore
-{{#include ../../sources/exex/hello-world/src/bin/3.rs}}
+```rust
+// [!include ~/snippets/sources/exex/hello-world/src/bin/3.rs]
```
Woah, there's a lot of new stuff here! Let's go through it step by step:
-- First, we've added a `while let Some(notification) = ctx.notifications.recv().await` loop that waits for new notifications to come in.
- - The main node is responsible for sending notifications to the ExEx, so we're waiting for them to come in.
-- Next, we've added a `match ¬ification { ... }` block that matches on the type of the notification.
- - In each case, we're logging the notification and the corresponding block range, be it a chain commit, revert, or reorg.
-- Finally, we're checking if the notification contains a committed chain, and if it does, we're sending a `ExExEvent::FinishedHeight` event back to the main node using the `ctx.events.send` method.
+- First, we've added a `while let Some(notification) = ctx.notifications.recv().await` loop that waits for new notifications to come in.
+ - The main node is responsible for sending notifications to the ExEx, so we're waiting for them to come in.
+- Next, we've added a `match ¬ification { ... }` block that matches on the type of the notification.
+ - In each case, we're logging the notification and the corresponding block range, be it a chain commit, revert, or reorg.
+- Finally, we're checking if the notification contains a committed chain, and if it does, we're sending a `ExExEvent::FinishedHeight` event back to the main node using the `ctx.events.send` method.
@@ -88,4 +92,4 @@ What we've arrived at is the [minimal ExEx example](https://github.com/paradigmx
## What's next?
-Let's do something a bit more interesting, and see how you can [keep track of some state](./tracking-state.md) inside your ExEx.
+Let's do something a bit more interesting, and see how you can [keep track of some state](./tracking-state) inside your ExEx.
diff --git a/book/developers/exex/how-it-works.md b/book/vocs/docs/pages/exex/how-it-works.mdx
similarity index 67%
rename from book/developers/exex/how-it-works.md
rename to book/vocs/docs/pages/exex/how-it-works.mdx
index 7f80d71cbf..21162a7562 100644
--- a/book/developers/exex/how-it-works.md
+++ b/book/vocs/docs/pages/exex/how-it-works.mdx
@@ -1,3 +1,7 @@
+---
+description: How Execution Extensions (ExExes) work in Reth.
+---
+
# How do ExExes work?
ExExes are just [Futures](https://doc.rust-lang.org/std/future/trait.Future.html) that run indefinitely alongside Reth
@@ -7,12 +11,13 @@ An ExEx is usually driven by and acts on new notifications about chain commits,
They are installed into the node by using the [node builder](https://reth.rs/docs/reth/builder/struct.NodeBuilder.html).
Reth manages the lifecycle of all ExExes, including:
-- Polling ExEx futures
-- Sending [notifications](https://reth.rs/docs/reth_exex/enum.ExExNotification.html) about new chain, reverts,
- and reorgs from historical and live sync
-- Processing [events](https://reth.rs/docs/reth_exex/enum.ExExEvent.html) emitted by ExExes
-- Pruning (in case of a full or pruned node) only the data that has been processed by all ExExes
-- Shutting ExExes down when the node is shut down
+
+- Polling ExEx futures
+- Sending [notifications](https://reth.rs/docs/reth_exex/enum.ExExNotification.html) about new chain, reverts,
+ and reorgs from historical and live sync
+- Processing [events](https://reth.rs/docs/reth_exex/enum.ExExEvent.html) emitted by ExExes
+- Pruning (in case of a full or pruned node) only the data that has been processed by all ExExes
+- Shutting ExExes down when the node is shut down
## Pruning
diff --git a/book/developers/exex/exex.md b/book/vocs/docs/pages/exex/overview.mdx
similarity index 62%
rename from book/developers/exex/exex.md
rename to book/vocs/docs/pages/exex/overview.mdx
index 25372a7c92..abfcc8f3b8 100644
--- a/book/developers/exex/exex.md
+++ b/book/vocs/docs/pages/exex/overview.mdx
@@ -1,9 +1,13 @@
+---
+description: Introduction to Execution Extensions (ExEx) in Reth.
+---
+
# Execution Extensions (ExEx)
## What are Execution Extensions?
Execution Extensions (or ExExes, for short) allow developers to build their own infrastructure that relies on Reth
-as a base for driving the chain (be it [Ethereum](../../run/mainnet.md) or [OP Stack](../../run/optimism.md)) forward.
+as a base for driving the chain (be it [Ethereum](/run/ethereum) or [OP Stack](/run/opstack)) forward.
An Execution Extension is a task that derives its state from changes in Reth's state.
Some examples of such state derivations are rollups, bridges, and indexers.
@@ -18,14 +22,18 @@ Read more about things you can build with Execution Extensions in the [Paradigm
Execution Extensions are not separate processes that connect to the main Reth node process.
Instead, ExExes are compiled into the same binary as Reth, and run alongside it, using shared memory for communication.
-If you want to build an Execution Extension that sends data into a separate process, check out the [Remote](./remote.md) chapter.
+If you want to build an Execution Extension that sends data into a separate process, check out the [Remote](/exex/remote) chapter.
## How do I build an Execution Extension?
Let's dive into how to build our own ExEx from scratch, add tests for it,
and run it on the Holesky testnet.
-1. [How do ExExes work?](./how-it-works.md)
-1. [Hello World](./hello-world.md)
-1. [Tracking State](./tracking-state.md)
-1. [Remote](./remote.md)
+1. [How do ExExes work?](/exex/how-it-works)
+1. [Hello World](/exex/hello-world)
+1. [Tracking State](/exex/tracking-state)
+1. [Remote](/exex/remote)
+
+:::tip
+For more practical examples and ready-to-use ExEx implementations, check out the [reth-exex-examples](https://github.com/paradigmxyz/reth-exex-examples) repository which contains various ExEx examples including indexers, bridges, and other state derivation patterns.
+:::
diff --git a/book/developers/exex/remote.md b/book/vocs/docs/pages/exex/remote.mdx
similarity index 76%
rename from book/developers/exex/remote.md
rename to book/vocs/docs/pages/exex/remote.mdx
index 0ec704308f..92da337208 100644
--- a/book/developers/exex/remote.md
+++ b/book/vocs/docs/pages/exex/remote.mdx
@@ -1,10 +1,15 @@
+---
+description: Building a remote ExEx that communicates via gRPC.
+---
+
# Remote Execution Extensions
In this chapter, we will learn how to create an ExEx that emits all notifications to an external process.
We will use [Tonic](https://github.com/hyperium/tonic) to create a gRPC server and a client.
-- The server binary will have the Reth client, our ExEx and the gRPC server.
-- The client binary will have the gRPC client that connects to the server.
+
+- The server binary will have the Reth client, our ExEx and the gRPC server.
+- The client binary will have the gRPC client that connects to the server.
## Prerequisites
@@ -21,20 +26,21 @@ $ cargo new --lib exex-remote
$ cd exex-remote
```
-We will also need a bunch of dependencies. Some of them you know from the [Hello World](./hello-world.md) chapter,
+We will also need a bunch of dependencies. Some of them you know from the [Hello World](./hello-world) chapter,
but some are specific to what we need now.
```toml
-{{#include ../../sources/exex/remote/Cargo.toml}}
+// [!include ~/snippets/sources/exex/remote/Cargo.toml]
```
We also added a build dependency for Tonic. We will use it to generate the Rust code for our
Protobuf definitions at compile time. Read more about using Tonic in the
-[introductory tutorial](https://github.com/hyperium/tonic/blob/6a213e9485965db0628591e30577ed81cdaeaf2b/examples/helloworld-tutorial.md).
+[introductory tutorial](https://github.com/hyperium/tonic/blob/6a213e9485965db0628591e30577ed81cdaeaf2b/examples/helloworld-tutorial.md).
Also, we now have two separate binaries:
-- `exex` is the server binary that will run the ExEx and the gRPC server.
-- `consumer` is the client binary that will connect to the server and receive notifications.
+
+- `exex` is the server binary that will run the ExEx and the gRPC server.
+- `consumer` is the client binary that will connect to the server and receive notifications.
### Create the Protobuf definitions
@@ -53,12 +59,13 @@ For an example of a full schema, see the [Remote ExEx](https://github.com/paradi
```protobuf
-{{#include ../../sources/exex/remote/proto/exex.proto}}
+// [!include ~/snippets/sources/exex/remote/proto/exex.proto]
```
To instruct Tonic to generate the Rust code using this `.proto`, add the following lines to your `lib.rs` file:
-```rust,norun,noplayground,ignore
-{{#include ../../sources/exex/remote/src/lib.rs}}
+
+```rust
+// [!include ~/snippets/sources/exex/remote/src/lib.rs]
```
## ExEx and gRPC server
@@ -70,8 +77,8 @@ We will now create the ExEx and the gRPC server in our `src/exex.rs` file.
Let's create a minimal gRPC server that listens on the port `:10000`, and spawn it using
the [NodeBuilder](https://reth.rs/docs/reth/builder/struct.NodeBuilder.html)'s [task executor](https://reth.rs/docs/reth/tasks/struct.TaskExecutor.html).
-```rust,norun,noplayground,ignore
-{{#include ../../sources/exex/remote/src/exex_1.rs}}
+```rust
+// [!include ~/snippets/sources/exex/remote/src/exex_1.rs]
```
Currently, it does not send anything on the stream.
@@ -81,8 +88,8 @@ to send new `ExExNotification` on it.
Let's create this channel in the `main` function where we will have both gRPC server and ExEx initiated,
and save the sender part (that way we will be able to create new receivers) of this channel in our gRPC server.
-```rust,norun,noplayground,ignore
-{{#include ../../sources/exex/remote/src/exex_2.rs}}
+```rust
+// [!include ~/snippets/sources/exex/remote/src/exex_2.rs]
```
And with that, we're ready to handle incoming notifications, serialize them with [bincode](https://docs.rs/bincode/)
@@ -91,8 +98,8 @@ and send back to the client.
For each incoming request, we spawn a separate tokio task that will run in the background,
and then return the stream receiver to the client.
-```rust,norun,noplayground,ignore
-{{#rustdoc_include ../../sources/exex/remote/src/exex_3.rs:snippet}}
+```rust
+// [!include ~/snippets/sources/exex/remote/src/exex_3.rs]
```
That's it for the gRPC server part! It doesn't receive anything on the `notifications` channel yet,
@@ -110,25 +117,24 @@ Don't forget to emit `ExExEvent::FinishedHeight`
-```rust,norun,noplayground,ignore
-{{#rustdoc_include ../../sources/exex/remote/src/exex_4.rs:snippet}}
+```rust
+// [!include ~/snippets/sources/exex/remote/src/exex_4.rs]
```
All that's left is to connect all pieces together: install our ExEx in the node and pass the sender part
of communication channel to it.
-```rust,norun,noplayground,ignore
-{{#rustdoc_include ../../sources/exex/remote/src/exex.rs:snippet}}
+```rust
+// [!include ~/snippets/sources/exex/remote/src/exex.rs]
```
### Full `exex.rs` code
-Click to expand
-
-```rust,norun,noplayground,ignore
-{{#include ../../sources/exex/remote/src/exex.rs}}
-```
+<details>
+  <summary>Click to expand</summary>
+
+  ```rust
+  // [!include ~/snippets/sources/exex/remote/src/exex.rs]
+  ```
+
+</details>
## Consumer
@@ -143,8 +149,8 @@ because notifications can get very heavy
-```rust,norun,noplayground,ignore
-{{#include ../../sources/exex/remote/src/consumer.rs}}
+```rust
+// [!include ~/snippets/sources/exex/remote/src/consumer.rs]
```
## Running
@@ -162,4 +168,4 @@ And in the other, we will run our consumer:
cargo run --bin consumer --release
```
-
+
diff --git a/book/developers/exex/tracking-state.md b/book/vocs/docs/pages/exex/tracking-state.mdx
similarity index 63%
rename from book/developers/exex/tracking-state.md
rename to book/vocs/docs/pages/exex/tracking-state.mdx
index 92e4ee0f18..cd704c8896 100644
--- a/book/developers/exex/tracking-state.md
+++ b/book/vocs/docs/pages/exex/tracking-state.mdx
@@ -1,8 +1,12 @@
+---
+description: How to track state in a custom ExEx.
+---
+
# Tracking State
In this chapter, we'll learn how to keep track of some state inside our ExEx.
-Let's continue with our Hello World example from the [previous chapter](./hello-world.md).
+Let's continue with our Hello World example from the [previous chapter](./hello-world).
### Turning ExEx into a struct
@@ -18,8 +22,8 @@ because you can't access variables inside the function to assert the state of yo
-```rust,norun,noplayground,ignore
-{{#include ../../sources/exex/tracking-state/src/bin/1.rs}}
+```rust
+// [!include ~/snippets/sources/exex/tracking-state/src/bin/1.rs]
```
For those who are not familiar with how async Rust works on a lower level, that may seem scary,
@@ -39,23 +43,25 @@ With all that done, we're now free to add more fields to our `MyExEx` struct, an
Our ExEx will count the number of transactions in each block and log it to the console.
-```rust,norun,noplayground,ignore
-{{#include ../../sources/exex/tracking-state/src/bin/2.rs}}
+```rust
+// [!include ~/snippets/sources/exex/tracking-state/src/bin/2.rs]
```
As you can see, we added two fields to our ExEx struct:
-- `first_block` to keep track of the first block that was committed since the start of the ExEx.
-- `transactions` to keep track of the total number of transactions committed, accounting for reorgs and reverts.
+
+- `first_block` to keep track of the first block that was committed since the start of the ExEx.
+- `transactions` to keep track of the total number of transactions committed, accounting for reorgs and reverts.
We also changed our `match` block to two `if` clauses:
-- First one checks if there's a reverted chain using `notification.reverted_chain()`. If there is:
- - We subtract the number of transactions in the reverted chain from the total number of transactions.
- - It's important to do the `saturating_sub` here, because if we just started our node and
- instantly received a reorg, our `transactions` field will still be zero.
-- Second one checks if there's a committed chain using `notification.committed_chain()`. If there is:
- - We update the `first_block` field to the first block of the committed chain.
- - We add the number of transactions in the committed chain to the total number of transactions.
- - We send a `FinishedHeight` event back to the main node.
+
+- First one checks if there's a reverted chain using `notification.reverted_chain()`. If there is:
+ - We subtract the number of transactions in the reverted chain from the total number of transactions.
+ - It's important to do the `saturating_sub` here, because if we just started our node and
+ instantly received a reorg, our `transactions` field will still be zero.
+- Second one checks if there's a committed chain using `notification.committed_chain()`. If there is:
+ - We update the `first_block` field to the first block of the committed chain.
+ - We add the number of transactions in the committed chain to the total number of transactions.
+ - We send a `FinishedHeight` event back to the main node.
Finally, on every notification, we log the total number of transactions and
the first block that was committed since the start of the ExEx.
diff --git a/book/vocs/docs/pages/index.mdx b/book/vocs/docs/pages/index.mdx
new file mode 100644
index 0000000000..5e65d0695c
--- /dev/null
+++ b/book/vocs/docs/pages/index.mdx
@@ -0,0 +1,162 @@
+---
+content:
+ width: 100%
+layout: landing
+showLogo: false
+title: Reth
+description: Secure, performant and modular node implementation that supports both Ethereum and OP-Stack chains.
+---
+
+import { HomePage, Sponsors } from "vocs/components";
+import { SdkShowcase } from "../components/SdkShowcase";
+import { TrustedBy } from "../components/TrustedBy";
+
+{/* Minimal hero sketch using the vocs HomePage components imported above;
+    the exact wrappers, styling, and button routes are assumptions. */}
+<HomePage.Root>
+  <HomePage.Logo />
+  <HomePage.Tagline>
+    Secure, performant, and modular blockchain SDK and node.
+  </HomePage.Tagline>
+  <HomePage.Buttons>
+    <HomePage.Button href="/run/ethereum" variant="accent">
+      Run a Node
+    </HomePage.Button>
+    <HomePage.Button href="/sdk/overview">Build a Node</HomePage.Button>
+    <HomePage.Button href="/introduction/why-reth">Why Reth?</HomePage.Button>
+  </HomePage.Buttons>
+</HomePage.Root>
+
+ :::code-group
+
+ ```bash [Run a Node]
+ # Install the binary
+ brew install paradigmxyz/brew/reth
+
+ # Run the node with JSON-RPC enabled
+ reth node --http --http.api eth,trace
+ ```
+
+ ```rust [Build a Node]
+ // .. snip ..
+ let handle = node_builder
+ .with_types::<EthereumNode>()
+ .with_components(EthereumNode::components())
+ .with_add_ons(EthereumAddOns::default())
+ .launch()
+ .await?;
+ ```
+
+ :::
+
+
+
+## Trusted by the Best
+
+Leading infra companies use Reth for MEV applications, staking, RPC services and generating zero-knowledge proofs.
+
+<TrustedBy />
+
+
+## Built with Reth SDK
+
+Production chains and networks powered by Reth's modular architecture. These nodes are built from existing components without forking, saving significant engineering time while improving maintainability.
+
+<SdkShowcase />
+
+
+## Supporters
+
+<Sponsors />
diff --git a/book/installation/binaries.md b/book/vocs/docs/pages/installation/binaries.mdx
similarity index 90%
rename from book/installation/binaries.md
rename to book/vocs/docs/pages/installation/binaries.mdx
index fc741805cd..56c5cf2bac 100644
--- a/book/installation/binaries.md
+++ b/book/vocs/docs/pages/installation/binaries.mdx
@@ -1,3 +1,7 @@
+---
+description: Instructions for installing Reth using pre-built binaries for Windows, macOS, and Linux, including Homebrew and Arch Linux AUR options. Explains how to verify binary signatures and provides details about the release signing key.
+---
+
# Binaries
[**Archives of precompiled binaries of reth are available for Windows, macOS and Linux.**](https://github.com/paradigmxyz/reth/releases) They are static executables. Users of platforms not explicitly listed below should download one of these archives.
@@ -41,7 +45,7 @@ Replace the filenames by those corresponding to the downloaded Reth release.
Releases are signed using the key with ID [`50FB7CC55B2E8AFA59FE03B7AA5ED56A7FBF253E`](https://keyserver.ubuntu.com/pks/lookup?search=50FB7CC55B2E8AFA59FE03B7AA5ED56A7FBF253E&fingerprint=on&op=index).
-```none
+```text
-----BEGIN PGP PUBLIC KEY BLOCK-----
mDMEZl4GjhYJKwYBBAHaRw8BAQdAU5gnINBAfIgF9S9GzZ1zHDwZtv/WcJRIQI+h
diff --git a/book/installation/build-for-arm-devices.md b/book/vocs/docs/pages/installation/build-for-arm-devices.mdx
similarity index 82%
rename from book/installation/build-for-arm-devices.md
rename to book/vocs/docs/pages/installation/build-for-arm-devices.mdx
index 21d32c9e8b..534fe1c014 100644
--- a/book/installation/build-for-arm-devices.md
+++ b/book/vocs/docs/pages/installation/build-for-arm-devices.mdx
@@ -1,3 +1,7 @@
+---
+description: Building and troubleshooting Reth on ARM devices.
+---
+
# Building for ARM devices
Reth can be built for and run on ARM devices, but there are a few things to take into consideration before.
@@ -37,8 +41,8 @@ Some newer versions of ARM architecture offer support for Large Virtual Address
### Additional Resources
-- [ARM developer documentation](https://developer.arm.com/documentation/ddi0406/cb/Appendixes/ARMv4-and-ARMv5-Differences/System-level-memory-model/Virtual-memory-support)
-- [ARM Community Forums](https://community.arm.com)
+- [ARM developer documentation](https://developer.arm.com/documentation/ddi0406/cb/Appendixes/ARMv4-and-ARMv5-Differences/System-level-memory-model/Virtual-memory-support)
+- [ARM Community Forums](https://community.arm.com)
## Build Reth
@@ -57,16 +61,21 @@ This error is raised whenever MDBX can not open a database due to the limitation
You will need to recompile the Linux Kernel to fix the issue.
A simple and safe approach to achieve this is to use the Armbian build framework to create a new image of the OS that will be flashed to a storage device of your choice - an SD card for example - with the following kernel feature values:
-- **Page Size**: 64 KB
-- **Virtual Address Space Size**: 48 Bits
+
+- **Page Size**: 64 KB
+- **Virtual Address Space Size**: 48 Bits
To be able to build an Armbian image and set those values, you will need to:
-- Clone the Armbian build framework repository
+
+- Clone the Armbian build framework repository
+
```bash
git clone https://github.com/armbian/build
cd build
```
-- Run the compile script with the following parameters:
+
+- Run the compile script with the following parameters:
+
```bash
./compile.sh \
BUILD_MINIMAL=yes \
@@ -74,5 +83,6 @@ BUILD_DESKTOP=no \
KERNEL_CONFIGURE=yes \
CARD_DEVICE="/dev/sdX" # Replace sdX with your own storage device
```
-- From there, you will be able to select the target board, the OS release and branch. Then, once you get in the **Kernel Configuration** screen, select the **Kernel Features options** and set the previous values accordingly.
-- Wait for the process to finish, plug your storage device into your board and start it. You can now download or install Reth and it should work properly.
+
+- From there, you will be able to select the target board, the OS release and branch. Then, once you get in the **Kernel Configuration** screen, select the **Kernel Features options** and set the previous values accordingly.
+- Wait for the process to finish, plug your storage device into your board and start it. You can now download or install Reth and it should work properly.
diff --git a/book/installation/docker.md b/book/vocs/docs/pages/installation/docker.mdx
similarity index 80%
rename from book/installation/docker.md
rename to book/vocs/docs/pages/installation/docker.mdx
index 6ce2ae50a5..8774d549a5 100644
--- a/book/installation/docker.md
+++ b/book/vocs/docs/pages/installation/docker.mdx
@@ -1,3 +1,7 @@
+---
+description: Guide to running Reth using Docker, including obtaining images from GitHub or building locally, using Docker Compose.
+---
+
# Docker
There are two ways to obtain a Reth Docker image:
@@ -8,9 +12,10 @@ There are two ways to obtain a Reth Docker image:
Once you have obtained the Docker image, proceed to [Using the Docker
image](#using-the-docker-image).
-> **Note**
->
-> Reth requires Docker Engine version 20.10.10 or higher due to [missing support](https://docs.docker.com/engine/release-notes/20.10/#201010) for the `clone3` syscall in previous versions.
+:::note
+Reth requires Docker Engine version 20.10.10 or higher due to [missing support](https://docs.docker.com/engine/release-notes/20.10/#201010) for the `clone3` syscall in previous versions.
+:::
+
## GitHub
Reth docker images for both x86_64 and ARM64 machines are published with every release of reth on GitHub Container Registry.
@@ -52,6 +57,7 @@ docker run reth:local --version
## Using the Docker image
There are two ways to use the Docker image:
+
1. [Using Docker](#using-plain-docker)
2. [Using Docker Compose](#using-docker-compose)
@@ -86,12 +92,12 @@ To run Reth with Docker Compose, run the following command from a shell inside t
docker compose -f etc/docker-compose.yml -f etc/lighthouse.yml up -d
```
-> **Note**
->
-> If you want to run Reth with a CL that is not Lighthouse:
->
-> - The JWT for the consensus client can be found at `etc/jwttoken/jwt.hex` in this repository, after the `etc/generate-jwt.sh` script is run
-> - The Reth Engine API is accessible on `localhost:8551`
+:::note
+If you want to run Reth with a CL that is not Lighthouse:
+
+- The JWT for the consensus client can be found at `etc/jwttoken/jwt.hex` in this repository, after the `etc/generate-jwt.sh` script is run
+- The Reth Engine API is accessible on `localhost:8551`
+:::
To check if Reth is running correctly, run:
@@ -101,18 +107,19 @@ docker compose -f etc/docker-compose.yml -f etc/lighthouse.yml logs -f reth
The default `docker-compose.yml` file will create three containers:
-- Reth
-- Prometheus
-- Grafana
+- Reth
+- Prometheus
+- Grafana
The optional `lighthouse.yml` file will create two containers:
-- Lighthouse
-- [`ethereum-metrics-exporter`](https://github.com/ethpandaops/ethereum-metrics-exporter)
+- Lighthouse
+- [`ethereum-metrics-exporter`](https://github.com/ethpandaops/ethereum-metrics-exporter)
Grafana will be exposed on `localhost:3000` and accessible via default credentials (username and password is `admin`), with two available dashboards:
-- reth
-- Ethereum Metrics Exporter (works only if Lighthouse is also running)
+
+- reth
+- Ethereum Metrics Exporter (works only if Lighthouse is also running)
## Interacting with Reth inside Docker
@@ -124,7 +131,7 @@ docker exec -it reth bash
**If Reth is running with Docker Compose, replace `reth` with `reth-reth-1` in the above command**
-Refer to the [CLI docs](../cli/cli.md) to interact with Reth once inside the Reth container.
+Refer to the [CLI docs](/cli/cli) to interact with Reth once inside the Reth container.
## Run only Grafana in Docker
@@ -134,4 +141,4 @@ This allows importing existing Grafana dashboards, without running Reth in Docke
docker compose -f etc/docker-compose.yml up -d --no-deps grafana
```
-After login with `admin:admin` credentials, Prometheus should be listed under [`Grafana datasources`](http://localhost:3000/connections/datasources). Replace its `Prometheus server URL` so it points to locally running one. On Mac or Windows, use `http://host.docker.internal:9090`. On Linux, try `http://172.17.0.1:9090`.
\ No newline at end of file
+After login with `admin:admin` credentials, Prometheus should be listed under [`Grafana datasources`](http://localhost:3000/connections/datasources). Replace its `Prometheus server URL` so it points to locally running one. On Mac or Windows, use `http://host.docker.internal:9090`. On Linux, try `http://172.17.0.1:9090`.
diff --git a/book/vocs/docs/pages/installation/overview.mdx b/book/vocs/docs/pages/installation/overview.mdx
new file mode 100644
index 0000000000..8101c509cd
--- /dev/null
+++ b/book/vocs/docs/pages/installation/overview.mdx
@@ -0,0 +1,18 @@
+---
+description: Installation instructions for Reth and hardware recommendations.
+---
+
+# Installation
+
+Reth runs on Linux and macOS (Windows support is tracked).
+
+There are three main ways to obtain Reth:
+
+- [Pre-built binaries](./binaries)
+- [Docker images](./docker)
+- [Building from source](./source)
+
+:::note
+If you have Docker installed, we recommend using the [Docker Compose](./docker#using-docker-compose) configuration
+that will get you Reth, Lighthouse (Consensus Client), Prometheus and Grafana running and syncing with just one command.
+:::
diff --git a/book/vocs/docs/pages/installation/priorities.mdx b/book/vocs/docs/pages/installation/priorities.mdx
new file mode 100644
index 0000000000..4494083e39
--- /dev/null
+++ b/book/vocs/docs/pages/installation/priorities.mdx
@@ -0,0 +1,22 @@
+---
+description: Explains Reth update priorities for user classes such as payload builders and non-payload builders.
+---
+
+# Update Priorities
+
+When publishing releases, reth will include an "Update Priority" section in the release notes, in the same manner Lighthouse does.
+
+The "Update Priority" section will include a table which may appear like so:
+
+| User Class | Priority |
+| -------------------- | --------------- |
+| Payload Builders | Medium Priority |
+| Non-Payload Builders | Low Priority |
+
+To understand this table, the following terms are important:
+
+- _Payload builders_ are those who use reth to build and validate payloads.
+- _Non-payload builders_ are those who run reth for other purposes (e.g., data analysis, RPC or applications).
+- _High priority_ updates should be completed as soon as possible (e.g., hours or days).
+- _Medium priority_ updates should be completed at the next convenience (e.g., days or a week).
+- _Low priority_ updates should be completed in the next routine update cycle (e.g., two weeks).
diff --git a/book/installation/source.md b/book/vocs/docs/pages/installation/source.mdx
similarity index 72%
rename from book/installation/source.md
rename to book/vocs/docs/pages/installation/source.mdx
index d9642c4bc4..d3d412a58f 100644
--- a/book/installation/source.md
+++ b/book/vocs/docs/pages/installation/source.mdx
@@ -1,14 +1,18 @@
+---
+description: How to build, update, and troubleshoot Reth from source.
+---
+
# Build from Source
You can build Reth on Linux, macOS, Windows, and Windows WSL2.
-> **Note**
->
-> Reth does **not** work on Windows WSL1.
+:::note
+Reth does **not** work on Windows WSL1.
+:::
## Dependencies
-First, **install Rust** using [rustup](https://rustup.rs/)：
+First, **install Rust** using [rustup](https://rustup.rs/):
```bash
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
@@ -16,19 +20,20 @@ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
The rustup installer provides an easy way to update the Rust compiler, and works on all platforms.
-> **Tips**
->
-> - During installation, when prompted, enter `1` for the default installation.
-> - After Rust installation completes, try running `cargo version` . If it cannot
-> be found, run `source $HOME/.cargo/env`. After that, running `cargo version` should return the version, for example `cargo 1.68.2`.
-> - It's generally advisable to append `source $HOME/.cargo/env` to `~/.bashrc`.
+:::tip
+
+- During installation, when prompted, enter `1` for the default installation.
+- After Rust installation completes, try running `cargo version` . If it cannot
+ be found, run `source $HOME/.cargo/env`. After that, running `cargo version` should return the version, for example `cargo 1.68.2`.
+- It's generally advisable to append `source $HOME/.cargo/env` to `~/.bashrc`.
+:::
With Rust installed, follow the instructions below to install dependencies relevant to your
operating system:
-- **Ubuntu**: `apt-get install libclang-dev pkg-config build-essential`
-- **macOS**: `brew install llvm pkg-config`
-- **Windows**: `choco install llvm` or `winget install LLVM.LLVM`
+- **Ubuntu**: `apt-get install libclang-dev pkg-config build-essential`
+- **macOS**: `brew install llvm pkg-config`
+- **Windows**: `choco install llvm` or `winget install LLVM.LLVM`
These are needed to build bindings for Reth's database.
@@ -60,7 +65,7 @@ cargo build --release
This will place the reth binary under `./target/release/reth`, and you can copy it to your directory of preference after that.
-Compilation may take around 10 minutes. Installation was successful if `reth --help` displays the [command-line documentation](../cli/cli.md).
+Compilation may take around 10 minutes. Installation was successful if `reth --help` displays the [command-line documentation](/cli/cli).
If you run into any issues, please check the [Troubleshooting](#troubleshooting) section, or reach out to us on [Telegram](https://t.me/paradigm_reth).
@@ -88,11 +93,11 @@ You can customise the compiler settings used to compile Reth via
Reth includes several profiles which can be selected via the Cargo flag `--profile`.
-* `release`: default for source builds, enables most optimisations while not taking too long to
- compile.
-* `maxperf`: default for binary releases, enables aggressive optimisations including full LTO.
- Although compiling with this profile improves some benchmarks by around 20% compared to `release`,
- it imposes a _significant_ cost at compile time and is only recommended if you have a fast CPU.
+- `release`: default for source builds, enables most optimisations while not taking too long to
+ compile.
+- `maxperf`: default for binary releases, enables aggressive optimisations including full LTO.
+ Although compiling with this profile improves some benchmarks by around 20% compared to `release`,
+ it imposes a _significant_ cost at compile time and is only recommended if you have a fast CPU.
**Rust compiler flags**
@@ -107,9 +112,10 @@ RUSTFLAGS="-C target-cpu=native" cargo build --profile maxperf
Finally, some optional features are present that may improve performance, but may not be very portable,
and as such might not compile on your particular system. These are currently:
-- `jemalloc`: replaces the default system memory allocator with [`jemalloc`](https://jemalloc.net/); this feature is unstable on Windows
-- `asm-keccak`: replaces the default, pure-Rust implementation of Keccak256 with one implemented in assembly; see [the `keccak-asm` crate](https://github.com/DaniPopes/keccak-asm) for more details and supported targets
-- `min-LEVEL-logs`, where `LEVEL` is one of `error`, `warn`, `info`, `debug`, `trace`: disables compilation of logs of lower level than the given one; this in general isn't that significant, and is not recommended due to the loss of debugging that the logs would provide
+
+- `jemalloc`: replaces the default system memory allocator with [`jemalloc`](https://jemalloc.net/); this feature is unstable on Windows
+- `asm-keccak`: replaces the default, pure-Rust implementation of Keccak256 with one implemented in assembly; see [the `keccak-asm` crate](https://github.com/DaniPopes/keccak-asm) for more details and supported targets
+- `min-LEVEL-logs`, where `LEVEL` is one of `error`, `warn`, `info`, `debug`, `trace`: disables compilation of logs of lower level than the given one; this in general isn't that significant, and is not recommended due to the loss of debugging that the logs would provide
You can activate features by passing them to the `--features` or `-F` Cargo flag;
multiple features can be activated with a space- or comma-separated list to the flag:
@@ -136,7 +142,7 @@ Rust Version (MSRV) which is listed under the `rust-version` key in Reth's
If compilation fails with `(signal: 9, SIGKILL: kill)`, this could mean your machine ran out of
memory during compilation. If you are on Docker, consider increasing the memory of the container, or use a [pre-built
-binary](../installation/binaries.md).
+binary](/installation/binaries).
If compilation fails in either the `keccak-asm` or `sha3-asm` crates, it is likely that your current
system configuration is not supported. See the [`keccak-asm` target table](https://github.com/DaniPopes/keccak-asm?tab=readme-ov-file#support) for supported targets.
@@ -147,7 +153,7 @@ _(Thanks to Sigma Prime for this section from [their Lighthouse book](https://li
### Bus error (WSL2)
-In WSL 2 on Windows, the default virtual disk size is set to 1TB.
+In WSL 2 on Windows, the default virtual disk size is set to 1TB.
You must increase the allocated disk size for your WSL2 instance before syncing reth.
diff --git a/book/vocs/docs/pages/introduction/contributing.mdx b/book/vocs/docs/pages/introduction/contributing.mdx
new file mode 100644
index 0000000000..63fc598715
--- /dev/null
+++ b/book/vocs/docs/pages/introduction/contributing.mdx
@@ -0,0 +1,258 @@
+# Contributing to Reth
+
+Reth maintains documentation geared specifically toward developers and contributors, covering the structure and architecture of reth, the general workflow we employ, and other useful tips.
+
+## Getting Help
+
+Need support or have questions? Open a GitHub issue and/or join the Telegram chat:
+
+- **GitHub Issues**: [Open an issue](https://github.com/paradigmxyz/reth/issues/new) for bugs or feature requests
+- **Telegram Chat**: [Join our Telegram](https://t.me/paradigm_reth) for real-time support and discussions
+
+## Repository and Project Structure
+
+Reth is organized as a modular codebase with clear separation of concerns and a contributor-friendly architecture. You can read about it in detail [here](https://github.com/paradigmxyz/reth/tree/main/docs). Here's the TL;DR:
+
+### Design
+
+Reth follows a modular architecture where each component can be used independently:
+
+- **Consensus**: Block validation and consensus rules
+- **Storage**: Hybrid database with MDBX + static files
+- **Networking**: P2P networking stack
+- **RPC**: JSON-RPC server implementation
+- **Engine**: Consensus layer integration
+- **EVM**: Transaction execution
+- **Node Builder**: High-level orchestration
+
+### Crates
+
+The repository is organized into focused crates under `/crates/`:
+
+```
+crates/
+├── consensus/ # Consensus and validation logic
+├── storage/   # Database and storage implementations
+├── net/       # Networking components
+├── rpc/       # JSON-RPC server and APIs
+├── engine/    # Engine API and consensus integration
+├── evm/       # EVM execution
+├── node/      # Node building and orchestration
+├── ethereum/  # Ethereum-specific implementations
+├── optimism/  # Optimism L2 support
+└── ...
+```
+
+## Workflow: The Lifecycle of PRs
+
+### 1. Before You Start
+
+- Check existing issues to avoid duplicate work
+- For large features, open an issue first to discuss the approach
+- Fork the repository and create a feature branch
+
+### 2. Development Process
+
+#### Setting Up Your Environment
+
+```bash
+# Clone your fork
+git clone https://github.com/YOUR_USERNAME/reth.git
+cd reth
+
+# Install dependencies and tools
+# Use nightly Rust for formatting
+rustup install nightly
+rustup component add rustfmt --toolchain nightly
+
+# Run the validation suite
+make pr
+```
+
+#### Code Style and Standards
+
+- **Formatting**: Use nightly rustfmt (`cargo +nightly fmt`)
+- **Linting**: All clippy warnings must be addressed
+- **Documentation**: Add doc comments for public APIs
+- **Testing**: Include appropriate tests for your changes
+
+#### Recommended VS Code Settings
+
+Install the `rust-analyzer` extension and use these settings for the best development experience:
+
+```json
+{
+ "rust-analyzer.rustfmt.overrideCommand": ["rustfmt", "+nightly"],
+ "rust-analyzer.check.overrideCommand": [
+ "cargo",
+ "clippy",
+ "--workspace",
+ "--message-format=json",
+ "--all-targets",
+ "--all-features"
+ ]
+}
+```
+
+### 3. Testing Your Changes
+
+Reth uses comprehensive testing at multiple levels:
+
+#### Unit Tests
+
+Test specific functions and components:
+
+```bash
+cargo test --package reth-ethereum-consensus
+```
+
+#### Integration Tests
+
+Test component interactions:
+
+```bash
+cargo test --test integration_tests
+```
+
+#### Full Test Suite
+
+Run all tests including Ethereum Foundation tests:
+
+```bash
+make test
+```
+
+#### Validation Suite
+
+Before submitting, always run:
+
+```bash
+make pr
+```
+
+This runs:
+
+- Code formatting checks
+- Clippy linting
+- Documentation generation
+- Full test suite
+
+### 4. Submitting Your PR
+
+#### Draft PRs for Large Features
+
+For substantial changes, open a draft PR early to get feedback on the approach.
+
+#### PR Requirements
+
+- [ ] Clear, descriptive title and description
+- [ ] Tests for new functionality
+- [ ] Documentation updates if needed
+- [ ] All CI checks passing
+- [ ] Commit messages follow conventional format
+
+#### Commit Message Format
+
+```
+type: brief description
+
+More detailed explanation if needed.
+
+- feat: new feature
+- fix: bug fix
+- docs: documentation changes
+- refactor: code refactoring
+- test: adding tests
+- chore: maintenance tasks
+```
+
+### 5. Review Process
+
+#### Who Can Review
+
+Any community member can review PRs. We encourage participation from all skill levels.
+
+#### What Reviewers Look For
+
+- **Does the change improve Reth?**
+- **Are there clear bugs or issues?**
+- **Are commit messages clear and descriptive?**
+- **Is the code well-tested?**
+- **Is documentation updated appropriately?**
+
+#### Review Guidelines
+
+- Be constructive and respectful
+- Provide specific, actionable feedback
+- Focus on significant issues first
+- Acknowledge good work and improvements
+
+## Releases: How Reth is Released
+
+### Release Schedule
+
+- **Regular releases**: Following semantic versioning
+- **Security releases**: As needed for critical vulnerabilities
+- **Pre-releases**: For testing major changes
+
+### Release Process
+
+1. **Version bump**: Update version numbers across crates
+2. **Changelog**: Update `CHANGELOG.md` with notable changes
+3. **Testing**: Final validation on testnet and mainnet
+4. **Tagging**: Create release tags and GitHub releases
+5. **Distribution**: Update package registries and Docker images
+
+### Release Criteria
+
+- All CI checks passing
+- No known critical bugs
+- Documentation up to date
+- Backwards compatibility considerations addressed
+
+## Ways to Contribute
+
+### 💡 Feature Requests
+
+For feature requests, please include:
+
+- **Detailed explanation**: What should the feature do?
+- **Context and motivation**: Why is this feature needed?
+- **Examples**: How would it be used?
+- **Similar tools**: References to similar functionality elsewhere
+
+### 📖 Documentation
+
+Documentation improvements are always welcome:
+
+- Add missing documentation
+- Improve code examples
+- Create tutorials or guides
+
+### 🔧 Code Contributions
+
+Contributing code changes:
+
+- Fix bugs identified in issues
+- Implement requested features
+- Improve performance
+- Refactor for better maintainability
+
+## Code of Conduct
+
+Reth follows the [Rust Code of Conduct](https://www.rust-lang.org/conduct.html). We are committed to providing a welcoming and inclusive environment for all contributors.
+
+### Our Standards
+
+- Be respectful and constructive
+- Focus on what's best for the community
+- Show empathy towards other contributors
+- Accept constructive criticism gracefully
+
+### Reporting Issues
+
+If you experience or witness behavior that violates our code of conduct, please report it to [georgios@paradigm.xyz](mailto:georgios@paradigm.xyz).
+
+:::note
+Also read [CONTRIBUTING.md](https://github.com/paradigmxyz/reth/blob/main/CONTRIBUTING.md) for in-depth guidelines.
+:::
diff --git a/book/vocs/docs/pages/introduction/why-reth.mdx b/book/vocs/docs/pages/introduction/why-reth.mdx
new file mode 100644
index 0000000000..f140c0e312
--- /dev/null
+++ b/book/vocs/docs/pages/introduction/why-reth.mdx
@@ -0,0 +1,50 @@
+---
+description: Why Reth is the future of Ethereum infrastructure - powering everything from production staking to cutting-edge L2s and ZK applications.
+---
+
+# Why Reth?
+
+Reth is more than just another Ethereum client: it's the foundation upon which the next generation of blockchain infrastructure is being built. From powering production staking environments at institutions like Coinbase to enabling cutting-edge L2 sequencers and ZK applications, Reth represents the convergence of security, performance, and extensibility that the ecosystem demands.
+
+Nearly every piece of crypto infrastructure will touch Reth in one way or another. Here's why the world's leading developers and institutions are choosing it.
+
+## Institutional-Grade Security
+
+Reth secures real value on Ethereum mainnet today, trusted by institutions like [Coinbase](https://x.com/CoinbasePltfrm/status/1933546893742579890) for production staking infrastructure. It powers RPC providers such as Alchemy.
+
+## Future Proof Performance
+
+Reth pushes the performance frontier across every dimension, from L2 sequencers to MEV block building.
+
+- **L2 Sequencer Performance**: Used by [Base](https://www.base.org/), other production L2s, and rollup-as-a-service providers such as [Conduit](https://conduit.xyz), all of which require high throughput and fast block times.
+- **MEV & Block Building**: [rbuilder](https://github.com/flashbots/rbuilder) is an open-source block builder built on Reth for its developer friendliness and blazing-fast performance.
+
+## Infinitely Customizable
+
+Reth's modular architecture means you are not locked into someone else's design decisions: build exactly the chain you need.
+
+- **Component-Based Design**: Swap out consensus, execution, mempool, or networking modules independently
+- **Custom Transaction Types**: Build specialized DeFi chains and unique economic models
+- **Rapid Development**: Reth SDK accelerates custom blockchain development with pre-built components
+
+## ZK & Stateless Ready
+
+Reth is designed from the ground up to excel in the zero-knowledge future with stateless execution and modular architecture.
+
+[SP1](https://github.com/succinctlabs/sp1), a zkVM for proving arbitrary Rust programs, and [Ress](https://www.paradigm.xyz/2025/03/stateless-reth-nodes), an experimental stateless node, demonstrate how Reth enables scalable zero-knowledge applications for Ethereum.
+
+## Thriving Open Source Ecosystem
+
+The most important factor in Reth's success is our vibrant open source community building the future together.
+
+500+ geo-distributed developers from leading companies and academia have helped build Reth into what it is today.
+
+## Join the community
+
+Reth isn't just a tool: it's a movement toward better blockchain infrastructure. Whether you're running a validator, building the next generation of L2s, or creating cutting-edge ZK applications, Reth provides the foundation you need to succeed.
+
+**Ready to build the future?**
+
+- [Get Started](/run/ethereum) with running your first Reth node
+- [Explore the SDK](/sdk/overview) to build custom blockchain infrastructure
+- [Join the Community](https://github.com/paradigmxyz/reth) and contribute to the future of Ethereum
diff --git a/book/jsonrpc/admin.md b/book/vocs/docs/pages/jsonrpc/admin.mdx
similarity index 79%
rename from book/jsonrpc/admin.md
rename to book/vocs/docs/pages/jsonrpc/admin.mdx
index b85cd194b6..cf1ef29c05 100644
--- a/book/jsonrpc/admin.md
+++ b/book/vocs/docs/pages/jsonrpc/admin.mdx
@@ -1,10 +1,13 @@
+---
+description: Admin API for node configuration and peer management.
+---
# `admin` Namespace
The `admin` API allows you to configure your node, including adding and removing peers.
-> **Note**
->
-> As this namespace can configure your node at runtime, it is generally **not advised** to expose it publicly.
+:::note
+As this namespace can configure your node at runtime, it is generally **not advised** to expose it publicly.
+:::
## `admin_addPeer`
@@ -13,7 +16,7 @@ Add the given peer to the current peer set of the node.
The method accepts a single argument, the [`enode`][enode] URL of the remote peer to connect to, and returns a `bool` indicating whether the peer was accepted or not.
| Client | Method invocation |
-|--------|------------------------------------------------|
+| ------ | ---------------------------------------------- |
| RPC | `{"method": "admin_addPeer", "params": [url]}` |
### Example
@@ -27,9 +30,9 @@ The method accepts a single argument, the [`enode`][enode] URL of the remote pee
Disconnects from a peer if the connection exists. Returns a `bool` indicating whether the peer was successfully removed or not.
-| Client | Method invocation |
-|--------|----------------------------------------------------|
-| RPC | `{"method": "admin_removePeer", "params": [url]}` |
+| Client | Method invocation |
+| ------ | ------------------------------------------------- |
+| RPC | `{"method": "admin_removePeer", "params": [url]}` |
### Example
@@ -45,7 +48,7 @@ Adds the given peer to a list of trusted peers, which allows the peer to always
It returns a `bool` indicating whether the peer was added to the list or not.
| Client | Method invocation |
-|--------|-------------------------------------------------------|
+| ------ | ----------------------------------------------------- |
| RPC | `{"method": "admin_addTrustedPeer", "params": [url]}` |
### Example
@@ -62,7 +65,7 @@ Removes a remote node from the trusted peer set, but it does not disconnect it a
Returns true if the peer was successfully removed.
| Client | Method invocation |
-|--------|----------------------------------------------------------|
+| ------ | -------------------------------------------------------- |
| RPC | `{"method": "admin_removeTrustedPeer", "params": [url]}` |
### Example
@@ -79,7 +82,7 @@ Returns all information known about the running node.
These include general information about the node itself, as well as what protocols it participates in, its IP and ports.
| Client | Method invocation |
-|--------|--------------------------------|
+| ------ | ------------------------------ |
| RPC | `{"method": "admin_nodeInfo"}` |
### Example
@@ -121,9 +124,9 @@ Like other subscription methods, this returns the ID of the subscription, which
To unsubscribe from peer events, call `admin_peerEvents_unsubscribe` with the subscription ID.
-| Client | Method invocation |
-|--------|-------------------------------------------------------|
-| RPC | `{"method": "admin_peerEvents", "params": []}` |
+| Client | Method invocation |
+| ------ | ------------------------------------------------------------ |
+| RPC | `{"method": "admin_peerEvents", "params": []}` |
| RPC | `{"method": "admin_peerEvents_unsubscribe", "params": [id]}` |
### Event Types
@@ -132,20 +135,20 @@ The subscription emits events with the following structure:
```json
{
- "jsonrpc": "2.0",
- "method": "admin_subscription",
- "params": {
- "subscription": "0xcd0c3e8af590364c09d0fa6a1210faf5",
- "result": {
- "type": "add", // or "drop", "error"
- "peer": {
- "id": "44826a5d6a55f88a18298bca4773fca5749cdc3a5c9f308aa7d810e9b31123f3e7c5fba0b1d70aac5308426f47df2a128a6747040a3815cc7dd7167d03be320d",
- "enode": "enode://44826a5d6a55f88a18298bca4773fca5749cdc3a5c9f308aa7d810e9b31123f3e7c5fba0b1d70aac5308426f47df2a128a6747040a3815cc7dd7167d03be320d@192.168.1.1:30303",
- "addr": "192.168.1.1:30303"
- },
- "error": "reason for disconnect or error" // only present for "drop" and "error" events
+ "jsonrpc": "2.0",
+ "method": "admin_subscription",
+ "params": {
+ "subscription": "0xcd0c3e8af590364c09d0fa6a1210faf5",
+ "result": {
+ "type": "add", // or "drop", "error"
+ "peer": {
+ "id": "44826a5d6a55f88a18298bca4773fca5749cdc3a5c9f308aa7d810e9b31123f3e7c5fba0b1d70aac5308426f47df2a128a6747040a3815cc7dd7167d03be320d",
+ "enode": "enode://44826a5d6a55f88a18298bca4773fca5749cdc3a5c9f308aa7d810e9b31123f3e7c5fba0b1d70aac5308426f47df2a128a6747040a3815cc7dd7167d03be320d@192.168.1.1:30303",
+ "addr": "192.168.1.1:30303"
+ },
+ "error": "reason for disconnect or error" // only present for "drop" and "error" events
+ }
}
- }
}
```
diff --git a/book/jsonrpc/debug.md b/book/vocs/docs/pages/jsonrpc/debug.mdx
similarity index 80%
rename from book/jsonrpc/debug.md
rename to book/vocs/docs/pages/jsonrpc/debug.mdx
index 7965e2e0d5..aa3a47685c 100644
--- a/book/jsonrpc/debug.md
+++ b/book/vocs/docs/pages/jsonrpc/debug.mdx
@@ -1,3 +1,6 @@
+---
+description: Debug API for inspecting Ethereum state and traces.
+---
# `debug` Namespace
The `debug` API provides several methods to inspect the Ethereum state, including Geth-style traces.
@@ -7,7 +10,7 @@ The `debug` API provides several methods to inspect the Ethereum state, includin
Returns an RLP-encoded header.
| Client | Method invocation |
-|--------|-------------------------------------------------------|
+| ------ | ----------------------------------------------------- |
| RPC | `{"method": "debug_getRawHeader", "params": [block]}` |
## `debug_getRawBlock`
@@ -15,7 +18,7 @@ Returns an RLP-encoded header.
Retrieves and returns the RLP encoded block by number, hash or tag.
| Client | Method invocation |
-|--------|------------------------------------------------------|
+| ------ | ---------------------------------------------------- |
| RPC | `{"method": "debug_getRawBlock", "params": [block]}` |
## `debug_getRawTransaction`
@@ -23,7 +26,7 @@ Retrieves and returns the RLP encoded block by number, hash or tag.
Returns an EIP-2718 binary-encoded transaction.
| Client | Method invocation |
-|--------|--------------------------------------------------------------|
+| ------ | ------------------------------------------------------------ |
| RPC | `{"method": "debug_getRawTransaction", "params": [tx_hash]}` |
## `debug_getRawReceipts`
@@ -31,7 +34,7 @@ Returns an EIP-2718 binary-encoded transaction.
Returns an array of EIP-2718 binary-encoded receipts.
| Client | Method invocation |
-|--------|---------------------------------------------------------|
+| ------ | ------------------------------------------------------- |
| RPC | `{"method": "debug_getRawReceipts", "params": [block]}` |
## `debug_getBadBlocks`
@@ -39,7 +42,7 @@ Returns an array of EIP-2718 binary-encoded receipts.
Returns an array of recent bad blocks that the client has seen on the network.
| Client | Method invocation |
-|--------|--------------------------------------------------|
+| ------ | ------------------------------------------------ |
| RPC | `{"method": "debug_getBadBlocks", "params": []}` |
## `debug_traceChain`
@@ -47,7 +50,7 @@ Returns an array of recent bad blocks that the client has seen on the network.
Returns the structured logs created during the execution of EVM between two blocks (excluding start) as a JSON object.
| Client | Method invocation |
-|--------|----------------------------------------------------------------------|
+| ------ | -------------------------------------------------------------------- |
| RPC | `{"method": "debug_traceChain", "params": [start_block, end_block]}` |
## `debug_traceBlock`
@@ -57,11 +60,11 @@ The `debug_traceBlock` method will return a full stack trace of all invoked opco
This expects an RLP-encoded block.
> **Note**
->
+>
> The parent of this block must be present, or it will fail.
| Client | Method invocation |
-|--------|---------------------------------------------------------|
+| ------ | ------------------------------------------------------- |
| RPC | `{"method": "debug_traceBlock", "params": [rlp, opts]}` |
## `debug_traceBlockByHash`
@@ -69,7 +72,7 @@ This expects an RLP-encoded block.
Similar to [`debug_traceBlock`](#debug_traceblock), `debug_traceBlockByHash` accepts a block hash and will replay the block that is already present in the database.
| Client | Method invocation |
-|--------|----------------------------------------------------------------------|
+| ------ | -------------------------------------------------------------------- |
| RPC | `{"method": "debug_traceBlockByHash", "params": [block_hash, opts]}` |
## `debug_traceBlockByNumber`
@@ -77,15 +80,15 @@ Similar to [`debug_traceBlock`](#debug_traceblock), `debug_traceBlockByHash` acc
Similar to [`debug_traceBlockByHash`](#debug_traceblockbyhash), `debug_traceBlockByNumber` accepts a block number and will replay the block that is already present in the database.
| Client | Method invocation |
-|--------|--------------------------------------------------------------------------|
+| ------ | ------------------------------------------------------------------------ |
| RPC | `{"method": "debug_traceBlockByNumber", "params": [block_number, opts]}` |
## `debug_traceTransaction`
The `debug_traceTransaction` debugging method will attempt to run the transaction in the exact same manner as it was executed on the network. It will replay any transaction that may have been executed prior to this one before it will finally attempt to execute the transaction that corresponds to the given hash.
-| Client | Method invocation |
-|--------|-------------------------------------------------------------|
+| Client | Method invocation |
+| ------ | ----------------------------------------------------------------- |
| RPC | `{"method": "debug_traceTransaction", "params": [tx_hash, opts]}` |
## `debug_traceCall`
@@ -97,5 +100,5 @@ The first argument (just as in `eth_call`) is a transaction request.
The block can optionally be specified either by hash or by number as the second argument.
| Client | Method invocation |
-|--------|-----------------------------------------------------------------------|
+| ------ | --------------------------------------------------------------------- |
| RPC | `{"method": "debug_traceCall", "params": [call, block_number, opts]}` |
diff --git a/book/jsonrpc/eth.md b/book/vocs/docs/pages/jsonrpc/eth.mdx
similarity index 72%
rename from book/jsonrpc/eth.md
rename to book/vocs/docs/pages/jsonrpc/eth.mdx
index 0a3003c405..052beb4c7b 100644
--- a/book/jsonrpc/eth.md
+++ b/book/vocs/docs/pages/jsonrpc/eth.mdx
@@ -1,3 +1,7 @@
+---
+description: Standard Ethereum JSON-RPC API methods.
+---
+
# `eth` Namespace
Documentation for the API methods in the `eth` namespace can be found on [ethereum.org](https://ethereum.org/en/developers/docs/apis/json-rpc/).
diff --git a/book/jsonrpc/intro.md b/book/vocs/docs/pages/jsonrpc/intro.mdx
similarity index 70%
rename from book/jsonrpc/intro.md
rename to book/vocs/docs/pages/jsonrpc/intro.mdx
index 6f9b894988..dac173142a 100644
--- a/book/jsonrpc/intro.md
+++ b/book/vocs/docs/pages/jsonrpc/intro.mdx
@@ -1,3 +1,7 @@
+---
+description: Overview of Reth's JSON-RPC API and namespaces.
+---
+
# JSON-RPC
You can interact with Reth over JSON-RPC. Reth supports all standard Ethereum JSON-RPC API methods.
@@ -12,22 +16,21 @@ Each namespace must be explicitly enabled.
The methods are grouped into namespaces, which are listed below:
-| Namespace | Description | Sensitive |
-|-------------------------|--------------------------------------------------------------------------------------------------------|-----------|
-| [`eth`](./eth.md) | The `eth` API allows you to interact with Ethereum. | Maybe |
-| [`web3`](./web3.md) | The `web3` API provides utility functions for the web3 client. | No |
-| [`net`](./net.md) | The `net` API provides access to network information of the node. | No |
-| [`txpool`](./txpool.md) | The `txpool` API allows you to inspect the transaction pool. | No |
-| [`debug`](./debug.md) | The `debug` API provides several methods to inspect the Ethereum state, including Geth-style traces. | No |
-| [`trace`](./trace.md) | The `trace` API provides several methods to inspect the Ethereum state, including Parity-style traces. | No |
-| [`admin`](./admin.md) | The `admin` API allows you to configure your node. | **Yes** |
-| [`rpc`](./rpc.md) | The `rpc` API provides information about the RPC server and its modules. | No |
+| Namespace | Description | Sensitive |
+| -------------------- | ------------------------------------------------------------------------------------------------------ | --------- |
+| [`eth`](./eth) | The `eth` API allows you to interact with Ethereum. | Maybe |
+| [`web3`](./web3) | The `web3` API provides utility functions for the web3 client. | No |
+| [`net`](./net) | The `net` API provides access to network information of the node. | No |
+| [`txpool`](./txpool) | The `txpool` API allows you to inspect the transaction pool. | No |
+| [`debug`](./debug) | The `debug` API provides several methods to inspect the Ethereum state, including Geth-style traces. | No |
+| [`trace`](./trace) | The `trace` API provides several methods to inspect the Ethereum state, including Parity-style traces. | No |
+| [`admin`](./admin) | The `admin` API allows you to configure your node. | **Yes** |
+| [`rpc`](./rpc) | The `rpc` API provides information about the RPC server and its modules. | No |
Note that some APIs are sensitive, since they can be used to configure your node (`admin`), or access accounts stored on the node (`eth`).
Generally, it is advisable to not expose any JSONRPC namespace publicly, unless you know what you are doing.
-
## Transports
Reth supports HTTP, WebSockets and IPC.
@@ -90,10 +93,10 @@ Because WebSockets are bidirectional, nodes can push events to clients, which en
The configuration of the WebSocket server follows the same pattern as the HTTP server:
-- Enable it using `--ws`
-- Configure the server address by passing `--ws.addr` and `--ws.port` (default `8546`)
-- Configure cross-origin requests using `--ws.origins`
-- Enable APIs using `--ws.api`
+- Enable it using `--ws`
+- Configure the server address by passing `--ws.addr` and `--ws.port` (default `8546`)
+- Configure cross-origin requests using `--ws.origins`
+- Enable APIs using `--ws.api`
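+
+Putting these flags together, a node serving the `eth` and `net` APIs over WebSocket could be started like so (a sketch; adjust the address and API list to your needs):
+
+```bash
+reth node --ws --ws.addr 127.0.0.1 --ws.port 8546 --ws.api eth,net
+```
+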
### IPC
diff --git a/book/jsonrpc/net.md b/book/vocs/docs/pages/jsonrpc/net.mdx
similarity index 82%
rename from book/jsonrpc/net.md
rename to book/vocs/docs/pages/jsonrpc/net.mdx
index ac40c75b2a..145b9c2767 100644
--- a/book/jsonrpc/net.md
+++ b/book/vocs/docs/pages/jsonrpc/net.mdx
@@ -1,3 +1,7 @@
+---
+description: net_ namespace for Ethereum nodes.
+---
+
# `net` Namespace
The `net` API provides information about the networking component of the node.
@@ -7,7 +11,7 @@ The `net` API provides information about the networking component of the node.
Returns a `bool` indicating whether or not the node is listening for network connections.
| Client | Method invocation |
-|--------|---------------------------------------------|
+| ------ | ------------------------------------------- |
| RPC | `{"method": "net_listening", "params": []}` |
### Example
@@ -22,7 +26,7 @@ Returns a `bool` indicating whether or not the node is listening for network con
Returns the number of peers connected to the node.
| Client | Method invocation |
-|--------|---------------------------------------------|
+| ------ | ------------------------------------------- |
| RPC | `{"method": "net_peerCount", "params": []}` |
### Example
@@ -37,7 +41,7 @@ Returns the number of peers connected to the node.
Returns the network ID (e.g. 1 for mainnet)
| Client | Method invocation |
-|--------|-------------------------------------------|
+| ------ | ----------------------------------------- |
| RPC | `{"method": "net_version", "params": []}` |
### Example
@@ -45,4 +49,4 @@ Returns the network ID (e.g. 1 for mainnet)
```js
// > {"jsonrpc":"2.0","id":1,"method":"net_version","params":[]}
{"jsonrpc":"2.0","id":1,"result":1}
-```
\ No newline at end of file
+```
diff --git a/book/jsonrpc/rpc.md b/book/vocs/docs/pages/jsonrpc/rpc.mdx
similarity index 91%
rename from book/jsonrpc/rpc.md
rename to book/vocs/docs/pages/jsonrpc/rpc.mdx
index 0a4739718b..c85babcfe3 100644
--- a/book/jsonrpc/rpc.md
+++ b/book/vocs/docs/pages/jsonrpc/rpc.mdx
@@ -1,3 +1,7 @@
+---
+description: rpc_ namespace for retrieving server information such as enabled namespaces
+---
+
# `rpc` Namespace
The `rpc` API provides methods to get information about the RPC server itself, such as the enabled namespaces.
@@ -7,7 +11,7 @@ The `rpc` API provides methods to get information about the RPC server itself, s
Lists the enabled RPC namespaces and the versions of each.
| Client | Method invocation |
-|--------|-------------------------------------------|
+| ------ | ----------------------------------------- |
| RPC | `{"method": "rpc_modules", "params": []}` |
### Example
diff --git a/book/jsonrpc/trace.md b/book/vocs/docs/pages/jsonrpc/trace.mdx
similarity index 86%
rename from book/jsonrpc/trace.md
rename to book/vocs/docs/pages/jsonrpc/trace.mdx
index ba0f2490b5..38157e4423 100644
--- a/book/jsonrpc/trace.md
+++ b/book/vocs/docs/pages/jsonrpc/trace.mdx
@@ -1,33 +1,37 @@
+---
+description: Trace API for inspecting Ethereum state and transactions.
+---
+
# `trace` Namespace
-
+{/* TODO: We should probably document the format of the traces themselves, OE does not do that */}
The `trace` API provides several methods to inspect the Ethereum state, including Parity-style traces.
-A similar module exists (with other debug functions) with Geth-style traces ([`debug`](./debug.md)).
+A similar module exists (with other debug functions) with Geth-style traces ([`debug`](./debug)).
The `trace` API gives deeper insight into transaction processing.
There are two types of methods in this API:
-- **Ad-hoc tracing APIs** for performing diagnostics on calls or transactions (historical or hypothetical).
-- **Transaction-trace filtering APIs** for getting full externality traces on any transaction executed by reth.
+- **Ad-hoc tracing APIs** for performing diagnostics on calls or transactions (historical or hypothetical).
+- **Transaction-trace filtering APIs** for getting full externality traces on any transaction executed by reth.
## Ad-hoc tracing APIs
Ad-hoc tracing APIs allow you to perform diagnostics on calls or transactions (historical or hypothetical), including:
-- Transaction traces (`trace`)
-- VM traces (`vmTrace`)
-- State difference traces (`stateDiff`)
+- Transaction traces (`trace`)
+- VM traces (`vmTrace`)
+- State difference traces (`stateDiff`)
The ad-hoc tracing APIs are:
-- [`trace_call`](#trace_call)
-- [`trace_callMany`](#trace_callmany)
-- [`trace_rawTransaction`](#trace_rawtransaction)
-- [`trace_replayBlockTransactions`](#trace_replayblocktransactions)
-- [`trace_replayTransaction`](#trace_replaytransaction)
+- [`trace_call`](#trace_call)
+- [`trace_callMany`](#trace_callmany)
+- [`trace_rawTransaction`](#trace_rawtransaction)
+- [`trace_replayBlockTransactions`](#trace_replayblocktransactions)
+- [`trace_replayTransaction`](#trace_replaytransaction)
## Transaction-trace filtering APIs
@@ -37,10 +41,10 @@ Information returned includes the execution of all contract creations, destructi
The transaction trace filtering APIs are:
-- [`trace_block`](#trace_block)
-- [`trace_filter`](#trace_filter)
-- [`trace_get`](#trace_get)
-- [`trace_transaction`](#trace_transaction)
+- [`trace_block`](#trace_block)
+- [`trace_filter`](#trace_filter)
+- [`trace_get`](#trace_get)
+- [`trace_transaction`](#trace_transaction)
## `trace_call`
@@ -53,7 +57,7 @@ The second parameter is an array of one or more trace types (`vmTrace`, `trace`,
The third and optional parameter is a block number, block hash, or a block tag (`latest`, `finalized`, `safe`, `earliest`, `pending`).
| Client | Method invocation |
-|--------|-----------------------------------------------------------|
+| ------ | --------------------------------------------------------- |
| RPC | `{"method": "trace_call", "params": [tx, type[], block]}` |
### Example
@@ -90,7 +94,7 @@ The first parameter is a list of call traces, where each call trace is of the fo
The second and optional parameter is a block number, block hash, or a block tag (`latest`, `finalized`, `safe`, `earliest`, `pending`).
| Client | Method invocation |
-|--------|--------------------------------------------------------|
+| ------ | ------------------------------------------------------ |
| RPC | `{"method": "trace_call", "params": [trace[], block]}` |
### Example
@@ -154,7 +158,7 @@ The second and optional parameter is a block number, block hash, or a block tag
Traces a call to `eth_sendRawTransaction` without making the call, returning the traces.
| Client | Method invocation |
-|--------|--------------------------------------------------------|
+| ------ | ------------------------------------------------------ |
| RPC | `{"method": "trace_call", "params": [raw_tx, type[]]}` |
### Example
@@ -187,7 +191,7 @@ Traces a call to `eth_sendRawTransaction` without making the call, returning the
Replays all transactions in a block returning the requested traces for each transaction.
| Client | Method invocation |
-|--------|--------------------------------------------------------------------------|
+| ------ | ------------------------------------------------------------------------ |
| RPC | `{"method": "trace_replayBlockTransactions", "params": [block, type[]]}` |
### Example
@@ -224,7 +228,7 @@ Replays all transactions in a block returning the requested traces for each tran
Replays a transaction, returning the traces.
| Client | Method invocation |
-|--------|----------------------------------------------------------------------|
+| ------ | -------------------------------------------------------------------- |
| RPC | `{"method": "trace_replayTransaction", "params": [tx_hash, type[]]}` |
### Example
@@ -257,7 +261,7 @@ Replays a transaction, returning the traces.
Returns traces created at given block.
| Client | Method invocation |
-|--------|------------------------------------------------|
+| ------ | ---------------------------------------------- |
| RPC | `{"method": "trace_block", "params": [block]}` |
### Example
@@ -300,17 +304,17 @@ Returns traces matching given filter.
Filters are objects with the following properties:
-- `fromBlock`: Returns traces from the given block (a number, hash, or a tag like `latest`).
-- `toBlock`: Returns traces to the given block.
-- `fromAddress`: Sent from these addresses
-- `toAddress`: Sent to these addresses
-- `after`: The offset trace number
-- `count`: The number of traces to display in a batch
+- `fromBlock`: Returns traces from the given block (a number, hash, or a tag like `latest`).
+- `toBlock`: Returns traces to the given block.
+- `fromAddress`: Sent from these addresses
+- `toAddress`: Sent to these addresses
+- `after`: The offset trace number
+- `count`: The number of traces to display in a batch
All properties are optional.
| Client | Method invocation |
-|--------|--------------------------------------------------|
+| ------ | ------------------------------------------------ |
| RPC | `{"method": "trace_filter", "params": [filter]}` |
### Example
@@ -352,7 +356,7 @@ All properties are optional.
Returns trace at given position.
| Client | Method invocation |
-|--------|----------------------------------------------------------|
+| ------ | -------------------------------------------------------- |
| RPC | `{"method": "trace_get", "params": [tx_hash,indices[]]}` |
### Example
@@ -393,7 +397,7 @@ Returns trace at given position.
Returns all traces of given transaction
| Client | Method invocation |
-|--------|--------------------------------------------------------|
+| ------ | ------------------------------------------------------ |
| RPC | `{"method": "trace_transaction", "params": [tx_hash]}` |
### Example
@@ -430,4 +434,4 @@ Returns all traces of given transaction
...
]
}
-```
\ No newline at end of file
+```
diff --git a/book/jsonrpc/txpool.md b/book/vocs/docs/pages/jsonrpc/txpool.mdx
similarity index 81%
rename from book/jsonrpc/txpool.md
rename to book/vocs/docs/pages/jsonrpc/txpool.mdx
index cb9e9c0e69..57f89c643c 100644
--- a/book/jsonrpc/txpool.md
+++ b/book/vocs/docs/pages/jsonrpc/txpool.mdx
@@ -1,3 +1,7 @@
+---
+description: API for inspecting the transaction pool.
+---
+
# `txpool` Namespace
The `txpool` API allows you to inspect the transaction pool.
@@ -9,7 +13,7 @@ Returns the details of all transactions currently pending for inclusion in the n
See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool-content) for more details
| Client | Method invocation |
-|--------|----------------------------------------------|
+| ------ | -------------------------------------------- |
| RPC | `{"method": "txpool_content", "params": []}` |
## `txpool_contentFrom`
@@ -19,7 +23,7 @@ Retrieves the transactions contained within the txpool, returning pending as wel
See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool-contentfrom) for more details
| Client | Method invocation |
-|--------|---------------------------------------------------------|
+| ------ | ------------------------------------------------------- |
| RPC | `{"method": "txpool_contentFrom", "params": [address]}` |
## `txpool_inspect`
@@ -29,7 +33,7 @@ Returns a summary of all the transactions currently pending for inclusion in the
See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool-inspect) for more details
| Client | Method invocation |
-|--------|----------------------------------------------|
+| ------ | -------------------------------------------- |
| RPC | `{"method": "txpool_inspect", "params": []}` |
## `txpool_status`
@@ -39,5 +43,5 @@ Returns the number of transactions currently pending for inclusion in the next b
See [here](https://geth.ethereum.org/docs/rpc/ns-txpool#txpool-status) for more details
| Client | Method invocation |
-|--------|---------------------------------------------|
-| RPC | `{"method": "txpool_status", "params": []}` |
\ No newline at end of file
+| ------ | ------------------------------------------- |
+| RPC | `{"method": "txpool_status", "params": []}` |
diff --git a/book/jsonrpc/web3.md b/book/vocs/docs/pages/jsonrpc/web3.mdx
similarity index 83%
rename from book/jsonrpc/web3.md
rename to book/vocs/docs/pages/jsonrpc/web3.mdx
index 8221e5c250..f1eb68bcaf 100644
--- a/book/jsonrpc/web3.md
+++ b/book/vocs/docs/pages/jsonrpc/web3.mdx
@@ -1,3 +1,7 @@
+---
+description: Web3 API utility methods for Ethereum clients.
+---
+
# `web3` Namespace
The `web3` API provides utility functions for the web3 client.
@@ -6,9 +10,8 @@ The `web3` API provides utility functions for the web3 client.
Get the web3 client version.
-
| Client | Method invocation |
-|--------|------------------------------------|
+| ------ | ---------------------------------- |
| RPC | `{"method": "web3_clientVersion"}` |
### Example
@@ -23,7 +26,7 @@ Get the web3 client version.
Get the Keccak-256 hash of the given data.
| Client | Method invocation |
-|--------|----------------------------------------------|
+| ------ | -------------------------------------------- |
| RPC | `{"method": "web3_sha3", "params": [bytes]}` |
### Example
@@ -36,4 +39,4 @@ Get the Keccak-256 hash of the given data.
```js
// > {"jsonrpc":"2.0","id":1,"method":"web3_sha3","params":["0x7275737420697320617765736f6d65"]}
{"jsonrpc":"2.0","id":1,"result":"0xe421b3428564a5c509ac118bad93a3b84485ec3f927e214b0c4c23076d4bc4e0"}
-```
\ No newline at end of file
+```
diff --git a/book/intro.md b/book/vocs/docs/pages/overview.mdx
similarity index 72%
rename from book/intro.md
rename to book/vocs/docs/pages/overview.mdx
index 6abd3da7ac..e41ca3ad83 100644
--- a/book/intro.md
+++ b/book/vocs/docs/pages/overview.mdx
@@ -1,15 +1,14 @@
-# Reth Book
-_Documentation for Reth users and developers._
+---
+description: Reth - A secure, performant, and modular blockchain SDK and Ethereum node.
+---
-[![Telegram Chat][tg-badge]][tg-url]
+# Reth [Documentation for Reth users and developers]
Reth (short for Rust Ethereum, [pronunciation](https://twitter.com/kelvinfichter/status/1597653609411268608)) is an **Ethereum full node implementation that is focused on being user-friendly, highly modular, as well as being fast and efficient.**
Reth is production ready, and suitable for usage in mission-critical environments such as staking or high-uptime services. We also actively recommend professional node operators to switch to Reth in production for performance and cost reasons in use cases where high performance with great margins is required such as RPC, MEV, Indexing, Simulations, and P2P activities.
-
-
-
+
## What is this about?
@@ -60,8 +59,9 @@ We envision that Reth will be configurable enough for the tradeoffs that each te
## Who is this for?
Reth is a new Ethereum full node that allows users to sync and interact with the entire blockchain, including its historical state if in archive mode.
-- Full node: It can be used as a full node, which stores and processes the entire blockchain, validates blocks and transactions, and participates in the consensus process.
-- Archive node: It can also be used as an archive node, which stores the entire history of the blockchain and is useful for applications that need access to historical data.
+
+- Full node: It can be used as a full node, which stores and processes the entire blockchain, validates blocks and transactions, and participates in the consensus process.
+- Archive node: It can also be used as an archive node, which stores the entire history of the blockchain and is useful for applications that need access to historical data.
As a data engineer/analyst, or as a data indexer, you'll want to use Archive mode. For all other use cases where historical access is not needed, you can use Full mode.
@@ -79,21 +79,35 @@ We have completed an audit of the [Reth v1.0.0-rc.2](https://github.com/paradigm
[Revm](https://github.com/bluealloy/revm) (the EVM used in Reth) underwent an audit with [Guido Vranken](https://twitter.com/guidovranken) (#1 [Ethereum Bug Bounty](https://ethereum.org/en/bug-bounty)). We will publish the results soon.
+## Reth Metrics
+
+We operate several public Reth nodes across different networks. You can monitor their performance metrics through our public Grafana dashboards:
+
+| Name | Chain ID | Type | Grafana |
+| -------- | -------- | ------- | ---------------------------------------------------------------------------------- |
+| Ethereum | 1 | Full | [View](https://reth.ithaca.xyz/public-dashboards/23ceb3bd26594e349aaaf2bcf336d0d4) |
+| Ethereum | 1 | Archive | [View](https://reth.ithaca.xyz/public-dashboards/a49fa110dc9149298fa6763d5c89c8c0) |
+| Base | 8453 | Archive | [View](https://reth.ithaca.xyz/public-dashboards/b3e9f2e668ee4b86960b7fac691b5e64) |
+| OP | 10 | Archive | [View](https://reth.ithaca.xyz/public-dashboards/aa32f6c39a664f9aa371399b59622527) |
+
+:::tip
+Want to set up metrics for your own Reth node? Check out our [monitoring guide](/run/monitoring) to learn how to configure Prometheus metrics and build your own dashboards.
+:::
## Sections
Here are some useful sections to jump to:
-- Install Reth by following the [guide](./installation/installation.md).
-- Sync your node on any [official network](./run/run-a-node.md).
-- View [statistics and metrics](./run/observability.md) about your node.
-- Query the [JSON-RPC](./jsonrpc/intro.md) using Foundry's `cast` or `curl`.
-- Set up your [development environment and contribute](./developers/contribute.md)!
+- Install Reth by following the [guide](/installation/overview).
+- Sync your node on any [official network](/run/overview).
+- View [statistics and metrics](/run/monitoring) about your node.
+- Query the [JSON-RPC](/jsonrpc/intro) using Foundry's `cast` or `curl`.
+- Set up your [development environment and contribute](/introduction/contributing)!
-> π **About this book**
->
-> The book is continuously rendered [here](https://paradigmxyz.github.io/reth/)!
-> You can contribute to this book on [GitHub][gh-book].
+:::note
+The book is continuously rendered [here](https://reth.rs)!
+You can contribute to the docs on [GitHub][gh-book].
+:::
[tg-badge]: https://img.shields.io/endpoint?color=neon&logo=telegram&label=chat&url=https%3A%2F%2Ftg.sumanjay.workers.dev%2Fparadigm%5Freth
[tg-url]: https://t.me/paradigm_reth
diff --git a/book/run/config.md b/book/vocs/docs/pages/run/configuration.mdx
similarity index 90%
rename from book/run/config.md
rename to book/vocs/docs/pages/run/configuration.mdx
index bb28d855de..8f34cfc691 100644
--- a/book/run/config.md
+++ b/book/vocs/docs/pages/run/configuration.mdx
@@ -1,32 +1,36 @@
+---
+description: How to configure Reth using reth.toml and its options.
+---
+
# Configuring Reth
Reth places a configuration file named `reth.toml` in the data directory specified when starting the node. It is written in the [TOML] format.
The default data directory is platform dependent:
-- Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/`
-- Windows: `{FOLDERID_RoamingAppData}/reth/`
-- macOS: `$HOME/Library/Application Support/reth/`
+- Linux: `$XDG_DATA_HOME/reth/` or `$HOME/.local/share/reth/`
+- Windows: `{FOLDERID_RoamingAppData}/reth/`
+- macOS: `$HOME/Library/Application Support/reth/`
The configuration file contains the following sections:
-- [`[stages]`](#the-stages-section) -- Configuration of the individual sync stages
- - [`headers`](#headers)
- - [`bodies`](#bodies)
- - [`sender_recovery`](#sender_recovery)
- - [`execution`](#execution)
- - [`account_hashing`](#account_hashing)
- - [`storage_hashing`](#storage_hashing)
- - [`merkle`](#merkle)
- - [`transaction_lookup`](#transaction_lookup)
- - [`index_account_history`](#index_account_history)
- - [`index_storage_history`](#index_storage_history)
-- [`[peers]`](#the-peers-section)
- - [`connection_info`](#connection_info)
- - [`reputation_weights`](#reputation_weights)
- - [`backoff_durations`](#backoff_durations)
-- [`[sessions]`](#the-sessions-section)
-- [`[prune]`](#the-prune-section)
+- [`[stages]`](#the-stages-section) -- Configuration of the individual sync stages
+ - [`headers`](#headers)
+ - [`bodies`](#bodies)
+ - [`sender_recovery`](#sender_recovery)
+ - [`execution`](#execution)
+ - [`account_hashing`](#account_hashing)
+ - [`storage_hashing`](#storage_hashing)
+ - [`merkle`](#merkle)
+ - [`transaction_lookup`](#transaction_lookup)
+ - [`index_account_history`](#index_account_history)
+ - [`index_storage_history`](#index_storage_history)
+- [`[peers]`](#the-peers-section)
+ - [`connection_info`](#connection_info)
+ - [`reputation_weights`](#reputation_weights)
+ - [`backoff_durations`](#backoff_durations)
+- [`[sessions]`](#the-sessions-section)
+- [`[prune]`](#the-prune-section)
## The `[stages]` section
@@ -305,8 +309,8 @@ The sessions section configures the internal behavior of a single peer-to-peer c
You can configure the session buffer sizes, which limits the amount of pending events (incoming messages) and commands (outgoing messages) each session can hold before it will start to ignore messages.
> **Note**
->
-> These buffers are allocated *per peer*, which means that increasing the buffer sizes can have large impact on memory consumption.
+>
+> These buffers are allocated _per peer_, which means that increasing the buffer sizes can have large impact on memory consumption.
```toml
[sessions]
@@ -342,10 +346,11 @@ No pruning, run as archive node.
### Example of the custom pruning configuration
This configuration will:
-- Run pruning every 5 blocks
-- Continuously prune all transaction senders, account history and storage history before the block `head-100_000`,
-i.e. keep the data for the last `100_000` blocks
-- Prune all receipts before the block 1920000, i.e. keep receipts from the block 1920000
+
+- Run pruning every 5 blocks
+- Continuously prune all transaction senders, account history and storage history before the block `head-100_000`,
+ i.e. keep the data for the last `100_000` blocks
+- Prune all receipts before the block 1920000, i.e. keep receipts from the block 1920000
```toml
[prune]
@@ -370,6 +375,7 @@ storage_history = { distance = 100_000 } # Prune all historical storage states b
```
We can also prune receipts more granular, using the logs filtering:
+
```toml
# Receipts pruning configuration by retaining only those receipts that contain logs emitted
# by the specified addresses, discarding all others. This setting is overridden by `receipts`.
diff --git a/book/run/mainnet.md b/book/vocs/docs/pages/run/ethereum.mdx
similarity index 73%
rename from book/run/mainnet.md
rename to book/vocs/docs/pages/run/ethereum.mdx
index c4908971f6..7e0d01daa1 100644
--- a/book/run/mainnet.md
+++ b/book/vocs/docs/pages/run/ethereum.mdx
@@ -1,3 +1,7 @@
+---
+description: How to run Reth on Ethereum mainnet and testnets.
+---
+
# Running Reth on Ethereum Mainnet or testnets
Reth is an [_execution client_](https://ethereum.org/en/developers/docs/nodes-and-clients/#execution-clients). After Ethereum's transition to Proof of Stake (aka the Merge) it became required to run a [_consensus client_](https://ethereum.org/en/developers/docs/nodes-and-clients/#consensus-clients) along your execution client in order to sync into any "post-Merge" network. This is because the Ethereum execution layer now outsources consensus to a separate component, known as the consensus client.
@@ -6,12 +10,12 @@ Consensus clients decide what blocks are part of the chain, while execution clie
By running both an execution client like Reth and a consensus client, such as Lighthouse 🦅 (which we will assume for this guide), you can effectively contribute to the Ethereum network and participate in the consensus process, even if you don't intend to run validators.
-| Client | Role |
-|-------------|--------------------------------------------------|
-| Execution | Validates transactions and blocks |
-| | (checks their validity and global state) |
-| Consensus | Determines which blocks are part of the chain |
-| | (makes consensus decisions) |
+| Client | Role |
+| --------- | --------------------------------------------- |
+| Execution | Validates transactions and blocks |
+| | (checks their validity and global state) |
+| Consensus | Determines which blocks are part of the chain |
+| | (makes consensus decisions) |
## Running the Reth Node
@@ -24,15 +28,22 @@ reth node
```
And to start the full node, run:
+
```bash
reth node --full
```
-On differences between archive and full nodes, see [Pruning & Full Node](./pruning.md#basic-concepts) section.
+On the differences between archive and full nodes, see the [Pruning & Full Node](/run/faq/pruning#basic-concepts) section.
-> Note that these commands will not open any HTTP/WS ports by default. You can change this by adding the `--http`, `--ws` flags, respectively and using the `--http.api` and `--ws.api` flags to enable various [JSON-RPC APIs](../jsonrpc/intro.md). For more commands, see the [`reth node` CLI reference](../cli/reth/node.md).
+:::note
+These commands will not open any HTTP/WS ports by default.
-The EL <> CL communication happens over the [Engine API](https://github.com/ethereum/execution-apis/blob/main/src/engine/common.md), which is by default exposed at `http://localhost:8551`. The connection is authenticated over JWT using a JWT secret which is auto-generated by Reth and placed in a file called `jwt.hex` in the data directory, which on Linux by default is `$HOME/.local/share/reth/` (`/Users//Library/Application Support/reth/mainnet/jwt.hex` in Mac).
+You can change this by adding the `--http` and `--ws` flags, respectively, and by using the `--http.api` and `--ws.api` flags to enable various [JSON-RPC APIs](/jsonrpc/intro).
+
+For more commands, see the [`reth node` CLI reference](/cli/cli).
+:::
+
+The EL \<> CL communication happens over the [Engine API](https://github.com/ethereum/execution-apis/blob/main/src/engine/common.md), which is by default exposed at `http://localhost:8551`. The connection is authenticated over JWT using a JWT secret which is auto-generated by Reth and placed in a file called `jwt.hex` in the data directory, which on Linux by default is `$HOME/.local/share/reth/` (`/Users//Library/Application Support/reth/mainnet/jwt.hex` on macOS).
You can override this path using the `--authrpc.jwtsecret` option. You MUST use the same JWT secret in BOTH Reth and the chosen Consensus Layer. If you want to override the address or port, you can use the `--authrpc.addr` and `--authrpc.port` options, respectively.
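+
+For example, pointing both the secret and the server address at explicit values might look like this (the path is illustrative):
+
+```bash
+reth node \
+  --authrpc.jwtsecret /path/to/jwt.hex \
+  --authrpc.addr 127.0.0.1 \
+  --authrpc.port 8551
+```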
@@ -62,24 +73,24 @@ lighthouse bn \
If you don't intend on running validators on your node you can add:
-``` bash
+```bash
--disable-deposit-contract-sync
```
-The `--checkpoint-sync-url` argument value can be replaced with any checkpoint sync endpoint from a [community maintained list](https://eth-clients.github.io/checkpoint-sync-endpoints/#mainnet).
+The `--checkpoint-sync-url` argument value can be replaced with any checkpoint sync endpoint from a [community maintained list](https://eth-clients.github.io/checkpoint-sync-endpoints/#mainnet).
Your Reth node should start receiving "fork choice updated" messages, and begin syncing the chain.
## Verify the chain is growing
You can easily verify that by inspecting the logs, and seeing that headers are arriving in Reth. Sit back now and wait for the stages to run!
-In the meantime, consider setting up [observability](./observability.md) to monitor your node's health or [test the JSON RPC API](../jsonrpc/intro.md).
+In the meantime, consider setting up [observability](/run/monitoring) to monitor your node's health or [test the JSON RPC API](/jsonrpc/intro).
-
+{/* TODO: Add more logs to help node operators debug any weird CL to EL messages! */}
-[installation]: ./../installation/installation.md
+[installation]: ./../installation/installation
[docs]: https://github.com/paradigmxyz/reth/tree/main/docs
-[metrics]: https://github.com/paradigmxyz/reth/blob/main/docs/design/metrics.md#current-metrics
+[metrics]: https://github.com/paradigmxyz/reth/blob/main/docs/design/metrics.md#current-metrics
## Running without a Consensus Layer
@@ -90,7 +101,8 @@ We provide a method for running Reth without a Consensus Layer via the `--debug.
You can use `--debug.etherscan` to run Reth with a fake consensus client that advances the chain using recent blocks on Etherscan. This requires an Etherscan API key (set via `ETHERSCAN_API_KEY` environment variable). Optionally, specify a custom API URL with `--debug.etherscan <URL>`.
Example:
+
```bash
export ETHERSCAN_API_KEY=your_api_key_here
reth node --debug.etherscan
-```
\ No newline at end of file
+```
diff --git a/book/vocs/docs/pages/run/ethereum/snapshots.mdx b/book/vocs/docs/pages/run/ethereum/snapshots.mdx
new file mode 100644
index 0000000000..116d4359e5
--- /dev/null
+++ b/book/vocs/docs/pages/run/ethereum/snapshots.mdx
@@ -0,0 +1 @@
+# Snapshots
\ No newline at end of file
diff --git a/book/vocs/docs/pages/run/faq.mdx b/book/vocs/docs/pages/run/faq.mdx
new file mode 100644
index 0000000000..bdd0a9f68e
--- /dev/null
+++ b/book/vocs/docs/pages/run/faq.mdx
@@ -0,0 +1,11 @@
+# FAQ
+
+1. [Transaction Types](/run/faq/transactions) - Learn about the transaction types supported by Reth.
+
+2. [Pruning & Full Node](/run/faq/pruning) - Understand the differences between archive nodes, full nodes, and pruned nodes. Learn how to configure pruning options and what RPC methods are available for each node type.
+
+3. [Ports](/run/faq/ports) - Information about the network ports used by Reth for P2P communication, JSON-RPC APIs, and the Engine API for consensus layer communication.
+
+4. [Profiling](/run/faq/profiling) - Performance profiling techniques and tools for analyzing Reth node performance, including CPU profiling, memory analysis, and bottleneck identification.
+
+5. [Sync OP Mainnet](/run/faq/sync-op-mainnet) - Detailed guide for syncing a Reth node with OP Mainnet, including specific configuration requirements and considerations for the Optimism ecosystem.
diff --git a/book/vocs/docs/pages/run/faq/ports.mdx b/book/vocs/docs/pages/run/faq/ports.mdx
new file mode 100644
index 0000000000..f9a3ba9950
--- /dev/null
+++ b/book/vocs/docs/pages/run/faq/ports.mdx
@@ -0,0 +1,42 @@
+---
+description: Ports used by Reth.
+---
+
+# Ports
+
+This section provides essential information about the ports used by Reth, their primary purposes, and recommendations for exposure settings.
+
+## Peering Ports
+
+- **Port:** `30303`
+- **Protocol:** TCP and UDP
+- **Purpose:** Peering with other nodes for synchronization of blockchain data. Nodes communicate through this port to maintain network consensus and share updated information.
+- **Exposure Recommendation:** This port should be exposed to enable seamless interaction and synchronization with other nodes in the network.
+
+## Metrics Port
+
+- **Port:** `9001`
+- **Protocol:** TCP
+- **Purpose:** This port is designated for serving metrics related to the system's performance and operation. It allows internal monitoring and data collection for analysis.
+- **Exposure Recommendation:** By default, this port should not be exposed to the public. It is intended for internal monitoring and analysis purposes.
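+
+If metrics are enabled, for instance by starting the node with `--metrics 127.0.0.1:9001`, the Prometheus endpoint can be inspected locally:
+
+```bash
+curl -s http://127.0.0.1:9001 | head -n 20
+```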
+
+## HTTP RPC Port
+
+- **Port:** `8545`
+- **Protocol:** TCP
+- **Purpose:** Port 8545 provides an HTTP-based Remote Procedure Call (RPC) interface. It enables external applications to interact with the blockchain by sending requests over HTTP.
+- **Exposure Recommendation:** Similar to the metrics port, exposing this port to the public is not recommended by default due to security considerations.
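+
+If you have enabled the HTTP server with `--http` and the `eth` API, you can sanity-check the port locally with a standard JSON-RPC request:
+
+```bash
+curl -s -X POST -H "Content-Type: application/json" \
+  -d '{"jsonrpc":"2.0","id":1,"method":"eth_blockNumber","params":[]}' \
+  http://127.0.0.1:8545
+```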
+
+## WS RPC Port
+
+- **Port:** `8546`
+- **Protocol:** TCP
+- **Purpose:** Port 8546 offers a WebSocket-based Remote Procedure Call (RPC) interface. It allows real-time communication between external applications and the blockchain.
+- **Exposure Recommendation:** As with the HTTP RPC port, the WS RPC port should not be exposed by default for security reasons.
+
+## Engine API Port
+
+- **Port:** `8551`
+- **Protocol:** TCP
+- **Purpose:** Port 8551 facilitates authenticated communication between the execution client (Reth) and the consensus client (CL) over the Engine API. It enables essential internal processes.
+- **Exposure Recommendation:** This port is not meant to be exposed to the public by default. It should be reserved for internal communication between vital components of the system.
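+
+A minimal host-firewall policy reflecting these recommendations might look like this (a `ufw` sketch; adapt it to your firewall and network layout):
+
+```bash
+sudo ufw allow 30303/tcp   # P2P peering
+sudo ufw allow 30303/udp   # P2P discovery
+# RPC (8545/8546), metrics (9001), and the Engine API (8551) stay unexposed
+```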
diff --git a/book/developers/profiling.md b/book/vocs/docs/pages/run/faq/profiling.mdx
similarity index 84%
rename from book/developers/profiling.md
rename to book/vocs/docs/pages/run/faq/profiling.mdx
index fdae94e2d4..123808ad2d 100644
--- a/book/developers/profiling.md
+++ b/book/vocs/docs/pages/run/faq/profiling.mdx
@@ -1,11 +1,8 @@
-# Profiling reth
+---
+description: Profiling and debugging memory usage in Reth.
+---
-#### Table of Contents
- - [Memory profiling](#memory-profiling)
- - [Jemalloc](#jemalloc)
- - [Monitoring memory usage](#monitoring-memory-usage)
- - [Limiting process memory](#limiting-process-memory)
- - [Understanding allocation with jeprof](#understanding-allocation-with-jeprof)
+# Profiling Reth
## Memory profiling
@@ -16,10 +13,11 @@ Reth is also a complex program, with many moving pieces, and it can be difficult
Understanding how to profile memory usage is an extremely valuable skill when faced with this type of problem, and can quickly help shed light on the root cause of a memory leak.
In this tutorial, we will be reviewing:
- * How to monitor reth's memory usage,
- * How to emulate a low-memory environment to lab-reproduce OOM crashes,
- * How to enable `jemalloc` and its built-in memory profiling, and
- * How to use `jeprof` to interpret heap profiles and identify potential root causes for a memory leak.
+
+- How to monitor reth's memory usage,
+- How to emulate a low-memory environment to lab-reproduce OOM crashes,
+- How to enable `jemalloc` and its built-in memory profiling, and
+- How to use `jeprof` to interpret heap profiles and identify potential root causes for a memory leak.
### Jemalloc
@@ -27,21 +25,24 @@ In this tutorial, we will be reviewing:
We've seen significant performance benefits in reth when using jemalloc, but will be primarily focusing on its profiling capabilities.
Jemalloc also provides tools for analyzing and visualizing its allocation profiles it generates, notably `jeprof`.
-
#### Enabling jemalloc in reth
+
Reth includes a `jemalloc` feature to explicitly use jemalloc instead of the system allocator:
+
```
cargo build --features jemalloc
```
While the `jemalloc` feature does enable jemalloc, reth has an additional feature, `profiling`, that must be used to enable heap profiling. This feature implicitly enables the `jemalloc`
feature as well:
+
```
cargo build --features jemalloc-prof
```
When performing a longer-running or performance-sensitive task with reth, such as a sync test or load benchmark, it's usually recommended to use the `maxperf` profile. However, the `maxperf`
profile does not enable debug symbols, which are required for tools like `perf` and `jemalloc` to produce results that a human can interpret. Reth includes a performance profile with debug symbols called `profiling`. To compile reth with debug symbols, jemalloc, profiling, and a performance profile:
+
```
cargo build --features jemalloc-prof --profile profiling
@@ -51,19 +52,39 @@ RUSTFLAGS="-C target-cpu=native" cargo build --features jemalloc-prof --profile
### Monitoring memory usage
-Reth's dashboard has a few metrics that are important when monitoring memory usage. The **Jemalloc memory** graph shows reth's memory usage. The *allocated* label shows the memory used by the reth process which cannot be reclaimed unless reth frees that memory. This metric exceeding the available system memory would cause reth to be killed by the OOM killer.
-
+Reth's dashboard has a few metrics that are important when monitoring memory usage. The **Jemalloc memory** graph shows reth's memory usage. The _allocated_ label shows the memory used by the reth process which cannot be reclaimed unless reth frees that memory. This metric exceeding the available system memory would cause reth to be killed by the OOM killer.
+
+
Some of reth's internal components also have metrics for the memory usage of certain data structures, usually data structures that are likely to contain many elements or may consume a lot of memory at peak load.
**The bodies downloader buffer**:
-
+
+
**The blockchain tree block buffer**:
-
+
+
**The transaction pool subpools**:
-
+
+
One of these metrics growing beyond, for example, 2GB is likely a bug and could lead to an OOM on a low-memory machine. This isn't likely to happen frequently, so in the best case these metrics can be used to
rule out these components as the source of a leak if an OOM is occurring.
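
If the node's metrics endpoint is enabled (for example with `reth node --metrics 127.0.0.1:9001`), the raw counters behind these charts can also be queried directly. A minimal sketch; the grep patterns are illustrative fragments of the metric names rather than exact matches:

```
curl -s 127.0.0.1:9001 | grep -Ei 'downloaders|blockchain_tree|transaction_pool'
```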
@@ -81,28 +102,37 @@ See the [canonical documentation for cgroups](https://git.kernel.org/pub/scm/lin
In order to use cgroups to limit process memory, the cgroup memory controller sometimes must be explicitly enabled as a kernel parameter. For example, the following line is sometimes necessary to enable cgroup memory limits on
Ubuntu machines that use GRUB:
+
```
GRUB_CMDLINE_LINUX_DEFAULT="cgroup_enable=memory"
```
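
On Ubuntu, this only takes effect after the GRUB configuration is regenerated and the machine is rebooted:

```
sudo update-grub
sudo reboot
```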
+
Then, create a named cgroup:
+
```
sudo cgcreate -t $USER:$USER -a $USER:$USER -g memory:rethMemory
```
+
The memory limit for the named cgroup can be set in `/sys/fs/cgroup/memory`. For example, this sets an 8 gigabyte memory limit:
+
```
echo 8G > /sys/fs/cgroup/memory/rethMemory/memory.limit_in_bytes
```
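
To confirm the limit took effect, read the value back (8G is 8589934592 bytes):

```
cat /sys/fs/cgroup/memory/rethMemory/memory.limit_in_bytes
8589934592
```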
+
If the intention of setting up the cgroup is to strictly limit memory and simulate OOMs, a high amount of swap may prevent those OOMs from happening.
To check swap, use `free -m`:
+
```
ubuntu@bench-box:~/reth$ free -m
               total        used        free      shared  buff/cache   available
Mem:          257668       10695      218760          12       28213      244761
Swap:           8191         159        8032
```
+
If this is a problem, it may be worth either adjusting the system swappiness or disabling swap overall.
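
For example, the kernel can be made less eager to swap, or swap can be disabled entirely; both are standard Linux commands, not reth-specific:

```
# Make the kernel less eager to swap (the default is usually 60)
sudo sysctl vm.swappiness=10

# Or disable all swap until the next reboot
sudo swapoff -a
```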
Finally, `cgexec` can be used to run reth under the cgroup:
+
```
cgexec -g memory:rethMemory reth node
```
@@ -111,11 +141,13 @@ cgexec -g memory:rethMemory reth node
When reth is built with the `jemalloc-prof` feature and debug symbols, profiling still needs to be configured and enabled at runtime. This is done with the `_RJEM_MALLOC_CONF` environment variable. Use the following
command to launch reth with jemalloc profiling enabled:
+
```
_RJEM_MALLOC_CONF=prof:true,lg_prof_interval:32,lg_prof_sample:19 reth node
```
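
With `lg_prof_interval:32`, jemalloc dumps a heap profile roughly every 2^32 bytes (4GiB) of allocation activity, writing files named like `jeprof.<pid>.<seq>.i<seq>.heap` into the working directory. As a sketch of the analysis step, assuming the binary was built under the `profiling` profile at `target/profiling/reth`:

```
# Top allocation sites from the most recent heap dump, in text form
jeprof --text target/profiling/reth $(ls -t jeprof.*.heap | head -n1)

# Or render an allocation call graph as SVG (requires graphviz)
jeprof --svg target/profiling/reth $(ls -t jeprof.*.heap | head -n1) > heap.svg
```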
If reth is not built properly, you will see this when you try to run reth:
+
```
~/p/reth (dan/managing-memory)> _RJEM_MALLOC_CONF=prof:true,lg_prof_interval:32,lg_prof_sample:19 reth node
<jemalloc>: Invalid conf pair: prof:true
diff --git a/book/run/pruning.md b/book/vocs/docs/pages/run/faq/pruning.mdx
similarity index 92%
rename from book/run/pruning.md
rename to book/vocs/docs/pages/run/faq/pruning.mdx
index 25d11b4e46..2a800b7bae 100644
--- a/book/run/pruning.md
+++ b/book/vocs/docs/pages/run/faq/pruning.mdx
@@ -1,8 +1,14 @@
+---
+description: Pruning and full node options in Reth.
+---
+
# Pruning & Full Node
-> Pruning and full node are new features of Reth,
-> and we will be happy to hear about your experience using them either
-> on [GitHub](https://github.com/paradigmxyz/reth/issues) or in the [Telegram group](https://t.me/paradigm_reth).
+:::info
+Pruning and full node are new features of Reth,
+and we will be happy to hear about your experience using them either
+on [GitHub](https://github.com/paradigmxyz/reth/issues) or in the [Telegram group](https://t.me/paradigm_reth).
+:::
By default, Reth runs as an archive node. Such nodes have all historical blocks and the state at each of these blocks
available for querying and tracing.
@@ -12,31 +18,31 @@ the steps for running Reth as a full node, what caveats to expect and how to con
## Basic concepts
-- Archive node — Reth node that has all historical data from genesis.
-- Pruned node — Reth node that has its historical data pruned partially or fully through
-  a [custom configuration](./config.md#the-prune-section).
-- Full Node — Reth node that has the latest state and historical data for only the last 10064 blocks available
-  for querying in the same way as an archive node.
+- Archive node — Reth node that has all historical data from genesis.
+- Pruned node — Reth node that has its historical data pruned partially or fully through
+  a [custom configuration](/run/configuration#the-prune-section).
+- Full Node — Reth node that has the latest state and historical data for only the last 10064 blocks available
+  for querying in the same way as an archive node.
-The node type that was chosen when first [running a node](./run-a-node.md) **cannot** be changed after
+The node type that was chosen when first [running a node](/run/overview) **cannot** be changed after
the initial sync. Turning Archive into Pruned, or Pruned into Full is not supported.
## Modes
### Archive Node
-Default mode, follow the steps from the previous chapter on [how to run on mainnet or official testnets](./mainnet.md).
+Default mode, follow the steps from the previous chapter on [how to run on mainnet or official testnets](/run/ethereum).
### Pruned Node
-To run Reth as a pruned node configured through a [custom configuration](./config.md#the-prune-section),
+To run Reth as a pruned node configured through a [custom configuration](/run/configuration#the-prune-section),
modify the `reth.toml` file and run Reth in the same way as archive node by following the steps from
-the previous chapter on [how to run on mainnet or official testnets](./mainnet.md).
+the previous chapter on [how to run on mainnet or official testnets](/run/ethereum).
### Full Node
To run Reth as a full node, follow the steps from the previous chapter on
-[how to run on mainnet or official testnets](./mainnet.md), and add a `--full` flag. For example:
+[how to run on mainnet or official testnets](/run/ethereum), and add a `--full` flag. For example:
```bash
reth node \
@@ -95,21 +101,21 @@ storage_history = { distance = 10_064 }
Meaning, it prunes:
-- Account History and Storage History up to the last 10064 blocks
-- All of Sender Recovery data. The caveat is that it's pruned gradually after the initial sync
- is completed, so the disk space is reclaimed slowly.
-- Receipts up to the last 10064 blocks, preserving all receipts with the logs from Beacon Deposit Contract
+- Account History and Storage History up to the last 10064 blocks
+- All of Sender Recovery data. The caveat is that it's pruned gradually after the initial sync
+ is completed, so the disk space is reclaimed slowly.
+- Receipts up to the last 10064 blocks, preserving all receipts with the logs from Beacon Deposit Contract
## RPC support
-As it was mentioned in the [pruning configuration chapter](./config.md#the-prune-section), there are several segments which can be pruned
+As mentioned in the [pruning configuration chapter](/run/configuration#the-prune-section), there are several segments which can be pruned
independently of each other:
-- Sender Recovery
-- Transaction Lookup
-- Receipts
-- Account History
-- Storage History
+- Sender Recovery
+- Transaction Lookup
+- Receipts
+- Account History
+- Storage History
Pruning of each of these segments disables different RPC methods, because the historical data or lookup indexes
become unavailable.
@@ -215,8 +221,8 @@ The following tables describe RPC methods available in the full node.
The following tables describe the requirements for prune segments, per RPC method:
-- ✅ — if the segment is pruned, the RPC method still works
-- ❌ - if the segment is pruned, the RPC method doesn't work anymore
+- ✅ — if the segment is pruned, the RPC method still works
+- ❌ - if the segment is pruned, the RPC method doesn't work anymore
#### `debug` namespace
diff --git a/book/run/sync-op-mainnet.md b/book/vocs/docs/pages/run/faq/sync-op-mainnet.mdx
similarity index 70%
rename from book/run/sync-op-mainnet.md
rename to book/vocs/docs/pages/run/faq/sync-op-mainnet.mdx
index 0e2090acbc..e895331288 100644
--- a/book/run/sync-op-mainnet.md
+++ b/book/vocs/docs/pages/run/faq/sync-op-mainnet.mdx
@@ -1,13 +1,17 @@
+---
+description: Syncing Reth with OP Mainnet and Bedrock state.
+---
+
# Sync OP Mainnet
To sync OP mainnet, Bedrock state needs to be imported as a starting point. There are currently two ways:
-* Minimal bootstrap **(recommended)**: only state snapshot at Bedrock block is imported without any OVM historical data.
-* Full bootstrap **(not recommended)**: state, blocks and receipts are imported. *Not recommended for now: [storage consistency issue](https://github.com/paradigmxyz/reth/pull/11099) tldr: sudden crash may break the node
+- Minimal bootstrap **(recommended)**: only state snapshot at Bedrock block is imported without any OVM historical data.
+- Full bootstrap **(not recommended)**: state, blocks and receipts are imported. Not recommended for now: [storage consistency issue](https://github.com/paradigmxyz/reth/pull/11099); tl;dr: a sudden crash may break the node.
## Minimal bootstrap (recommended)
-**The state snapshot at Bedrock block is required.** It can be exported from [op-geth](https://github.com/testinprod-io/op-erigon/blob/pcw109550/bedrock-db-migration/bedrock-migration.md#export-state) (**.jsonl**) or downloaded directly from [here](https://mega.nz/file/GdZ1xbAT#a9cBv3AqzsTGXYgX7nZc_3fl--tcBmOAIwIA5ND6kwc).
+**The state snapshot at Bedrock block is required.** It can be exported from [op-geth](https://github.com/testinprod-io/op-erigon/blob/pcw109550/bedrock-db-migration/bedrock-migration.md#export-state) (**.jsonl**) or downloaded directly from [here](https://mega.nz/file/GdZ1xbAT#a9cBv3AqzsTGXYgX7nZc_3fl--tcBmOAIwIA5ND6kwc).
Import the state snapshot
@@ -21,12 +25,11 @@ Sync the node to a recent finalized block (e.g. 125200000) to catch up close to
$ op-reth node --chain optimism --datadir op-mainnet --debug.tip 0x098f87b75c8b861c775984f9d5dbe7b70cbbbc30fc15adb03a5044de0144f2d0 # block #125200000
```
-
## Full bootstrap (not recommended)
**Not recommended for now**: [storage consistency issue](https://github.com/paradigmxyz/reth/pull/11099); tl;dr: a sudden crash may break the node.
-### Import state
+### Import state
To sync OP mainnet, the Bedrock datadir needs to be imported to use as a starting point.
Blocks lower than the OP mainnet Bedrock fork are built on the OVM and cannot be executed on the EVM.
@@ -35,15 +38,15 @@ execution in reth's sync pipeline.
Importing OP mainnet Bedrock datadir requires exported data:
-- Blocks [and receipts] below Bedrock
-- State snapshot at first Bedrock block
+- Blocks [and receipts] below Bedrock
+- State snapshot at first Bedrock block
### Manual Export Steps
-The `op-geth` Bedrock datadir can be downloaded from .
+The `op-geth` Bedrock datadir can be downloaded from [https://datadirs.optimism.io](https://datadirs.optimism.io).
To export the OVM chain from `op-geth`, clone the `testinprod-io/op-geth` repo and checkout
-. Commands to export blocks, receipts and state dump can be
+[testinprod-io/op-geth#1](https://github.com/testinprod-io/op-geth/pull/1). Commands to export blocks, receipts and state dump can be
found in `op-geth/migrate.sh`.
### Manual Import Steps
@@ -64,7 +67,7 @@ This step is optional. To run a full node, skip this step. If however receipts a
corresponding transactions must already be imported (see [step 1](#1-import-blocks)).
Imports a `.rlp` file of receipts, that has been exported with command specified in
- (command for exporting receipts uses custom RLP-encoding).
+[testinprod-io/op-geth#1](https://github.com/testinprod-io/op-geth/pull/1) (command for exporting receipts uses custom RLP-encoding).
Import of >100 million OVM receipts, from genesis to Bedrock, completes in 30 minutes.
@@ -86,7 +89,7 @@ $ op-reth init-state --chain optimism
## Sync from Bedrock to tip
Running the node with `--debug.tip <block hash>` syncs the node without help from CL until a fixed tip. The
-block hash can be taken from the latest block on .
+block hash can be taken from the latest block on [https://optimistic.etherscan.io](https://optimistic.etherscan.io).
Use `op-node` to track the tip. Start `op-node` with `--syncmode=execution-layer` and `--l2.enginekind=reth`. If `op-node`'s RPC
connection to L1 is over localhost, `--l1.trustrpc` can be set to improve performance.
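
As a sketch, such an `op-node` invocation could look as follows; the L1 endpoints are placeholders to be filled in:

```sh
op-node \
    --network=op-mainnet \
    --syncmode=execution-layer \
    --l2.enginekind=reth \
    --l1=<l1-rpc-url> \
    --l1.beacon=<l1-beacon-url> \
    --l2=http://localhost:8551 \
    --l2.jwt-secret=./jwt.hex \
    --l1.trustrpc
```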
diff --git a/book/run/transactions.md b/book/vocs/docs/pages/run/faq/transactions.mdx
similarity index 97%
rename from book/run/transactions.md
rename to book/vocs/docs/pages/run/faq/transactions.mdx
index edb3a24d76..a4d19df38d 100644
--- a/book/run/transactions.md
+++ b/book/vocs/docs/pages/run/faq/transactions.mdx
@@ -1,3 +1,7 @@
+---
+description: Overview of Ethereum transaction types in Reth.
+---
+
# Transaction types
Over time, the Ethereum network has undergone various upgrades and improvements to enhance transaction efficiency, security, and user experience. Four significant transaction types that have evolved are:
diff --git a/book/run/troubleshooting.md b/book/vocs/docs/pages/run/faq/troubleshooting.mdx
similarity index 52%
rename from book/run/troubleshooting.md
rename to book/vocs/docs/pages/run/faq/troubleshooting.mdx
index 7b8ec6ba19..3dafa678ac 100644
--- a/book/run/troubleshooting.md
+++ b/book/vocs/docs/pages/run/faq/troubleshooting.mdx
@@ -1,102 +1,107 @@
+---
+description: Troubleshooting common Reth node and database issues.
+---
+
# Troubleshooting
This page explains how to deal with the most common issues.
-- [Troubleshooting](#troubleshooting)
- - [Database](#database)
- - [Docker](#docker)
- - [Error code 13](#error-code-13)
- - [Slow database inserts and updates](#slow-database-inserts-and-updates)
- - [Compact the database](#compact-the-database)
- - [Re-sync from scratch](#re-sync-from-scratch)
- - [Database write error](#database-write-error)
- - [Concurrent database access error (using containers/Docker)](#concurrent-database-access-error-using-containersdocker)
- - [Hardware Performance Testing](#hardware-performance-testing)
- - [Disk Speed Testing with IOzone](#disk-speed-testing-with-iozone)
-
+- [Troubleshooting](#troubleshooting)
+ - [Database](#database)
+ - [Docker](#docker)
+ - [Error code 13](#error-code-13)
+ - [Slow database inserts and updates](#slow-database-inserts-and-updates)
+ - [Compact the database](#compact-the-database)
+ - [Re-sync from scratch](#re-sync-from-scratch)
+ - [Database write error](#database-write-error)
+ - [Concurrent database access error (using containers/Docker)](#concurrent-database-access-error-using-containersdocker)
+ - [Hardware Performance Testing](#hardware-performance-testing)
+ - [Disk Speed Testing with IOzone](#disk-speed-testing-with-iozone)
## Database
-### Docker
+### Docker
Externally accessing a `datadir` inside a named docker volume will usually come with folder/file ownership/permissions issues.
**It is not recommended** to use the path to the named volume as it will trigger an error code 13. `RETH_DB_PATH: /var/lib/docker/volumes/named_volume/_data/eth/db cargo r --examples db-access --path <db-path>` is **DISCOURAGED** and a mounted volume with the right permissions should be used instead.
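
For example, bind-mounting a host directory owned by the invoking user avoids the ownership problems of named volumes. A sketch; the in-container datadir path is an assumption based on reth's default Docker setup:

```bash
docker run -d \
    -v /home/$USER/rethdata:/root/.local/share/reth \
    ghcr.io/paradigmxyz/reth \
    node
```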
-### Error code 13
+### Error code 13
`the environment opened in read-only code: 13`
Externally accessing a database in a read-only folder is not supported, **UNLESS** there is no `mdbx.lck` present and `open_db_read_only` is called with `exclusive`, meaning that no node is syncing concurrently.
-If the error persists, ensure that you have the right `rx` permissions on the `datadir` **and its parent** folders. Eg. the following command should succeed:
+If the error persists, ensure that you have the right `rx` permissions on the `datadir` **and its parent** folders. E.g. the following command should succeed:
```bash,ignore
stat /full/path/datadir
```
-
### Slow database inserts and updates
If you're:
+
1. Running behind the tip
-2. Have slow canonical commit time according to the `Canonical Commit Latency Time` chart on [Grafana dashboard](./observability.md#prometheus--grafana) (more than 2-3 seconds)
-3. Seeing warnings in your logs such as
- ```console
- 2023-11-08T15:17:24.789731Z WARN providers::db: Transaction insertion took too long block_number=18528075 tx_num=2150227643 hash=0xb7de1d6620efbdd3aa8547c47a0ff09a7fd3e48ba3fd2c53ce94c6683ed66e7c elapsed=6.793759034s
- ```
+2. Have slow canonical commit time according to the `Canonical Commit Latency Time` chart on [Grafana dashboard](/run/monitoring#prometheus--grafana) (more than 2-3 seconds)
+3. Seeing warnings in your logs such as
+ ```console
+ 2023-11-08T15:17:24.789731Z WARN providers::db: Transaction insertion took too long block_number=18528075 tx_num=2150227643 hash=0xb7de1d6620efbdd3aa8547c47a0ff09a7fd3e48ba3fd2c53ce94c6683ed66e7c elapsed=6.793759034s
+ ```
then most likely you're experiencing issues with the [database freelist](https://github.com/paradigmxyz/reth/issues/5228).
-To confirm it, check if the values on the `Freelist` chart on [Grafana dashboard](./observability.md#prometheus--grafana)
+To confirm it, check if the values on the `Freelist` chart on [Grafana dashboard](/run/monitoring#prometheus--grafana)
are greater than 10M.
Currently, there are two main ways to fix this issue.
-
#### Compact the database
+
It will take around 5-6 hours and require **additional** disk space located on the same or different drive
-equal to the [freshly synced node](../installation/installation.md#hardware-requirements).
+equal to the size of a [freshly synced node](/installation/overview#hardware-requirements).
1. Clone Reth
- ```bash
- git clone https://github.com/paradigmxyz/reth
- cd reth
- ```
+ ```bash
+ git clone https://github.com/paradigmxyz/reth
+ cd reth
+ ```
2. Build database debug tools
- ```bash
- make db-tools
- ```
+ ```bash
+ make db-tools
+ ```
3. Run compaction (this step will take 5-6 hours, depending on the I/O speed)
- ```bash
- ./db-tools/mdbx_copy -c $(reth db path) reth_compact.dat
- ```
+ ```bash
+ ./db-tools/mdbx_copy -c $(reth db path) reth_compact.dat
+ ```
4. Stop Reth
5. Backup original database
- ```bash
- mv $(reth db path)/mdbx.dat reth_old.dat
- ```
+ ```bash
+ mv $(reth db path)/mdbx.dat reth_old.dat
+ ```
6. Move compacted database in place of the original database
- ```bash
- mv reth_compact.dat $(reth db path)/mdbx.dat
- ```
+ ```bash
+ mv reth_compact.dat $(reth db path)/mdbx.dat
+ ```
7. Start Reth
8. Confirm that the values on the `Freelist` chart are near zero and the values on the `Canonical Commit Latency Time` chart
-is less than 1 second.
+ is less than 1 second.
9. Delete original database
- ```bash
- rm reth_old.dat
- ```
+ ```bash
+ rm reth_old.dat
+ ```
#### Re-sync from scratch
+
It will take the same time as initial sync.
1. Stop Reth
-2. Drop the database using [`reth db drop`](../cli/reth/db/drop.md)
+2. Drop the database using [`reth db drop`](#TODO)
3. Start reth
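
Put together, the flow looks like this (a sketch; `reth db drop` prompts for confirmation before wiping):

```bash
# With the node stopped, drop all database tables
reth db drop

# Start the node again; it will re-sync from scratch
reth node
```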
### Database write error
If you encounter an irrecoverable database-related error, in most cases it's related to the RAM/NVMe/SSD you use. For example:
+
```console
Error: A stage encountered an irrecoverable error.
@@ -132,6 +137,7 @@ If you encounter an error while accessing the database from multiple processes a
```console
mdbx:0: panic: Assertion `osal_rdt_unlock() failed: err 1' failed.
```
+
or
```console
@@ -151,61 +157,71 @@ If your hardware performance is significantly lower than these reference numbers
### Disk Speed Testing with [IOzone](https://linux.die.net/man/1/iozone)
1. Test disk speed:
- ```bash
- iozone -e -t1 -i0 -i2 -r1k -s1g /tmp
- ```
- Reference numbers (on Latitude c3.large.x86):
- ```console
- Children see throughput for 1 initial writers = 907733.81 kB/sec
- Parent sees throughput for 1 initial writers = 907239.68 kB/sec
- Children see throughput for 1 rewriters = 1765222.62 kB/sec
- Parent sees throughput for 1 rewriters = 1763433.35 kB/sec
- Children see throughput for 1 random readers = 1557497.38 kB/sec
- Parent sees throughput for 1 random readers = 1554846.58 kB/sec
- Children see throughput for 1 random writers = 984428.69 kB/sec
- Parent sees throughput for 1 random writers = 983476.67 kB/sec
- ```
+ ```bash
+ iozone -e -t1 -i0 -i2 -r1k -s1g /tmp
+ ```
+
+ Reference numbers (on Latitude c3.large.x86):
+
+ ```console
+ Children see throughput for 1 initial writers = 907733.81 kB/sec
+ Parent sees throughput for 1 initial writers = 907239.68 kB/sec
+ Children see throughput for 1 rewriters = 1765222.62 kB/sec
+ Parent sees throughput for 1 rewriters = 1763433.35 kB/sec
+ Children see throughput for 1 random readers = 1557497.38 kB/sec
+ Parent sees throughput for 1 random readers = 1554846.58 kB/sec
+ Children see throughput for 1 random writers = 984428.69 kB/sec
+ Parent sees throughput for 1 random writers = 983476.67 kB/sec
+ ```
+
2. Test disk speed with memory-mapped files:
- ```bash
- iozone -B -G -e -t1 -i0 -i2 -r1k -s1g /tmp
- ```
- Reference numbers (on Latitude c3.large.x86):
- ```console
- Children see throughput for 1 initial writers = 56471.06 kB/sec
- Parent sees throughput for 1 initial writers = 56365.14 kB/sec
- Children see throughput for 1 rewriters = 241650.69 kB/sec
- Parent sees throughput for 1 rewriters = 239067.96 kB/sec
- Children see throughput for 1 random readers = 6833161.00 kB/sec
- Parent sees throughput for 1 random readers = 5597659.65 kB/sec
- Children see throughput for 1 random writers = 220248.53 kB/sec
- Parent sees throughput for 1 random writers = 219112.26 kB/sec
+ ```bash
+ iozone -B -G -e -t1 -i0 -i2 -r1k -s1g /tmp
+ ```
+
+ Reference numbers (on Latitude c3.large.x86):
+
+ ```console
+ Children see throughput for 1 initial writers = 56471.06 kB/sec
+ Parent sees throughput for 1 initial writers = 56365.14 kB/sec
+ Children see throughput for 1 rewriters = 241650.69 kB/sec
+ Parent sees throughput for 1 rewriters = 239067.96 kB/sec
+ Children see throughput for 1 random readers = 6833161.00 kB/sec
+ Parent sees throughput for 1 random readers = 5597659.65 kB/sec
+ Children see throughput for 1 random writers = 220248.53 kB/sec
+ Parent sees throughput for 1 random writers = 219112.26 kB/sec
```
### RAM Speed and Health Testing
1. Check RAM speed with [lshw](https://linux.die.net/man/1/lshw):
- ```bash
- sudo lshw -short -C memory
- ```
- Look for the frequency in the output. Reference output:
- ```console
- H/W path Device Class Description
- ================================================================
- /0/24/0 memory 64GiB DIMM DDR4 Synchronous Registered (Buffered) 3200 MHz (0.3 ns)
- /0/24/1 memory 64GiB DIMM DDR4 Synchronous Registered (Buffered) 3200 MHz (0.3 ns)
- ...
- ```
+ ```bash
+ sudo lshw -short -C memory
+ ```
+
+ Look for the frequency in the output. Reference output:
+
+ ```console
+ H/W path Device Class Description
+ ================================================================
+ /0/24/0 memory 64GiB DIMM DDR4 Synchronous Registered (Buffered) 3200 MHz (0.3 ns)
+ /0/24/1 memory 64GiB DIMM DDR4 Synchronous Registered (Buffered) 3200 MHz (0.3 ns)
+ ...
+ ```
2. Test RAM health with [memtester](https://linux.die.net/man/8/memtester):
- ```bash
- sudo memtester 10G
- ```
- This will take a while. You can test with a smaller amount first:
- ```bash
- sudo memtester 1G 1
- ```
- All checks should report "ok".
+ ```bash
+ sudo memtester 10G
+ ```
+
+ This will take a while. You can test with a smaller amount first:
+
+ ```bash
+ sudo memtester 1G 1
+ ```
+
+ All checks should report "ok".
diff --git a/book/run/observability.md b/book/vocs/docs/pages/run/monitoring.mdx
similarity index 92%
rename from book/run/observability.md
rename to book/vocs/docs/pages/run/monitoring.mdx
index aa4e9387a0..d09b795dc4 100644
--- a/book/run/observability.md
+++ b/book/vocs/docs/pages/run/monitoring.mdx
@@ -1,3 +1,7 @@
+---
+description: Reth observability and metrics with Prometheus and Grafana.
+---
+
# Observability with Prometheus & Grafana
Reth exposes a number of metrics which can be enabled by adding the `--metrics` flag:
@@ -41,6 +45,7 @@ brew install grafana
### Linux
#### Debian/Ubuntu
+
```bash
# Install Prometheus
# Visit https://prometheus.io/download/ for the latest version
@@ -58,6 +63,7 @@ sudo apt-get install grafana
```
#### Fedora/RHEL/CentOS
+
```bash
# Install Prometheus
# Visit https://prometheus.io/download/ for the latest version
@@ -74,16 +80,18 @@ sudo dnf install -y https://dl.grafana.com/oss/release/grafana-latest-1.x86_64.r
### Windows
#### Using Chocolatey
+
```powershell
choco install prometheus
choco install grafana
```
#### Manual installation
+
1. Download the latest Prometheus from [prometheus.io/download](https://prometheus.io/download/)
- - Select the Windows binary (.zip) for your architecture (typically windows-amd64)
+ - Select the Windows binary (.zip) for your architecture (typically windows-amd64)
2. Download the latest Grafana from [grafana.com/grafana/download](https://grafana.com/grafana/download)
- - Choose the Windows installer (.msi) or standalone version
+ - Choose the Windows installer (.msi) or standalone version
3. Extract Prometheus to a location of your choice (e.g., `C:\prometheus`)
4. Install Grafana by running the installer or extracting the standalone version
5. Configure Prometheus and Grafana to run as services if needed
@@ -95,7 +103,7 @@ Then, kick off the Prometheus and Grafana services:
brew services start prometheus
brew services start grafana
-# For Linux (systemd-based distributions)
+# For Linux (systemd-based distributions)
sudo systemctl start prometheus
sudo systemctl start grafana-server
@@ -110,9 +118,9 @@ You can find an example config for the Prometheus service in the repo here: [`et
Depending on your installation you may find the config for your Prometheus service at:
-- OSX: `/opt/homebrew/etc/prometheus.yml`
-- Linuxbrew: `/home/linuxbrew/.linuxbrew/etc/prometheus.yml`
-- Others: `/usr/local/etc/prometheus/prometheus.yml`
+- OSX: `/opt/homebrew/etc/prometheus.yml`
+- Linuxbrew: `/home/linuxbrew/.linuxbrew/etc/prometheus.yml`
+- Others: `/usr/local/etc/prometheus/prometheus.yml`
Next, open up "localhost:3000" in your browser, which is the default URL for Grafana. Here, "admin" is the default for both the username and password.
@@ -130,7 +138,7 @@ In this runbook, we took you through starting the node, exposing different log l
This will all be very useful to you, whether you're simply running a home node and want to keep an eye on its performance, or if you're a contributor and want to see the effect that your (or others') changes have on Reth's operations.
-[installation]: ../installation/installation.md
+[installation]: ../installation/installation
[release-profile]: https://doc.rust-lang.org/cargo/reference/profiles.html#release
[docs]: https://github.com/paradigmxyz/reth/tree/main/docs
-[metrics]: https://github.com/paradigmxyz/reth/blob/main/docs/design/metrics.md#current-metrics
+[metrics]: https://github.com/paradigmxyz/reth/blob/main/docs/design/metrics.md#current-metrics
diff --git a/book/vocs/docs/pages/run/networks.mdx b/book/vocs/docs/pages/run/networks.mdx
new file mode 100644
index 0000000000..1bb6593b2e
--- /dev/null
+++ b/book/vocs/docs/pages/run/networks.mdx
@@ -0,0 +1 @@
+# Networks
diff --git a/book/run/optimism.md b/book/vocs/docs/pages/run/opstack.mdx
similarity index 95%
rename from book/run/optimism.md
rename to book/vocs/docs/pages/run/opstack.mdx
index a3f747dd1f..86e9ad7243 100644
--- a/book/run/optimism.md
+++ b/book/vocs/docs/pages/run/opstack.mdx
@@ -1,7 +1,12 @@
+---
+description: Running Reth on Optimism and OP Stack chains.
+---
+
# Running Reth on OP Stack chains
`reth` ships with the `optimism` feature flag in several crates, including the binary, enabling support for OP Stack chains out of the box. Optimism has a small diff from the [L1 EELS][l1-el-spec],
comprising the following key changes:
+
1. A new transaction type, [`0x7E (Deposit)`][deposit-spec], which is used to deposit funds from L1 to L2.
1. Modifications to the `PayloadAttributes` that allow the [sequencer][sequencer] to submit transactions to the EL through the Engine API. Payloads will be built with deposit transactions at the top of the block,
with the first deposit transaction always being the "L1 Info Transaction."
@@ -19,6 +24,7 @@ Since 1.4.0 op-reth has built in support for all chains in the [superchain regis
## Running on Optimism
You will need three things to run `op-reth`:
+
1. An archival L1 node, synced to the settlement layer of the OP Stack chain you want to sync (e.g. `reth`, `geth`, `besu`, `nethermind`, etc.)
1. A rollup node (e.g. `op-node`, `magi`, `hildr`, etc.)
1. An instance of `op-reth`.
@@ -40,6 +46,7 @@ This will install the `op-reth` binary to `~/.cargo/bin/op-reth`.
### Installing a Rollup Node
Next, you'll need to install a [Rollup Node][rollup-node-spec], which is the equivalent to the Consensus Client on the OP Stack. Available options include:
+
1. [`op-node`][op-node]
1. [`magi`][magi]
1. [`hildr`][hildr]
@@ -49,11 +56,13 @@ For the sake of this tutorial, we'll use the reference implementation of the Rol
### Running `op-reth`
op-reth supports additional OP Stack specific CLI arguments:
+
+1. `--rollup.sequencer-http <uri>` - The sequencer endpoint to connect to. Transactions sent to the `op-reth` EL are also forwarded to this sequencer endpoint for inclusion, as the sequencer is the entity that builds blocks on OP Stack chains.
1. `--rollup.disable-tx-pool-gossip` - Disables gossiping of transactions in the mempool to peers. This can be omitted for personal nodes, though providers should always opt to enable this flag.
1. `--rollup.discovery.v4` - Enables the discovery v4 protocol for peer discovery. By default, op-reth, similar to op-geth, has discovery v5 enabled and discovery v4 disabled, whereas regular reth has discovery v4 enabled and discovery v5 disabled.
First, ensure that your L1 archival node is running and synced to tip. Also make sure that the beacon node / consensus layer client is running and has http APIs enabled. Then, start `op-reth` with the `--rollup.sequencer-http` flag set to the `Base Mainnet` sequencer endpoint:
+
```sh
op-reth node \
--chain base \
@@ -65,6 +74,7 @@ op-reth node \
```
Then, once `op-reth` has been started, start up the `op-node`:
+
```sh
op-node \
--network="base-mainnet" \
@@ -81,17 +91,15 @@ op-node \
Consider adding the `--l1.trustrpc` flag to improve performance if the connection to L1 is over localhost.
[l1-el-spec]: https://github.com/ethereum/execution-specs
-[rollup-node-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/rollup-node.md
+[rollup-node-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/rollup-node.md
[op-geth-forkdiff]: https://op-geth.optimism.io
-[sequencer]: https://github.com/ethereum-optimism/specs/blob/main/specs/background.md#sequencers
+[sequencer]: https://github.com/ethereum-optimism/specs/blob/main/specs/background.md#sequencers
[op-stack-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs
-[l2-el-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/exec-engine.md
-[deposit-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/deposits.md
-[derivation-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/derivation.md
+[l2-el-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/exec-engine.md
+[deposit-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/deposits.md
+[derivation-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/derivation.md
[superchain-registry]: https://github.com/ethereum-optimism/superchain-registry
-
[op-node-docker]: https://console.cloud.google.com/artifacts/docker/oplabs-tools-artifacts/us/images/op-node
-
[reth]: https://github.com/paradigmxyz/reth
[op-node]: https://github.com/ethereum-optimism/optimism/tree/develop/op-node
[magi]: https://github.com/a16z/magi
diff --git a/book/vocs/docs/pages/run/opstack/op-mainnet-caveats.mdx b/book/vocs/docs/pages/run/opstack/op-mainnet-caveats.mdx
new file mode 100644
index 0000000000..94f1024dfc
--- /dev/null
+++ b/book/vocs/docs/pages/run/opstack/op-mainnet-caveats.mdx
@@ -0,0 +1 @@
+# Caveats OP-Mainnet
\ No newline at end of file
diff --git a/book/vocs/docs/pages/run/overview.mdx b/book/vocs/docs/pages/run/overview.mdx
new file mode 100644
index 0000000000..06b595ad48
--- /dev/null
+++ b/book/vocs/docs/pages/run/overview.mdx
@@ -0,0 +1,47 @@
+---
+description: Guide to running a Reth node.
+---
+
+# Run a Node
+
+Congratulations, now that you have installed Reth, it's time to run it!
+
+In this section, we'll guide you through running a Reth node on various networks and configurations.
+
+## Networks
+
+Choose the network you want to run your node on:
+
+- **[Ethereum](/run/ethereum)** - Run a node on Ethereum mainnet or testnets
+- **[OP-stack](/run/opstack)** - Run a node on OP Stack chains like Base, Optimism, and others
+- **[Private testnets](/run/private-testnets)** - Set up and run private test networks
+
+## Configuration & Monitoring
+
+Learn how to configure and monitor your node:
+
+- **[Configuration](/run/configuration)** - Configure your node using reth.toml
+- **[Monitoring](/run/monitoring)** - Set up logs, metrics, and observability
+
+## Frequently Asked Questions
+
+Find answers to common questions and troubleshooting tips:
+
+- **[Transaction Types](/run/faq/transactions)** - Understanding different transaction types
+- **[Pruning & Full Node](/run/faq/pruning)** - Storage management and node types
+- **[Ports](/run/faq/ports)** - Network port configuration
+- **[Profiling](/run/faq/profiling)** - Performance profiling and optimization
+- **[Sync OP Mainnet](/run/faq/sync-op-mainnet)** - Tips for syncing OP Mainnet
+
+## List of Supported Networks
+
+| Network | Chain ID | RPC URL |
+| --------------- | -------- | ------------------------------------ |
+| Ethereum | 1 | https://reth-ethereum.ithaca.xyz/rpc |
+| Sepolia Testnet | 11155111 | https://sepolia.drpc.org |
+| Base | 8453 | https://base-mainnet.rpc.ithaca.xyz |
+| Base Sepolia | 84532 | https://base-sepolia.rpc.ithaca.xyz |
+
+:::tip
+Want to add more networks to this table? Feel free to [contribute](https://github.com/paradigmxyz/reth/edit/main/book/vocs/docs/pages/run/overview.mdx) by submitting a PR with additional networks that Reth supports!
+:::
diff --git a/book/run/private-testnet.md b/book/vocs/docs/pages/run/private-testnets.mdx
similarity index 90%
rename from book/run/private-testnet.md
rename to book/vocs/docs/pages/run/private-testnets.mdx
index 28253ca9f0..af281fc512 100644
--- a/book/run/private-testnet.md
+++ b/book/vocs/docs/pages/run/private-testnets.mdx
@@ -1,10 +1,17 @@
+---
+description: Running Reth in a private testnet using Kurtosis.
+---
+
# Run Reth in a private testnet using Kurtosis
+
For those who need a private testnet to validate functionality or scale with Reth.
## Using Docker locally
+
This guide uses [Kurtosis' ethereum-package](https://github.com/ethpandaops/ethereum-package) and assumes you have Kurtosis and Docker installed and have Docker already running on your machine.
-* Go [here](https://docs.kurtosis.com/install/) to install Kurtosis
-* Go [here](https://docs.docker.com/get-docker/) to install Docker
+
+- Go [here](https://docs.kurtosis.com/install/) to install Kurtosis
+- Go [here](https://docs.docker.com/get-docker/) to install Docker
The [`ethereum-package`](https://github.com/ethpandaops/ethereum-package) is a [package](https://docs.kurtosis.com/advanced-concepts/packages) for a general purpose Ethereum testnet definition used for instantiating private testnets at any scale over Docker or Kubernetes, locally or in the cloud. This guide will go through how to spin up a local private testnet with Reth and various CL clients locally. Specifically, you will instantiate a 2-node network over Docker with Reth/Lighthouse and Reth/Teku client combinations.
@@ -13,17 +20,19 @@ To see all possible configurations and flags you can use, including metrics and
Genesis data will be generated using this [genesis-generator](https://github.com/ethpandaops/ethereum-genesis-generator) to be used to bootstrap the EL and CL clients for each node. The end result will be a private testnet with nodes deployed as Docker containers in an ephemeral, isolated environment on your machine called an [enclave](https://docs.kurtosis.com/advanced-concepts/enclaves/). Read more about how the `ethereum-package` works by going [here](https://github.com/ethpandaops/ethereum-package/).
### Step 1: Define the parameters and shape of your private network
+
First, in your home directory, create a file with the name `network_params.yaml` with the following contents:
+
```yaml
participants:
- - el_type: reth
- el_image: ghcr.io/paradigmxyz/reth
- cl_type: lighthouse
- cl_image: sigp/lighthouse:latest
- - el_type: reth
- el_image: ghcr.io/paradigmxyz/reth
- cl_type: teku
- cl_image: consensys/teku:latest
+ - el_type: reth
+ el_image: ghcr.io/paradigmxyz/reth
+ cl_type: lighthouse
+ cl_image: sigp/lighthouse:latest
+ - el_type: reth
+ el_image: ghcr.io/paradigmxyz/reth
+ cl_type: teku
+ cl_image: consensys/teku:latest
```
> [!TIP]
@@ -32,10 +41,13 @@ participants:
### Step 2: Spin up your network
Next, run the following command from your command line:
+
```bash
kurtosis run github.com/ethpandaops/ethereum-package --args-file ~/network_params.yaml --image-download always
```
+
+Kurtosis will spin up an [enclave](https://docs.kurtosis.com/advanced-concepts/enclaves/) (i.e. an ephemeral, isolated environment) and begin to configure and instantiate the nodes in your network. In the end, Kurtosis will print the services running in your enclave that form your private testnet alongside all the container ports and files that were generated & used to start up the private testnet. Here is a sample output:
+
```console
INFO[2024-07-09T12:01:35+02:00] ========================================================
INFO[2024-07-09T12:01:35+02:00] || Created enclave: silent-mountain ||
@@ -88,14 +100,18 @@ f0a7d5343346 vc-1-reth-lighthouse metrics: 8080/tc
Great! You now have a private network with 2 full Ethereum nodes on your local machine over Docker - one that is a Reth/Lighthouse pair and another that is Reth/Teku. Check out the [Kurtosis docs](https://docs.kurtosis.com/cli) to learn about the various ways you can interact with and inspect your network.
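
For example, the enclave from the sample output above can be inspected, and service logs followed, directly from the CLI:

```bash
# List running enclaves
kurtosis enclave ls

# Show all services and ports inside the enclave
kurtosis enclave inspect silent-mountain

# Follow the logs of a single service
kurtosis service logs silent-mountain vc-1-reth-lighthouse -f
```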
## Using Kurtosis on Kubernetes
+
Kurtosis packages are portable and reproducible, meaning they will work the same way over Docker or Kubernetes, locally or on remote infrastructure. For use cases that require a larger scale, Kurtosis can be deployed on Kubernetes by following these docs [here](https://docs.kurtosis.com/k8s/).
## Running the network with additional services
+
The [`ethereum-package`](https://github.com/ethpandaops/ethereum-package) comes with many optional flags and arguments you can enable for your private network. Some include:
-- A Grafana + Prometheus instance
-- A transaction spammer called [`tx-fuzz`](https://github.com/MariusVanDerWijden/tx-fuzz)
-- [A network metrics collector](https://github.com/dapplion/beacon-metrics-gazer)
-- Flashbot's `mev-boost` implementation of PBS (to test/simulate MEV workflows)
+
+- A Grafana + Prometheus instance
+- A transaction spammer called [`tx-fuzz`](https://github.com/MariusVanDerWijden/tx-fuzz)
+- [A network metrics collector](https://github.com/dapplion/beacon-metrics-gazer)
+- Flashbots' `mev-boost` implementation of PBS (to test/simulate MEV workflows)
### Questions?
+
Please reach out to the [Kurtosis discord](https://discord.com/invite/6Jjp9c89z9) should you have any questions about how to use the `ethereum-package` for your private testnet needs. Thanks!
diff --git a/book/installation/installation.md b/book/vocs/docs/pages/run/system-requirements.mdx
similarity index 68%
rename from book/installation/installation.md
rename to book/vocs/docs/pages/run/system-requirements.mdx
index 602601b9f3..5db81bc29b 100644
--- a/book/installation/installation.md
+++ b/book/vocs/docs/pages/run/system-requirements.mdx
@@ -1,31 +1,38 @@
-# Installation
-
-Reth runs on Linux and macOS (Windows tracked).
-
-There are three core methods to obtain Reth:
-
-* [Pre-built binaries](./binaries.md)
-* [Docker images](./docker.md)
-* [Building from source.](./source.md)
-
-> **Note**
->
-> If you have Docker installed, we recommend using the [Docker Compose](./docker.md#using-docker-compose) configuration
-> that will get you Reth, Lighthouse (Consensus Client), Prometheus and Grafana running and syncing with just one command.
-
-## Hardware Requirements
+# System Requirements
The hardware requirements for running Reth depend on the node configuration and can change over time as the network grows or new features are implemented.
The most important requirement is by far the disk, whereas CPU and RAM requirements are relatively flexible.
+## Ethereum Mainnet Requirements
+
+Below are the requirements for Ethereum Mainnet:
+
| | Archive Node | Full Node |
-|-----------|---------------------------------------|---------------------------------------|
+| --------- | ------------------------------------- | ------------------------------------- |
| Disk | At least 2.8TB (TLC NVMe recommended) | At least 1.8TB (TLC NVMe recommended) |
| Memory | 16GB+ | 8GB+ |
| CPU | Higher clock speed over core count | Higher clock speeds over core count |
| Bandwidth | Stable 24Mbps+ | Stable 24Mbps+ |
+## Base System Requirements
+
+Below are the minimum system requirements for running a Base node as of 2025-06-23, block number 31.9M:
+
+| | Archive Node | Full Node |
+| --------- | ------------------------------------- | ------------------------------------- |
+| Disk | At least 4.1TB (TLC NVMe recommended) | At least 2TB (TLC NVMe recommended) |
+| Memory | 128GB+ | 128GB+ |
+| CPU | 6 cores+, Higher clock speed over core count | 6 cores+, Higher clock speed over core count |
+| Bandwidth | Stable 24Mbps+ | Stable 24Mbps+ |
+
+:::note
+**On CPU clock speeds**: We've seen >1s payload latency on EPYC GENOA 9254 (2.9 GHz/3.9 GHz); the best performance we've seen is on AMD EPYC™ 4004.
+
+**On CPU cores for Base**: 5+ cores are needed because the state root task splits work into separate threads that run in parallel with each other. The state root task is generally more performant and can scale with the number of CPU cores, while regular state root always uses only one core. This is not a requirement for Mainnet, but for Base you may encounter block processing latencies of more than 2s, which can lead to lagging behind the head of the chain.
+:::
+
+
#### QLC and TLC
It is crucial to understand the difference between QLC and TLC NVMe drives when considering the disk requirement.
@@ -34,29 +41,29 @@ QLC (Quad-Level Cell) NVMe drives utilize four bits of data per cell, allowing f
TLC (Triple-Level Cell) NVMe drives, on the other hand, use three bits of data per cell. While they have a slightly lower storage density compared to QLC drives, TLC drives offer faster performance. They typically have higher read and write speeds, making them more suitable for demanding tasks such as data-intensive applications, gaming, and multimedia editing. TLC drives also tend to have a higher endurance, making them more durable and longer-lasting.
-Prior to purchasing an NVMe drive, it is advisable to research and determine whether the disk will be based on QLC or TLC technology. An overview of recommended and not-so-recommended NVMe boards can be found at [here]( https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038).
+Prior to purchasing an NVMe drive, it is advisable to research and determine whether the disk will be based on QLC or TLC technology. An overview of recommended and not-so-recommended NVMe boards can be found [here](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038).
### Disk
There are multiple types of disks to sync Reth, with varying size requirements, depending on the syncing mode.
As of April 2025 at block number 22.1M:
-* Archive Node: At least 2.8TB is required
-* Full Node: At least 1.8TB is required
+- Archive Node: At least 2.8TB is required
+- Full Node: At least 1.8TB is required
NVMe based SSD drives are recommended for the best performance, with SATA SSDs being a cheaper alternative. HDDs are the cheapest option, but they will take the longest to sync, and are not recommended.
As of February 2024, syncing an Ethereum mainnet node to block 19.3M on NVMe drives takes about 50 hours, while on a GCP "Persistent SSD" it takes around 5 days.
-> **Note**
->
-> It is highly recommended to choose a TLC drive when using an NVMe drive, and not a QLC drive. See [the note](#qlc-and-tlc) above. A list of recommended drives can be found [here]( https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038).
+:::tip
+It is highly recommended to choose a TLC drive when using an NVMe drive, and not a QLC drive. See [the note](#qlc-and-tlc) above. A list of recommended drives can be found [here](https://gist.github.com/yorickdowne/f3a3e79a573bf35767cd002cc977b038).
+:::
### CPU
Most of the time during syncing is spent executing transactions, which is a single-threaded operation due to potential state dependencies of a transaction on previous ones.
-As a result, the number of cores matters less, but in general higher clock speeds are better. More cores are better for parallelizable [stages](https://github.com/paradigmxyz/reth/blob/main/docs/crates/stages.md) (like sender recovery or bodies downloading), but these stages are not the primary bottleneck for syncing.
+As a result, the number of cores matters less, but in general higher clock speeds are better. More cores are better for parallelizable [stages](https://github.com/paradigmxyz/reth/blob/main/docs/crates/stages.md) (like sender recovery or bodies downloading), but these stages are not the primary bottleneck for syncing.
### Memory
diff --git a/book/vocs/docs/pages/sdk/custom-node/modifications.mdx b/book/vocs/docs/pages/sdk/custom-node/modifications.mdx
new file mode 100644
index 0000000000..b375feb901
--- /dev/null
+++ b/book/vocs/docs/pages/sdk/custom-node/modifications.mdx
@@ -0,0 +1 @@
+# Modifying Node Components
diff --git a/book/vocs/docs/pages/sdk/custom-node/prerequisites.mdx b/book/vocs/docs/pages/sdk/custom-node/prerequisites.mdx
new file mode 100644
index 0000000000..8dbf0a1bf4
--- /dev/null
+++ b/book/vocs/docs/pages/sdk/custom-node/prerequisites.mdx
@@ -0,0 +1 @@
+# Prerequisites and Considerations
diff --git a/book/vocs/docs/pages/sdk/examples/modify-node.mdx b/book/vocs/docs/pages/sdk/examples/modify-node.mdx
new file mode 100644
index 0000000000..b8f21a06bb
--- /dev/null
+++ b/book/vocs/docs/pages/sdk/examples/modify-node.mdx
@@ -0,0 +1,16 @@
+# How to Modify an Existing Node
+
+This guide demonstrates how to extend a Reth node with custom functionality, including adding RPC endpoints, modifying transaction validation, and implementing custom services.
+
+## Adding Custom RPC Endpoints
+
+One of the most common modifications is adding custom RPC methods to expose additional functionality.
+
+### Basic Custom RPC Module
+
+
+## Next Steps
+
+- Explore [Standalone Components](/sdk/examples/standalone-components) for direct blockchain interaction
+- Learn about [Custom Node Building](/sdk/custom-node/prerequisites) for production deployments
+- Review [Type System](/sdk/typesystem/block) for working with blockchain data
diff --git a/book/vocs/docs/pages/sdk/examples/standalone-components.mdx b/book/vocs/docs/pages/sdk/examples/standalone-components.mdx
new file mode 100644
index 0000000000..3c16e1cf12
--- /dev/null
+++ b/book/vocs/docs/pages/sdk/examples/standalone-components.mdx
@@ -0,0 +1,12 @@
+# Using Standalone Components
+
+This guide demonstrates how to use Reth components independently without running a full node. This is useful for building tools, analyzers, indexers, or any application that needs direct access to blockchain data.
+
+## Direct Database Access
+
+
+## Next Steps
+
+- Learn about [Modifying Nodes](/sdk/examples/modify-node) to add functionality
+- Explore the [Type System](/sdk/typesystem/block) for working with data
+- Check [Custom Node Building](/sdk/custom-node/prerequisites) for production use
diff --git a/book/vocs/docs/pages/sdk/node-components.mdx b/book/vocs/docs/pages/sdk/node-components.mdx
new file mode 100644
index 0000000000..cdd4b93650
--- /dev/null
+++ b/book/vocs/docs/pages/sdk/node-components.mdx
@@ -0,0 +1,112 @@
+# Node Components
+
+Reth's modular architecture allows developers to customize and extend individual components of the node. Each component serves a specific purpose and can be replaced or modified to suit your needs.
+
+## Architecture Overview
+
+A Reth node consists of several key components that work together and can interact with each other:
+
+```mermaid
+graph LR
+ Network[Network] --> Pool[Transaction Pool]
+ Network --> Consensus[Consensus]
+ Pool --> DB[(Database)]
+ Consensus --> EVM
+ EVM --> DB[(Database)]
+ RPC[RPC Server] --> Pool
+ RPC --> DB
+ RPC --> EVM
+```
+
+## Core Components
+
+### [Network](/sdk/node-components/network)
+Handles P2P communication, peer discovery, and block/transaction propagation. The network component is responsible for:
+- Peer discovery and management
+- Transaction gossip
+- State synchronization (downloading blocks)
+- Protocol message handling
+
+### [Transaction Pool](/sdk/node-components/pool)
+Manages pending transactions before they're included in blocks:
+- Transaction validation
+- Ordering and prioritization
+- Transaction replacement logic
+- Pool size management and eviction
+
+### [Consensus](/sdk/node-components/consensus)
+Validates blocks according to protocol rules:
+- Header validation (e.g. gas limit, base fee)
+- Block body validation (e.g. transaction root)
+
+### [EVM](/sdk/node-components/evm)
+Executes transactions and manages state transitions:
+- Block execution
+- Transaction execution
+- Block building
+
+### [RPC](/sdk/node-components/rpc)
+Provides external API access to the node:
+- Standard Ethereum JSON-RPC methods
+- Custom endpoints
+- WebSocket subscriptions
+
+## Component Customization
+
+Each component can be customized through Reth's builder pattern:
+
+```rust
+use reth_ethereum::node::{EthereumNode, NodeBuilder};
+
+let node = NodeBuilder::new(config)
+ .with_types::()
+ .with_components(|ctx| {
+ // Use the ComponentBuilder to customize components
+ ctx.components_builder()
+ // Custom network configuration
+ .network(|network_builder| {
+ network_builder
+ .peer_manager(custom_peer_manager)
+ .build()
+ })
+ // Custom transaction pool
+ .pool(|pool_builder| {
+ pool_builder
+ .validator(custom_validator)
+ .ordering(custom_ordering)
+ .build()
+ })
+ // Custom consensus
+ .consensus(custom_consensus)
+ // Custom EVM configuration
+ .evm(|evm_builder| {
+ evm_builder
+ .with_precompiles(custom_precompiles)
+ .build()
+ })
+ // Build all components
+ .build()
+ })
+ .build()
+ .await?;
+```
+
+## Component Lifecycle
+
+Components follow a specific lifecycle, starting from node builder initialization through to shutdown:
+
+1. **Initialization**: Components are created with their dependencies
+2. **Configuration**: Settings and parameters are applied
+3. **Startup**: Components begin their main operations
+4. **Runtime**: Components process requests and events
+5. **Shutdown**: Graceful cleanup and resource release
+
+
+## Next Steps
+
+Explore each component in detail:
+- [Network Component](/sdk/node-components/network) - P2P and synchronization
+- [Transaction Pool](/sdk/node-components/pool) - Mempool management
+- [Consensus](/sdk/node-components/consensus) - Block validation
+- [EVM](/sdk/node-components/evm) - Transaction execution
+- [RPC](/sdk/node-components/rpc) - External APIs
diff --git a/book/vocs/docs/pages/sdk/node-components/consensus.mdx b/book/vocs/docs/pages/sdk/node-components/consensus.mdx
new file mode 100644
index 0000000000..1541d351d5
--- /dev/null
+++ b/book/vocs/docs/pages/sdk/node-components/consensus.mdx
@@ -0,0 +1,45 @@
+# Consensus Component
+
+The consensus component validates blocks according to Ethereum protocol rules, handles chain reorganizations, and manages the canonical chain state.
+
+## Overview
+
+The consensus component is responsible for:
+- Validating block headers and bodies
+- Verifying state transitions
+- Managing fork choice rules
+- Handling chain reorganizations
+- Tracking finalized and safe blocks
+- Validating blob transactions (EIP-4844)
+
+## Key Concepts
+
+### Block Validation
+The consensus component performs multiple validation steps:
+1. **Pre-execution validation**: Header and body checks before running transactions
+2. **Post-execution validation**: State root and receipts verification after execution
+
+### Header Validation
+Headers must pass several checks:
+- **Timestamp**: Must be greater than parent's timestamp
+- **Gas limit**: Changes must be within protocol limits (1/1024 of parent)
+- **Extra data**: Size restrictions based on network rules
+- **Difficulty/PoS**: Appropriate validation for pre/post-merge
+
+### Body Validation
+Block bodies are validated against headers:
+- **Transaction root**: Merkle root must match header
+- **Withdrawals root**: For post-Shanghai blocks
+- **Blob validation**: For EIP-4844 transactions
+
+### Fork Choice
+The consensus engine determines the canonical chain:
+- Tracks multiple chain branches
+- Applies fork choice rules (longest chain, most work, etc.)
+- Handles reorganizations when better chains are found
+
+## Next Steps
+
+- Explore [EVM](/sdk/node-components/evm) execution
+- Learn about [RPC](/sdk/node-components/rpc) server integration
+- Understand [Transaction Pool](/sdk/node-components/pool) interaction
\ No newline at end of file
diff --git a/book/vocs/docs/pages/sdk/node-components/evm.mdx b/book/vocs/docs/pages/sdk/node-components/evm.mdx
new file mode 100644
index 0000000000..6047f69bd7
--- /dev/null
+++ b/book/vocs/docs/pages/sdk/node-components/evm.mdx
@@ -0,0 +1,45 @@
+# EVM Component
+
+The EVM (Ethereum Virtual Machine) component handles transaction execution and state transitions. It's responsible for processing transactions and updating the blockchain state.
+
+## Overview
+
+The EVM component manages:
+- Transaction execution
+- State transitions and updates
+- Gas calculation and metering
+- Custom precompiles and opcodes
+- Block execution and validation
+- State management and caching
+
+## Architecture
+
+
+## Key Concepts
+
+### Transaction Execution
+The EVM executes transactions in a deterministic way:
+1. **Environment Setup**: Configure block and transaction context
+2. **State Access**: Load accounts and storage from the database
+3. **Execution**: Run EVM bytecode with gas metering
+4. **State Updates**: Apply changes to accounts and storage
+5. **Receipt Generation**: Create execution receipts with logs
+
+### Block Execution
+Block executors process all transactions in a block:
+- Validate pre-state conditions
+- Execute transactions sequentially
+- Apply block rewards
+- Verify post-state (state root, receipts root)
+
+### Block Building
+Block builders construct new blocks for proposal:
+- Select transactions (e.g. mempool)
+- Order and execute transactions
+- Seal the block with a header (state root)
+
+## Next Steps
+
+- Learn about [RPC](/sdk/node-components/rpc) server integration
+- Explore [Transaction Pool](/sdk/node-components/pool) interaction
+- Review [Consensus](/sdk/node-components/consensus) validation
\ No newline at end of file
diff --git a/book/vocs/docs/pages/sdk/node-components/network.mdx b/book/vocs/docs/pages/sdk/node-components/network.mdx
new file mode 100644
index 0000000000..308087305a
--- /dev/null
+++ b/book/vocs/docs/pages/sdk/node-components/network.mdx
@@ -0,0 +1,55 @@
+# Network Component
+
+The network component handles all peer-to-peer communication in Reth, including peer discovery, connection management, and protocol message handling.
+
+## Overview
+
+The network stack implements the Ethereum Wire Protocol (ETH) and provides:
+- Peer discovery via discv4 and discv5
+- Connection management with configurable peer limits
+- Transaction propagation
+- State synchronization
+- Request/response protocols (e.g. GetBlockHeaders, GetBlockBodies)
+
+## Architecture
+
+```mermaid
+graph TD
+ NetworkManager[Network Manager] --> Discovery[Discovery]
+ NetworkManager --> Sessions[Session Manager]
+ NetworkManager --> Swarm[Swarm]
+
+ Discovery --> discv4[discv4]
+ Discovery --> discv5[discv5]
+ Discovery --> DNS[DNS Discovery]
+
+ Sessions --> ETH[ETH Protocol]
+```
+
+## Key Concepts
+
+### Peer Discovery
+The network uses multiple discovery mechanisms to find and connect to peers:
+- **discv4**: UDP-based discovery protocol for finding peers
+- **discv5**: Improved discovery protocol with better security
+- **DNS Discovery**: Peer lists published via DNS for bootstrap
+
+### Connection Management
+- Maintains separate limits for inbound and outbound connections
+- Implements peer scoring and reputation tracking
+- Handles connection lifecycle and graceful disconnections
+
+### Protocol Support
+- **ETH Protocol**: Core Ethereum wire protocol for blocks and transactions
+
+### Message Broadcasting
+The network efficiently propagates new blocks and transactions to peers using:
+- Transaction pooling and deduplication
+- Block announcement strategies
+- Bandwidth management
+
+## Next Steps
+
+- Learn about the [Transaction Pool](/sdk/node-components/pool)
+- Understand [Consensus](/sdk/node-components/consensus) integration
+- Explore [RPC](/sdk/node-components/rpc) server setup
\ No newline at end of file
diff --git a/book/vocs/docs/pages/sdk/node-components/pool.mdx b/book/vocs/docs/pages/sdk/node-components/pool.mdx
new file mode 100644
index 0000000000..301d794b3f
--- /dev/null
+++ b/book/vocs/docs/pages/sdk/node-components/pool.mdx
@@ -0,0 +1,80 @@
+# Transaction Pool Component
+
+The transaction pool (mempool) manages pending transactions before they are included in blocks. It handles validation, ordering, replacement, and eviction of transactions.
+
+## Overview
+
+The transaction pool is responsible for:
+- Validating incoming transactions
+- Maintaining transaction ordering (e.g. by fees)
+- Handling transaction replacement
+- Managing pool size limits
+- Broadcasting transactions to peers
+- Providing transactions for block building
+
+## Architecture
+
+```mermaid
+graph TD
+ API[Pool API] --> Validator[Transaction Validator]
+ API --> Pool[Transaction Pool]
+
+ Pool --> SubPools[Sub-Pools]
+ SubPools --> Pending[Pending Pool]
+ SubPools --> Queued[Queued Pool]
+ SubPools --> Base[Base Fee Pool]
+
+ Pool --> Ordering[Transaction Ordering]
+ Pool --> Listeners[Event Listeners]
+
+ Validator --> Checks[Validation Checks]
+ Checks --> Nonce[Nonce Check]
+ Checks --> Balance[Balance Check]
+```
+
+## Key Concepts
+
+### Transaction Validation
+The pool validates transactions before accepting them, checking the following (distilled in the sketch below):
+- Sender has sufficient balance for gas and value
+- Nonce is correct (either next expected or future)
+- Gas price meets minimum requirements
+- Transaction size is within limits
+- Signature is valid
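+
+A hypothetical distillation of these checks (all names invented; Reth's real validation sits behind its transaction validator abstraction):
+
+```rust
+struct Account { balance: u128, nonce: u64 }
+struct Tx { value: u128, gas_limit: u64, max_fee_per_gas: u128, nonce: u64, size: usize }
+
+fn validate(tx: &Tx, sender: &Account, min_fee: u128, max_size: usize) -> Result<(), &'static str> {
+    // Worst-case cost the sender must be able to cover.
+    let max_cost = tx.value + tx.gas_limit as u128 * tx.max_fee_per_gas;
+    if sender.balance < max_cost { return Err("insufficient balance"); }
+    if tx.nonce < sender.nonce { return Err("nonce too low"); }
+    if tx.max_fee_per_gas < min_fee { return Err("fee below minimum"); }
+    if tx.size > max_size { return Err("oversized transaction"); }
+    Ok(()) // signature validity is established when recovering the sender
+}
+```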
+
+### Transaction Ordering
+Transactions are ordered by their effective tip per gas to maximize block rewards. Custom ordering strategies can prioritize certain addresses or implement MEV protection.
+
+### Sub-Pools
+- **Pending**: Transactions ready for inclusion (correct nonce)
+- **Queued**: Future transactions (nonce gap exists)
+- **Base Fee**: Transactions priced below current base fee
+
+### Pool Maintenance
+The pool requires periodic maintenance to:
+- Remove stale transactions
+- Revalidate after chain reorganizations
+- Update base fee thresholds
+- Enforce size limits
+
+## Advanced Features
+
+### Blob Transaction Support
+EIP-4844 introduces blob transactions with separate blob storage and special validation rules.
+
+### Transaction Filters
+Custom filters can block specific addresses, limit gas prices, or implement custom acceptance criteria.
+
+### Event System
+The pool supports an event system that allows other components to listen for transaction lifecycle events (sketched below) such as:
+- Transaction added
+- Transaction removed
+- Transaction replaced
+- Transaction promoted to pending state
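+
+A minimal sketch of such a listener using a broadcast channel (the event enum is invented; Reth's real pool events carry richer data):
+
+```rust
+use tokio::sync::broadcast;
+
+#[derive(Clone, Debug)]
+enum PoolEvent {
+    Added([u8; 32]),
+    Removed([u8; 32]),
+    Replaced { old: [u8; 32], new: [u8; 32] },
+    Promoted([u8; 32]),
+}
+
+#[tokio::main]
+async fn main() {
+    let (events, mut listener) = broadcast::channel::<PoolEvent>(128);
+
+    // Another component subscribes and reacts to lifecycle events.
+    let handle = tokio::spawn(async move {
+        if let Ok(event) = listener.recv().await {
+            println!("pool event: {event:?}");
+        }
+    });
+
+    events.send(PoolEvent::Added([0u8; 32])).unwrap();
+    handle.await.unwrap();
+}
+```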
+
+
+## Next Steps
+
+- Learn about [Consensus](/sdk/node-components/consensus) validation
+- Explore [EVM](/sdk/node-components/evm) execution
+- Understand [RPC](/sdk/node-components/rpc) server integration
\ No newline at end of file
diff --git a/book/vocs/docs/pages/sdk/node-components/rpc.mdx b/book/vocs/docs/pages/sdk/node-components/rpc.mdx
new file mode 100644
index 0000000000..4f9fa1e3d7
--- /dev/null
+++ b/book/vocs/docs/pages/sdk/node-components/rpc.mdx
@@ -0,0 +1,20 @@
+# RPC Component
+
+The RPC component provides external API access to the node, implementing the Ethereum JSON-RPC specification and allowing custom extensions.
+
+## Overview
+
+The RPC component provides the following (custom extensions are sketched below):
+- Standard Ethereum JSON-RPC methods
+- WebSocket subscriptions
+- Custom method extensions
+- Rate limiting and access control
+- Request batching support
+- Multiple transport protocols (HTTP, WebSocket, IPC)
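+
+Reth's RPC servers are built on jsonrpsee, so custom namespaces can be defined with its proc macros. A minimal sketch (the `custom_version` method is invented for illustration):
+
+```rust
+use jsonrpsee::{core::RpcResult, proc_macros::rpc};
+
+// Exposed alongside the standard eth_ namespace as `custom_*` methods.
+#[rpc(server, namespace = "custom")]
+pub trait CustomApi {
+    /// Callable over HTTP/WS as `custom_version`.
+    #[method(name = "version")]
+    fn version(&self) -> RpcResult<String>;
+}
+
+pub struct CustomApiImpl;
+
+// The `CustomApiServer` trait is generated by the `#[rpc(server)]` macro.
+impl CustomApiServer for CustomApiImpl {
+    fn version(&self) -> RpcResult<String> {
+        Ok("v1".to_string())
+    }
+}
+```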
+
+
+## Next Steps
+
+- Explore [Network](/sdk/node-components/network) component integration
+- Learn about [Transaction Pool](/sdk/node-components/pool) APIs
+- Understand [EVM](/sdk/node-components/evm) execution context
\ No newline at end of file
diff --git a/book/vocs/docs/pages/sdk/overview.mdx b/book/vocs/docs/pages/sdk/overview.mdx
new file mode 100644
index 0000000000..b427ae8834
--- /dev/null
+++ b/book/vocs/docs/pages/sdk/overview.mdx
@@ -0,0 +1,127 @@
+# Reth for Developers
+
+Reth can be used as a library to build custom Ethereum nodes, interact with blockchain data, or create specialized tools for blockchain analysis and indexing.
+
+## What is the Reth SDK?
+
+The Reth SDK allows developers to:
+- Use components of the Reth node as libraries
+- Build custom Ethereum execution nodes with modified behavior (e.g. payload building)
+- Access blockchain data directly from the database
+- Create high-performance indexing solutions
+- Extend a node with new RPC endpoints and functionality
+- Implement custom consensus mechanisms
+- Build specialized tools for blockchain analysis
+
+## Quick Start
+
+Add Reth to your project:
+
+### Ethereum
+
+```toml
+[dependencies]
+# Ethereum meta crate
+reth-ethereum = { git = "https://github.com/paradigmxyz/reth" }
+```
+
+### OP Stack
+
+```toml
+[dependencies]
+reth-op = { git = "https://github.com/paradigmxyz/reth" }
+```
+
+## Key Concepts
+
+### Node Architecture
+
+Reth is built with modularity in mind. The main components include:
+
+- **Primitives**: Core data type abstractions like `Block`
+- **Node Builder**: Constructs and configures node instances
+- **Database**: Efficient storage using MDBX and static files
+- **Network**: P2P communication and block synchronization
+- **Consensus**: Block validation and chain management
+- **EVM**: Transaction execution and state transitions
+- **RPC**: JSON-RPC server for external communication
+- **Transaction Pool**: Pending transaction management
+
+### Dependency Management
+Reth is primarily built on top of the [alloy](https://github.com/alloy-rs/alloy) ecosystem, which provides the abstractions and implementations for core Ethereum data types, transaction handling, and EVM execution.
+
+
+### Type System
+
+Reth uses its own type system to handle different representations of blockchain data (example below):
+
+- **Primitives**: Core types like `B256`, `Address`, `U256`
+- **Transactions**: Multiple representations for different contexts (pooled, consensus, RPC)
+- **Blocks**: Headers, bodies, and sealed blocks with proven properties
+- **State**: Accounts, storage, and state transitions
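+
+For example, the core primitives come from `alloy-primitives`:
+
+```rust
+use alloy_primitives::{address, Address, B256, U256};
+
+fn main() {
+    let addr: Address = address!("d8dA6BF26964aF9D7eEd9e03E53415D37aA96045");
+    let hash: B256 = B256::with_last_byte(1);
+    let value: U256 = U256::from(1_000_000_000u64);
+    println!("{addr} {hash} {value}");
+}
+```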
+
+### Building Custom Nodes
+
+The node builder pattern allows you to customize every aspect of node behavior:
+
+```rust
+use reth_ethereum::node::{EthereumNode, NodeBuilder};
+
+// Build a custom node with modified components
+let node = NodeBuilder::new(config)
+    // install the Ethereum-specific node types
+    .with_types::<EthereumNode>()
+ .with_components(|components| {
+ // Customize components here
+ components
+ })
+ .build()
+ .await?;
+```
+
+## Architecture Overview
+
+```mermaid
+graph TD
+ A[Node Builder] --> B[Database]
+ A --> C[Network]
+ A --> D[Consensus]
+ A --> E[EVM]
+ A --> F[RPC Server]
+ A --> G[Transaction Pool]
+
+ B --> H[DB Storage]
+ B --> I[Static Files]
+
+ C --> J[Discovery]
+ C --> K[ETH Protocol]
+
+ E --> L[State Provider]
+ E --> M[Block Executor]
+```
+
+## Nodes Built with Reth
+
+Several production networks have been built using Reth's node builder pattern:
+
+### [BSC Reth](https://github.com/loocapro/reth-bsc)
+A Binance Smart Chain execution client, implementing BSC-specific consensus rules and features.
+
+### [Bera Reth](https://github.com/berachain/bera-reth)
+Berachain's execution client.
+
+### [Gnosis Reth](https://github.com/gnosischain/reth_gnosis)
+Gnosis Chain's implementation using Reth.
+
+
+## Next Steps
+
+- **[Node Components](/sdk/node-components)**: Deep dive into each component
+- **[Type System](/sdk/typesystem/block)**: Understanding Reth's type system
+- **[Custom Nodes](/sdk/custom-node/prerequisites)**: Building production nodes
+- **[Examples](/sdk/examples/modify-node)**: Real-world implementations
+
+## Resources
+
+- [API Documentation](https://docs.rs/reth/latest/reth/)
+- [GitHub Repository](https://github.com/paradigmxyz/reth)
diff --git a/book/vocs/docs/pages/sdk/typesystem/block.mdx b/book/vocs/docs/pages/sdk/typesystem/block.mdx
new file mode 100644
index 0000000000..450b4f93d1
--- /dev/null
+++ b/book/vocs/docs/pages/sdk/typesystem/block.mdx
@@ -0,0 +1,26 @@
+# Block Types
+
+The Reth type system provides a flexible abstraction for blocks through traits, allowing different implementations while maintaining type safety and consistency.
+
+## Type Relationships
+
+```mermaid
+graph TD
+ Block[Block Trait] --> Header[BlockHeader Trait]
+ Block --> Body[BlockBody Trait]
+
+ SealedBlock -.-> Block
+ SealedBlock --> SealedHeader
+ RecoveredBlock --> SealedBlock
+
+ SealedHeader --> Header
+
+ Body --> Transaction[Transactions]
+ Body --> Withdrawals[Withdrawals]
+```
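+
+A simplified sketch of what the sealed and recovered wrappers add (these are not Reth's actual definitions, which are generic over the primitive types):
+
+```rust
+use alloy_primitives::{Address, B256};
+
+// Hypothetical stand-ins for the real header/body types.
+struct Header;
+struct BlockBody;
+
+/// A header paired with its already-computed hash.
+struct SealedHeader { header: Header, hash: B256 }
+
+/// A block whose hash has been computed once and cached.
+struct SealedBlock { header: SealedHeader, body: BlockBody }
+
+/// A sealed block plus the senders recovered from transaction signatures.
+struct RecoveredBlock { block: SealedBlock, senders: Vec<Address> }
+```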
+
+## Next Steps
+
+- Learn about [Transaction Types](/sdk/typesystem/transaction-types)
+- Understand [Consensus](/sdk/node-components/consensus) validation
+- Explore [EVM](/sdk/node-components/evm) execution
diff --git a/book/vocs/docs/pages/sdk/typesystem/transaction-types.mdx b/book/vocs/docs/pages/sdk/typesystem/transaction-types.mdx
new file mode 100644
index 0000000000..e541727da8
--- /dev/null
+++ b/book/vocs/docs/pages/sdk/typesystem/transaction-types.mdx
@@ -0,0 +1,92 @@
+# Transaction Types and Representations
+
+Reth provides multiple transaction representations optimized for different stages of the transaction lifecycle. Understanding these types is crucial for working with the node's transaction handling pipeline.
+
+## Transaction Lifecycle
+
+Transactions go through several stages, each with its own optimized representation:
+
+```mermaid
+graph LR
+ RPC[RPC Transaction] --> Pool[Pooled Transaction]
+ Pool --> Consensus[Consensus Transaction]
+ Consensus --> Executed[Executed Transaction]
+
+ Pool -.-> RPC
+ Consensus -.-> Pool
+```
+
+## Transaction Representations
+
+### RPC Transaction
+
+The RPC representation is designed for JSON-RPC communication with external clients. It uses JSON-compatible types and includes all information clients need to understand transaction status.
+
+Key characteristics:
+- **JSON-compatible types**: Uses U256 for numbers, hex strings for binary data
+- **Optional fields**: Supports both legacy and EIP-1559 transactions with appropriate fields
+- **Block context**: Includes block hash, number, and index when transaction is mined
+- **Human-readable**: Optimized for external consumption and debugging
+- **Complete information**: Contains all transaction details including signature components
+
+Use cases:
+- Sending transactions via `eth_sendTransaction`
+- Querying transaction details via `eth_getTransactionByHash`
+- Transaction receipts and history
+- Block explorer displays
+
+### Pooled Transaction
+
+The pooled representation is optimized for mempool storage and validation. It pre-computes expensive values and includes additional data needed for pool management.
+
+Key characteristics:
+- **Cached values**: Pre-computed sender address and transaction cost to avoid repeated calculations
+- **Validation ready**: Includes all data needed for quick pool validation
+- **Blob support**: Handles EIP-4844 blob sidecars separately from the core transaction
+- **Memory efficient**: Optimized structure for storing thousands of pending transactions
+- **Priority ordering**: Structured for efficient sorting by gas price/priority fee
+
+Use cases:
+- Transaction pool storage and management
+- Gas price ordering and replacement logic
+- Validation against account state
+- Broadcasting to peers
+
+### Consensus Transaction
+
+The consensus representation is the canonical format used in blocks and for network propagation. It's the most compact representation and follows Ethereum's wire protocol.
+
+Key characteristics:
+- **Type safety**: Enum variants for different transaction types (Legacy, EIP-2930, EIP-1559, EIP-4844)
+- **Compact encoding**: Canonical RLP-based encoding used on the wire and on disk
+- **No redundancy**: Minimal data, with values like sender recovered from signature when needed
+
+Use cases:
+- Block construction and validation
+- Network propagation between nodes
+- Persistent storage in the database
+- State transition execution
+
+## Representation Conversions
+
+### RPC → Pooled
+When transactions arrive via RPC:
+1. Validate JSON format and fields
+2. Convert to consensus format
+3. Recover sender from signature
+4. Create pooled representation
+
+### Pooled → Consensus
+When including in a block:
+1. Extract core transaction consensus data
+2. Remove cached values (sender, cost)
+
+### Consensus → RPC
+When serving RPC requests:
+1. Add block context (hash, number, index)
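+
+A simplified sketch of these shapes and the pooled-to-consensus conversion (Reth's real types are richer and generic):
+
+```rust
+use alloy_primitives::{Address, B256, U256};
+
+struct ConsensusTx; // the canonical signed payload
+
+struct PooledTx {
+    tx: ConsensusTx,
+    sender: Address, // cached: recovered once from the signature
+    cost: U256,      // cached: value plus worst-case gas cost
+}
+
+struct RpcTx {
+    tx: ConsensusTx,
+    block_hash: Option<B256>, // block context; `None` while pending
+    block_number: Option<u64>,
+    transaction_index: Option<u64>,
+}
+
+/// Pooled -> Consensus: drop the cached values, keep the signed payload.
+fn into_consensus(pooled: PooledTx) -> ConsensusTx {
+    pooled.tx
+}
+```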
+
+## Next Steps
+
+- Learn about [Block Types](/sdk/typesystem/block) and how transactions fit in blocks
+- Understand [Transaction Pool](/sdk/node-components/pool) management
+- Explore [EVM](/sdk/node-components/evm) transaction execution
\ No newline at end of file
diff --git a/book/vocs/docs/public/alchemy.png b/book/vocs/docs/public/alchemy.png
new file mode 100644
index 0000000000..422feb0327
Binary files /dev/null and b/book/vocs/docs/public/alchemy.png differ
diff --git a/book/vocs/docs/public/coinbase.png b/book/vocs/docs/public/coinbase.png
new file mode 100644
index 0000000000..2e71f9ec3a
Binary files /dev/null and b/book/vocs/docs/public/coinbase.png differ
diff --git a/book/vocs/docs/public/flashbots.png b/book/vocs/docs/public/flashbots.png
new file mode 100644
index 0000000000..1a4622becd
Binary files /dev/null and b/book/vocs/docs/public/flashbots.png differ
diff --git a/book/vocs/docs/public/logo.png b/book/vocs/docs/public/logo.png
new file mode 100644
index 0000000000..04a889b9d2
Binary files /dev/null and b/book/vocs/docs/public/logo.png differ
diff --git a/book/developers/exex/assets/remote_exex.png b/book/vocs/docs/public/remote_exex.png
similarity index 100%
rename from book/developers/exex/assets/remote_exex.png
rename to book/vocs/docs/public/remote_exex.png
diff --git a/book/vocs/docs/public/reth-prod.png b/book/vocs/docs/public/reth-prod.png
new file mode 100644
index 0000000000..d06c4579cc
Binary files /dev/null and b/book/vocs/docs/public/reth-prod.png differ
diff --git a/book/vocs/docs/public/succinct.png b/book/vocs/docs/public/succinct.png
new file mode 100644
index 0000000000..1261974aa8
Binary files /dev/null and b/book/vocs/docs/public/succinct.png differ
diff --git a/book/sources/Cargo.toml b/book/vocs/docs/snippets/sources/Cargo.toml
similarity index 100%
rename from book/sources/Cargo.toml
rename to book/vocs/docs/snippets/sources/Cargo.toml
diff --git a/book/sources/exex/hello-world/Cargo.toml b/book/vocs/docs/snippets/sources/exex/hello-world/Cargo.toml
similarity index 100%
rename from book/sources/exex/hello-world/Cargo.toml
rename to book/vocs/docs/snippets/sources/exex/hello-world/Cargo.toml
diff --git a/book/sources/exex/hello-world/src/bin/1.rs b/book/vocs/docs/snippets/sources/exex/hello-world/src/bin/1.rs
similarity index 100%
rename from book/sources/exex/hello-world/src/bin/1.rs
rename to book/vocs/docs/snippets/sources/exex/hello-world/src/bin/1.rs
diff --git a/book/sources/exex/hello-world/src/bin/2.rs b/book/vocs/docs/snippets/sources/exex/hello-world/src/bin/2.rs
similarity index 100%
rename from book/sources/exex/hello-world/src/bin/2.rs
rename to book/vocs/docs/snippets/sources/exex/hello-world/src/bin/2.rs
diff --git a/book/sources/exex/hello-world/src/bin/3.rs b/book/vocs/docs/snippets/sources/exex/hello-world/src/bin/3.rs
similarity index 100%
rename from book/sources/exex/hello-world/src/bin/3.rs
rename to book/vocs/docs/snippets/sources/exex/hello-world/src/bin/3.rs
diff --git a/book/sources/exex/remote/Cargo.toml b/book/vocs/docs/snippets/sources/exex/remote/Cargo.toml
similarity index 100%
rename from book/sources/exex/remote/Cargo.toml
rename to book/vocs/docs/snippets/sources/exex/remote/Cargo.toml
diff --git a/book/sources/exex/remote/build.rs b/book/vocs/docs/snippets/sources/exex/remote/build.rs
similarity index 100%
rename from book/sources/exex/remote/build.rs
rename to book/vocs/docs/snippets/sources/exex/remote/build.rs
diff --git a/book/sources/exex/remote/proto/exex.proto b/book/vocs/docs/snippets/sources/exex/remote/proto/exex.proto
similarity index 100%
rename from book/sources/exex/remote/proto/exex.proto
rename to book/vocs/docs/snippets/sources/exex/remote/proto/exex.proto
diff --git a/book/sources/exex/remote/src/consumer.rs b/book/vocs/docs/snippets/sources/exex/remote/src/consumer.rs
similarity index 100%
rename from book/sources/exex/remote/src/consumer.rs
rename to book/vocs/docs/snippets/sources/exex/remote/src/consumer.rs
diff --git a/book/sources/exex/remote/src/exex.rs b/book/vocs/docs/snippets/sources/exex/remote/src/exex.rs
similarity index 100%
rename from book/sources/exex/remote/src/exex.rs
rename to book/vocs/docs/snippets/sources/exex/remote/src/exex.rs
diff --git a/book/sources/exex/remote/src/exex_1.rs b/book/vocs/docs/snippets/sources/exex/remote/src/exex_1.rs
similarity index 100%
rename from book/sources/exex/remote/src/exex_1.rs
rename to book/vocs/docs/snippets/sources/exex/remote/src/exex_1.rs
diff --git a/book/sources/exex/remote/src/exex_2.rs b/book/vocs/docs/snippets/sources/exex/remote/src/exex_2.rs
similarity index 100%
rename from book/sources/exex/remote/src/exex_2.rs
rename to book/vocs/docs/snippets/sources/exex/remote/src/exex_2.rs
diff --git a/book/sources/exex/remote/src/exex_3.rs b/book/vocs/docs/snippets/sources/exex/remote/src/exex_3.rs
similarity index 100%
rename from book/sources/exex/remote/src/exex_3.rs
rename to book/vocs/docs/snippets/sources/exex/remote/src/exex_3.rs
diff --git a/book/sources/exex/remote/src/exex_4.rs b/book/vocs/docs/snippets/sources/exex/remote/src/exex_4.rs
similarity index 100%
rename from book/sources/exex/remote/src/exex_4.rs
rename to book/vocs/docs/snippets/sources/exex/remote/src/exex_4.rs
diff --git a/book/sources/exex/remote/src/lib.rs b/book/vocs/docs/snippets/sources/exex/remote/src/lib.rs
similarity index 100%
rename from book/sources/exex/remote/src/lib.rs
rename to book/vocs/docs/snippets/sources/exex/remote/src/lib.rs
diff --git a/book/sources/exex/tracking-state/Cargo.toml b/book/vocs/docs/snippets/sources/exex/tracking-state/Cargo.toml
similarity index 100%
rename from book/sources/exex/tracking-state/Cargo.toml
rename to book/vocs/docs/snippets/sources/exex/tracking-state/Cargo.toml
diff --git a/book/sources/exex/tracking-state/src/bin/1.rs b/book/vocs/docs/snippets/sources/exex/tracking-state/src/bin/1.rs
similarity index 100%
rename from book/sources/exex/tracking-state/src/bin/1.rs
rename to book/vocs/docs/snippets/sources/exex/tracking-state/src/bin/1.rs
diff --git a/book/sources/exex/tracking-state/src/bin/2.rs b/book/vocs/docs/snippets/sources/exex/tracking-state/src/bin/2.rs
similarity index 100%
rename from book/sources/exex/tracking-state/src/bin/2.rs
rename to book/vocs/docs/snippets/sources/exex/tracking-state/src/bin/2.rs
diff --git a/book/vocs/docs/styles.css b/book/vocs/docs/styles.css
new file mode 100644
index 0000000000..fcfc8cf2cd
--- /dev/null
+++ b/book/vocs/docs/styles.css
@@ -0,0 +1,31 @@
+@import "tailwindcss" important;
+
+@custom-variant dark (&:where(.dark, .dark *));
+
+[data-layout="landing"] .vocs_Button_button {
+ border-radius: 4px !important;
+ height: 36px !important;
+ padding: 0 16px !important;
+}
+
+[data-layout="landing"] .vocs_Content {
+ position: inherit;
+}
+
+#home-install .vocs_CodeGroup {
+ display: flex;
+ height: 100%;
+ flex-direction: column;
+}
+
+#home-install .vocs_Tabs_content {
+ flex: 1;
+}
+
+#home-install .vocs_Code {
+ font-size: 18px;
+}
+
+.border-accent {
+ border: 1px solid var(--vocs-color_borderAccent) !important;
+}
diff --git a/book/vocs/generate-redirects.ts b/book/vocs/generate-redirects.ts
new file mode 100644
index 0000000000..99466c294e
--- /dev/null
+++ b/book/vocs/generate-redirects.ts
@@ -0,0 +1,54 @@
+#!/usr/bin/env bun
+import { writeFileSync, mkdirSync } from 'fs'
+import { join, dirname } from 'path'
+import { redirects, basePath } from './redirects.config'
+// Base path for the site
+
+function generateRedirectHtml(targetPath: string): string {
+  return `<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="utf-8">
+    <title>Redirecting...</title>
+    <meta http-equiv="refresh" content="0; url=${targetPath}">
+    <link rel="canonical" href="${targetPath}">
+</head>
+<body>
+    <p>Reth mdbook has been migrated to new docs. If you are not redirected please <a href="${targetPath}">click here</a>.</p>